index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch/table/OpensearchDynamicSinkITCase.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.api.common.time.Deadline; import org.apache.flink.api.connector.sink2.Sink; import org.apache.flink.connector.opensearch.OpensearchUtil; import org.apache.flink.connector.opensearch.test.DockerImageVersions; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.catalog.Column; import org.apache.flink.table.catalog.ResolvedSchema; import org.apache.flink.table.catalog.UniqueConstraint; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.sink.SinkV2Provider; import org.apache.flink.table.data.GenericRowData; import org.apache.flink.table.data.RowData; import org.apache.flink.table.data.StringData; import org.apache.flink.table.data.TimestampData; import org.apache.flink.types.RowKind; import org.apache.flink.util.TestLoggerExtension; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.opensearch.OpenSearchStatusException; 
import org.opensearch.action.get.GetRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; import org.opensearch.rest.RestStatus; import org.opensearch.search.SearchHits; import org.opensearch.testcontainers.OpensearchContainer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.junit.jupiter.Container; import org.testcontainers.junit.jupiter.Testcontainers; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import static org.apache.flink.table.api.Expressions.row; import static org.assertj.core.api.Assertions.assertThat; /** IT tests for {@link OpensearchDynamicSink}. */ @ExtendWith(TestLoggerExtension.class) @Testcontainers class OpensearchDynamicSinkITCase { private static final Logger LOG = LoggerFactory.getLogger(OpensearchDynamicSinkITCase.class); @Container private static final OpensearchContainer OS_CONTAINER = OpensearchUtil.createOpensearchContainer(DockerImageVersions.OPENSEARCH_1, LOG); private TestContext getPrefilledTestContext(String index) { return TestContext.context() .withOption(OpensearchConnectorOptions.INDEX_OPTION.key(), index) .withOption( OpensearchConnectorOptions.HOSTS_OPTION.key(), OS_CONTAINER.getHttpHostAddress()) .withOption(OpensearchConnectorOptions.ALLOW_INSECURE.key(), "true") .withOption( OpensearchConnectorOptions.USERNAME_OPTION.key(), OS_CONTAINER.getUsername()) .withOption( OpensearchConnectorOptions.PASSWORD_OPTION.key(), OS_CONTAINER.getPassword()); } private String getConnectorSql(String index) { return String.format("'%s'='%s',\n", "connector", "opensearch") + String.format( "'%s'='%s',\n", OpensearchConnectorOptions.INDEX_OPTION.key(), 
index) + String.format( "'%s'='%s', \n", OpensearchConnectorOptions.HOSTS_OPTION.key(), OS_CONTAINER.getHttpHostAddress()) + String.format( "'%s'='%s', \n", OpensearchConnectorOptions.USERNAME_OPTION.key(), OS_CONTAINER.getUsername()) + String.format( "'%s'='%s', \n", OpensearchConnectorOptions.PASSWORD_OPTION.key(), OS_CONTAINER.getPassword()) + String.format( "'%s'='%s'\n", OpensearchConnectorOptions.ALLOW_INSECURE.key(), true); } @Test public void testWritingDocuments() throws Exception { ResolvedSchema schema = new ResolvedSchema( Arrays.asList( Column.physical("a", DataTypes.BIGINT().notNull()), Column.physical("b", DataTypes.TIME()), Column.physical("c", DataTypes.STRING().notNull()), Column.physical("d", DataTypes.FLOAT()), Column.physical("e", DataTypes.TINYINT().notNull()), Column.physical("f", DataTypes.DATE()), Column.physical("g", DataTypes.TIMESTAMP().notNull())), Collections.emptyList(), UniqueConstraint.primaryKey("name", Arrays.asList("a", "g"))); GenericRowData rowData = GenericRowData.of( 1L, 12345, StringData.fromString("ABCDE"), 12.12f, (byte) 2, 12345, TimestampData.fromLocalDateTime( LocalDateTime.parse("2012-12-12T12:12:12"))); String index = "writing-documents"; OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); DynamicTableSink.SinkRuntimeProvider runtimeProvider = sinkFactory .createDynamicTableSink( getPrefilledTestContext(index).withSchema(schema).build()) .getSinkRuntimeProvider(new OpensearchUtil.MockContext()); final SinkV2Provider sinkProvider = (SinkV2Provider) runtimeProvider; final Sink<RowData> sink = sinkProvider.createSink(); StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment(); environment.setParallelism(4); rowData.setRowKind(RowKind.UPDATE_AFTER); environment.<RowData>fromElements(rowData).sinkTo(sink); environment.execute(); RestHighLevelClient client = OpensearchUtil.createClient(OS_CONTAINER); Map<String, Object> response = client.get(new 
GetRequest(index, "1_2012-12-12T12:12:12"), RequestOptions.DEFAULT) .getSource(); Map<Object, Object> expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "00:00:12"); expectedMap.put("c", "ABCDE"); expectedMap.put("d", 12.12d); expectedMap.put("e", 2); expectedMap.put("f", "2003-10-20"); expectedMap.put("g", "2012-12-12 12:12:12"); assertThat(response).isEqualTo(expectedMap); } @Test public void testWritingDocumentsFromTableApi() throws Exception { TableEnvironment tableEnvironment = TableEnvironment.create(EnvironmentSettings.inStreamingMode()); String index = "table-api"; tableEnvironment.executeSql( "CREATE TABLE osTable (" + "a BIGINT NOT NULL,\n" + "b TIME,\n" + "c STRING NOT NULL,\n" + "d FLOAT,\n" + "e TINYINT NOT NULL,\n" + "f DATE,\n" + "g TIMESTAMP NOT NULL,\n" + "h as a + 2,\n" + "PRIMARY KEY (a, g) NOT ENFORCED\n" + ")\n" + "WITH (\n" + getConnectorSql(index) + ")"); tableEnvironment .fromValues( row( 1L, LocalTime.ofNanoOfDay(12345L * 1_000_000L), "ABCDE", 12.12f, (byte) 2, LocalDate.ofEpochDay(12345), LocalDateTime.parse("2012-12-12T12:12:12"))) .executeInsert("osTable") .await(); RestHighLevelClient client = OpensearchUtil.createClient(OS_CONTAINER); Map<String, Object> response = client.get(new GetRequest(index, "1_2012-12-12T12:12:12"), RequestOptions.DEFAULT) .getSource(); Map<Object, Object> expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "00:00:12"); expectedMap.put("c", "ABCDE"); expectedMap.put("d", 12.12d); expectedMap.put("e", 2); expectedMap.put("f", "2003-10-20"); expectedMap.put("g", "2012-12-12 12:12:12"); assertThat(response).isEqualTo(expectedMap); } @Test public void testWritingDocumentsNoPrimaryKey() throws Exception { TableEnvironment tableEnvironment = TableEnvironment.create(EnvironmentSettings.inStreamingMode()); String index = "no-primary-key"; tableEnvironment.executeSql( "CREATE TABLE osTable (" + "a BIGINT NOT NULL,\n" + "b TIME,\n" + "c STRING NOT NULL,\n" + "d FLOAT,\n" 
+ "e TINYINT NOT NULL,\n" + "f DATE,\n" + "g TIMESTAMP NOT NULL\n" + ")\n" + "WITH (\n" + getConnectorSql(index) + ")"); tableEnvironment .fromValues( row( 1L, LocalTime.ofNanoOfDay(12345L * 1_000_000L), "ABCDE", 12.12f, (byte) 2, LocalDate.ofEpochDay(12345), LocalDateTime.parse("2012-12-12T12:12:12")), row( 2L, LocalTime.ofNanoOfDay(12345L * 1_000_000L), "FGHIJK", 13.13f, (byte) 4, LocalDate.ofEpochDay(12345), LocalDateTime.parse("2013-12-12T13:13:13"))) .executeInsert("osTable") .await(); RestHighLevelClient client = OpensearchUtil.createClient(OS_CONTAINER); // search API does not return documents that were not indexed, we might need to query // the index a few times Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30)); SearchHits hits; do { hits = client.search(new SearchRequest(index), RequestOptions.DEFAULT).getHits(); if (hits.getTotalHits().value < 2) { Thread.sleep(200); } } while (hits.getTotalHits().value < 2 && deadline.hasTimeLeft()); if (hits.getTotalHits().value < 2) { throw new AssertionError("Could not retrieve results from Opensearch."); } HashSet<Map<String, Object>> resultSet = new HashSet<>(); resultSet.add(hits.getAt(0).getSourceAsMap()); resultSet.add(hits.getAt(1).getSourceAsMap()); Map<Object, Object> expectedMap1 = new HashMap<>(); expectedMap1.put("a", 1); expectedMap1.put("b", "00:00:12"); expectedMap1.put("c", "ABCDE"); expectedMap1.put("d", 12.12d); expectedMap1.put("e", 2); expectedMap1.put("f", "2003-10-20"); expectedMap1.put("g", "2012-12-12 12:12:12"); Map<Object, Object> expectedMap2 = new HashMap<>(); expectedMap2.put("a", 2); expectedMap2.put("b", "00:00:12"); expectedMap2.put("c", "FGHIJK"); expectedMap2.put("d", 13.13d); expectedMap2.put("e", 4); expectedMap2.put("f", "2003-10-20"); expectedMap2.put("g", "2013-12-12 13:13:13"); HashSet<Map<Object, Object>> expectedSet = new HashSet<>(); expectedSet.add(expectedMap1); expectedSet.add(expectedMap2); assertThat(resultSet).isEqualTo(expectedSet); } @Test public void 
testWritingDocumentsWithDynamicIndex() throws Exception { TableEnvironment tableEnvironment = TableEnvironment.create(EnvironmentSettings.inStreamingMode()); String index = "dynamic-index-{b|yyyy-MM-dd}"; tableEnvironment.executeSql( "CREATE TABLE osTable (" + "a BIGINT NOT NULL,\n" + "b TIMESTAMP NOT NULL,\n" + "PRIMARY KEY (a) NOT ENFORCED\n" + ")\n" + "WITH (\n" + getConnectorSql(index) + ")"); tableEnvironment .fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12"))) .executeInsert("osTable") .await(); RestHighLevelClient client = OpensearchUtil.createClient(OS_CONTAINER); Map<String, Object> response = client.get(new GetRequest("dynamic-index-2012-12-12", "1"), RequestOptions.DEFAULT) .getSource(); Map<Object, Object> expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "2012-12-12 12:12:12"); assertThat(response).isEqualTo(expectedMap); } @Test public void testWritingDocumentsWithDynamicIndexFromSystemTime() throws Exception { TableEnvironment tableEnvironment = TableEnvironment.create(EnvironmentSettings.inStreamingMode()); DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd"); tableEnvironment .getConfig() .getConfiguration() .setString("table.local-time-zone", "Asia/Shanghai"); String dynamicIndex1 = "dynamic-index-" + dateTimeFormatter.format(LocalDateTime.now(ZoneId.of("Asia/Shanghai"))) + "_index"; String index = "dynamic-index-{now()|yyyy-MM-dd}_index"; tableEnvironment.executeSql( "CREATE TABLE esTable (" + "a BIGINT NOT NULL,\n" + "b TIMESTAMP NOT NULL,\n" + "PRIMARY KEY (a) NOT ENFORCED\n" + ")\n" + "WITH (\n" + getConnectorSql(index) + ")"); String dynamicIndex2 = "dynamic-index-" + dateTimeFormatter.format(LocalDateTime.now(ZoneId.of("Asia/Shanghai"))) + "_index"; tableEnvironment .fromValues(row(1L, LocalDateTime.parse("2012-12-12T12:12:12"))) .executeInsert("esTable") .await(); RestHighLevelClient client = OpensearchUtil.createClient(OS_CONTAINER); Map<String, Object> response; try { 
response = client.get(new GetRequest(dynamicIndex1, "1"), RequestOptions.DEFAULT) .getSource(); } catch (OpenSearchStatusException e) { if (e.status() == RestStatus.NOT_FOUND) { response = client.get(new GetRequest(dynamicIndex2, "1"), RequestOptions.DEFAULT) .getSource(); } else { throw e; } } Map<Object, Object> expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "2012-12-12 12:12:12"); assertThat(response).isEqualTo(expectedMap); } }
2,900
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch/table/OpensearchDynamicSinkFactoryTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.api.common.typeutils.base.VoidSerializer; import org.apache.flink.connector.opensearch.OpensearchUtil; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.catalog.Column; import org.apache.flink.table.catalog.ResolvedSchema; import org.apache.flink.table.catalog.UniqueConstraint; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.sink.SinkV2Provider; import org.apache.flink.types.RowKind; import org.apache.flink.util.TestLoggerExtension; import org.assertj.core.api.ThrowableAssert; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import java.util.Arrays; import java.util.Collections; import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for validation in {@link OpensearchDynamicSinkFactory}. 
*/ @ExtendWith(TestLoggerExtension.class) class OpensearchDynamicSinkFactoryTest { private TestContext createPrefilledTestContext() { return TestContext.context() .withOption(OpensearchConnectorOptions.INDEX_OPTION.key(), "MyIndex") .withOption( OpensearchConnectorOptions.HOSTS_OPTION.key(), "http://localhost:12345"); } @Test public void validateEmptyConfiguration() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "One or more required options are missing.\n" + "\n" + "Missing required options are:\n" + "\n" + "hosts\n" + "index", () -> sinkFactory.createDynamicTableSink(TestContext.context().build())); } void assertValidationException( String expectedMessage, ThrowableAssert.ThrowingCallable executable) { assertThatThrownBy(executable) .isInstanceOf(ValidationException.class) .hasMessage(expectedMessage); } @Test public void validateWrongIndex() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "'index' must not be empty", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions.INDEX_OPTION.key(), "") .build())); } @Test public void validateWrongHosts() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions.HOSTS_OPTION.key(), "wrong-host") .build())); } @Test public void validateWrongFlushSize() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "'sink.bulk-flush.max-size' must be in MB granularity. 
Got: 1024 bytes", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions .BULK_FLUSH_MAX_SIZE_OPTION .key(), "1kb") .build())); } @Test public void validateWrongRetries() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions .BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION .key(), "0") .build())); } @Test public void validateWrongMaxActions() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "'sink.bulk-flush.max-actions' must be at least 1. Got: -2", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions .BULK_FLUSH_MAX_ACTIONS_OPTION .key(), "-2") .build())); } @Test public void validateWrongBackoffDelay() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "Invalid value for option 'sink.bulk-flush.backoff.delay'.", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions .BULK_FLUSH_BACKOFF_DELAY_OPTION .key(), "-1s") .build())); } @Test public void validatePrimaryKeyOnIllegalColumn() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "The table has a primary key on columns of illegal types: " + "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY].", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withSchema( new ResolvedSchema( Arrays.asList( Column.physical( "a", DataTypes.BIGINT() .notNull()), Column.physical( "b", DataTypes.ARRAY( DataTypes .BIGINT() .notNull()) .notNull()), Column.physical( "c", DataTypes.MAP( DataTypes .BIGINT(), DataTypes .STRING()) .notNull()), Column.physical( "d", 
DataTypes.MULTISET( DataTypes .BIGINT() .notNull()) .notNull()), Column.physical( "e", DataTypes.ROW( DataTypes .FIELD( "a", DataTypes .BIGINT())) .notNull()), Column.physical( "f", DataTypes.RAW( Void.class, VoidSerializer .INSTANCE) .notNull()), Column.physical( "g", DataTypes.BYTES() .notNull())), Collections.emptyList(), UniqueConstraint.primaryKey( "name", Arrays.asList( "a", "b", "c", "d", "e", "f", "g")))) .build())); } @Test public void validateWrongCredential() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); assertValidationException( "'username' and 'password' must be set at the same time. Got: username 'username' and password ''", () -> sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions.USERNAME_OPTION.key(), "username") .withOption( OpensearchConnectorOptions.PASSWORD_OPTION.key(), "") .build())); } @Test public void testSinkParallelism() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); DynamicTableSink sink = sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption(SINK_PARALLELISM.key(), "2") .build()); assertThat(sink).isInstanceOf(OpensearchDynamicSink.class); OpensearchDynamicSink opensearchSink = (OpensearchDynamicSink) sink; SinkV2Provider provider = (SinkV2Provider) opensearchSink.getSinkRuntimeProvider(new OpensearchUtil.MockContext()); assertThat(2).isEqualTo(provider.getParallelism().get()); } @Test public void validateDynamicIndexOnChangelogStream() { OpensearchDynamicSinkFactory sinkFactory = new OpensearchDynamicSinkFactory(); DynamicTableSink sink = sinkFactory.createDynamicTableSink( createPrefilledTestContext() .withOption( OpensearchConnectorOptions.INDEX_OPTION.key(), "dynamic-index-{now()|yyyy-MM-dd}_index") .build()); ChangelogMode changelogMode = ChangelogMode.newBuilder() .addContainedKind(RowKind.DELETE) .addContainedKind(RowKind.INSERT) .build(); assertValidationException( "Dynamic indexing 
based on system time only works on append only stream.", () -> sink.getChangelogMode(changelogMode)); } }
2,901
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/test/java/org/apache/flink/connector/opensearch/table/IndexGeneratorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.TableException; import org.apache.flink.table.data.GenericRowData; import org.apache.flink.table.data.RowData; import org.apache.flink.table.data.StringData; import org.apache.flink.table.data.TimestampData; import org.apache.flink.table.types.DataType; import org.junit.jupiter.api.Test; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.format.DateTimeFormatter; import java.time.temporal.UnsupportedTemporalTypeException; import java.util.Arrays; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; /** Suite tests for {@link IndexGenerator}. 
*/ class IndexGeneratorTest { private static final List<String> fieldNames = Arrays.asList( "id", "item", "log_ts", "log_date", "order_timestamp", "log_time", "local_datetime", "local_date", "local_time", "note", "status"); private static final List<DataType> dataTypes = Arrays.asList( DataTypes.INT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.DATE().bridgedTo(Date.class), DataTypes.TIMESTAMP().bridgedTo(Timestamp.class), DataTypes.TIME().bridgedTo(Time.class), DataTypes.TIMESTAMP().bridgedTo(LocalDateTime.class), DataTypes.DATE().bridgedTo(LocalDate.class), DataTypes.TIME().bridgedTo(LocalTime.class), DataTypes.STRING(), DataTypes.BOOLEAN()); private static final List<RowData> rows = Arrays.asList( GenericRowData.of( 1, StringData.fromString("apple"), Timestamp.valueOf("2020-03-18 12:12:14").getTime(), (int) Date.valueOf("2020-03-18").toLocalDate().toEpochDay(), TimestampData.fromTimestamp(Timestamp.valueOf("2020-03-18 12:12:14")), (int) (Time.valueOf("12:12:14").toLocalTime().toNanoOfDay() / 1_000_000L), TimestampData.fromLocalDateTime( LocalDateTime.of(2020, 3, 18, 12, 12, 14, 1000)), (int) LocalDate.of(2020, 3, 18).toEpochDay(), (int) (LocalTime.of(12, 13, 14, 2000).toNanoOfDay() / 1_000_000L), "test1", true), GenericRowData.of( 2, StringData.fromString("peanut"), Timestamp.valueOf("2020-03-19 12:22:14").getTime(), (int) Date.valueOf("2020-03-19").toLocalDate().toEpochDay(), TimestampData.fromTimestamp(Timestamp.valueOf("2020-03-19 12:22:21")), (int) (Time.valueOf("12:22:21").toLocalTime().toNanoOfDay() / 1_000_000L), TimestampData.fromLocalDateTime( LocalDateTime.of(2020, 3, 19, 12, 22, 14, 1000)), (int) LocalDate.of(2020, 3, 19).toEpochDay(), (int) (LocalTime.of(12, 13, 14, 2000).toNanoOfDay() / 1_000_000L), "test2", false)); @Test public void testDynamicIndexFromTimestamp() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes); indexGenerator.open(); 
assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index"); IndexGenerator indexGenerator1 = IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes); indexGenerator1.open(); assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index"); } @Test public void testDynamicIndexFromLocalDateTime() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "{local_datetime|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index"); IndexGenerator indexGenerator1 = IndexGeneratorFactory.createIndexGenerator( "{local_datetime|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes); indexGenerator1.open(); assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index"); } @Test public void testDynamicIndexFromDate() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "my-index-{log_date|yyyy/MM/dd}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19"); } @Test public void testDynamicIndexFromLocalDate() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "my-index-{local_date|yyyy/MM/dd}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19"); } @Test public void testDynamicIndexFromTime() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "my-index-{log_time|HH-mm}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-12"); 
assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-22"); } @Test public void testDynamicIndexFromLocalTime() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "my-index-{local_time|HH-mm}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-13"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-13"); } @Test public void testDynamicIndexDefaultFormat() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( "my-index-{local_time|}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12_13_14"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12_13_14"); } @Test public void testGeneralDynamicIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("index_{item}", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("index_apple"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("index_peanut"); } @Test public void testStaticIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index", fieldNames, dataTypes); indexGenerator.open(); assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index"); assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index"); } @Test public void testUnknownField() { String expectedExceptionMsg = "Unknown field 'unknown_ts' in index pattern 'my-index-{unknown_ts|yyyy-MM-dd}'," + " please check the field name."; try { IndexGeneratorFactory.createIndexGenerator( "my-index-{unknown_ts|yyyy-MM-dd}", fieldNames, dataTypes); } catch (TableException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } } @Test public void testUnsupportedTimeType() { String expectedExceptionMsg = "Unsupported type 'INT' found in Opensearch dynamic index field, " + 
"time-related pattern only support types are: DATE,TIME,TIMESTAMP."; try { IndexGeneratorFactory.createIndexGenerator( "my-index-{id|yyyy-MM-dd}", fieldNames, dataTypes); } catch (TableException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } } @Test public void testUnsupportedMultiParametersType() { String expectedExceptionMsg = "Chaining dynamic index pattern my-index-{local_date}-{local_time} is not supported," + " only support single dynamic index pattern."; try { IndexGeneratorFactory.createIndexGenerator( "my-index-{local_date}-{local_time}", fieldNames, dataTypes); } catch (TableException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } } @Test public void testDynamicIndexUnsupportedFormat() { String expectedExceptionMsg = "Unsupported field: HourOfDay"; try { IndexGeneratorFactory.createIndexGenerator( "my-index-{local_date|yyyy/MM/dd HH:mm}", fieldNames, dataTypes); } catch (UnsupportedTemporalTypeException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } } @Test public void testUnsupportedIndexFieldType() { String expectedExceptionMsg = "Unsupported type BOOLEAN of index field, Supported types are:" + " [DATE, TIME_WITHOUT_TIME_ZONE, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE," + " TIMESTAMP_WITH_LOCAL_TIME_ZONE, VARCHAR, CHAR, TINYINT, INTEGER, BIGINT]"; try { IndexGeneratorFactory.createIndexGenerator("index_{status}", fieldNames, dataTypes); } catch (IllegalArgumentException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } } @Test public void testDynamicIndexFromSystemTime() { List<String> supportedUseCases = Arrays.asList( "now()", "NOW()", "now( )", "NOW(\t)", "\t NOW( ) \t", "current_timestamp", "CURRENT_TIMESTAMP", "\tcurrent_timestamp\t", " current_timestamp "); supportedUseCases.stream() .forEach( f -> { DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy_MM_dd"); IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( 
String.format("my-index-{%s|yyyy_MM_dd}", f), fieldNames, dataTypes); indexGenerator.open(); // The date may change during the running of the unit test. // Generate expected index-name based on the current time // before and after calling the generate method. String expectedIndex1 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter); String actualIndex = indexGenerator.generate(rows.get(1)); String expectedIndex2 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter); assertThat( actualIndex.equals(expectedIndex1) || actualIndex.equals(expectedIndex2)) .isTrue(); }); List<String> invalidUseCases = Arrays.asList( "now", "now(", "NOW", "NOW)", "current_timestamp()", "CURRENT_TIMESTAMP()", "CURRENT_timestamp"); invalidUseCases.stream() .forEach( f -> { String expectedExceptionMsg = String.format( "Unknown field '%s' in index pattern 'my-index-{%s|yyyy_MM_dd}'," + " please check the field name.", f, f); try { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator( String.format("my-index-{%s|yyyy_MM_dd}", f), fieldNames, dataTypes); indexGenerator.open(); } catch (TableException e) { assertThat(e.getMessage()).isEqualTo(expectedExceptionMsg); } }); } }
2,902
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/OpensearchSink.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.connectors.opensearch;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.opensearch.util.NoOpFailureHandler;
import org.apache.flink.util.InstantiationUtil;
import org.apache.flink.util.Preconditions;

import org.apache.http.HttpHost;
import org.opensearch.action.ActionRequest;
import org.opensearch.action.DocWriteRequest;
import org.opensearch.action.bulk.BackoffPolicy;
import org.opensearch.action.bulk.BulkItemResponse;
import org.opensearch.action.bulk.BulkProcessor;
import org.opensearch.action.bulk.BulkRequest;
import org.opensearch.action.bulk.BulkResponse;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestClient;
import org.opensearch.client.RestClientBuilder;
import org.opensearch.client.RestHighLevelClient;
import org.opensearch.common.unit.ByteSizeUnit;
import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.rest.RestStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;

import java.io.IOException;
import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;

/**
 * Base class for all Flink Opensearch Sinks.
 *
 * <p>This class implements the common behaviour across Opensearch versions, such as the use of an
 * internal {@link BulkProcessor} to buffer multiple {@link ActionRequest}s before sending the
 * requests to the cluster, as well as passing input records to the user provided {@link
 * OpensearchSinkFunction} for processing.
 *
 * @param <T> Type of the elements handled by this sink
 * @deprecated This sink has been deprecated in favor of {@link
 *     org.apache.flink.connector.opensearch.sink.OpensearchSink}
 */
@Deprecated
@PublicEvolving
public class OpensearchSink<T> extends RichSinkFunction<T> implements CheckpointedFunction {

    private static final long serialVersionUID = -1007596293618451942L;

    private static final Logger LOG = LoggerFactory.getLogger(OpensearchSink.class);

    // ------------------------------------------------------------------------
    //  Internal bulk processor configuration
    // ------------------------------------------------------------------------

    public static final String CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS = "bulk.flush.max.actions";
    public static final String CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB = "bulk.flush.max.size.mb";
    public static final String CONFIG_KEY_BULK_FLUSH_INTERVAL_MS = "bulk.flush.interval.ms";
    public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE = "bulk.flush.backoff.enable";
    public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE = "bulk.flush.backoff.type";
    public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES = "bulk.flush.backoff.retries";
    public static final String CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY = "bulk.flush.backoff.delay";

    /** Used to control whether the retry delay should increase exponentially or remain constant. */
    @PublicEvolving
    public enum FlushBackoffType {
        CONSTANT,
        EXPONENTIAL
    }

    /**
     * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to
     * resource constraints (i.e. the client's internal thread pool is full), the backoff policy
     * decides how long the bulk processor will wait before the operation is retried internally.
     *
     * <p>This is a proxy for version specific backoff policies.
     */
    public static class BulkFlushBackoffPolicy implements Serializable {

        private static final long serialVersionUID = -6022851996101826049L;

        // the default values follow the Opensearch default settings for BulkProcessor
        private FlushBackoffType backoffType = FlushBackoffType.EXPONENTIAL;
        private int maxRetryCount = 8;
        private long delayMillis = 50;

        public FlushBackoffType getBackoffType() {
            return backoffType;
        }

        public int getMaxRetryCount() {
            return maxRetryCount;
        }

        public long getDelayMillis() {
            return delayMillis;
        }

        public void setBackoffType(FlushBackoffType backoffType) {
            this.backoffType = checkNotNull(backoffType);
        }

        public void setMaxRetryCount(int maxRetryCount) {
            checkArgument(maxRetryCount >= 0);
            this.maxRetryCount = maxRetryCount;
        }

        public void setDelayMillis(long delayMillis) {
            checkArgument(delayMillis >= 0);
            this.delayMillis = delayMillis;
        }
    }

    // Bulk processor settings extracted from the user config; {@code null} means
    // "use the BulkProcessor default" for the respective setting.
    private final Integer bulkProcessorFlushMaxActions;
    private final Integer bulkProcessorFlushMaxSizeMb;
    private final Long bulkProcessorFlushIntervalMillis;
    private final BulkFlushBackoffPolicy bulkProcessorFlushBackoffPolicy;

    // ------------------------------------------------------------------------
    //  User-facing API and configuration
    // ------------------------------------------------------------------------

    /**
     * The config map that contains configuration for the bulk flushing behaviours.
     *
     * <p>For {@link org.opensearch.client.transport.TransportClient} based implementations, this
     * config map would also contain Opensearch-shipped configuration, and therefore this config map
     * would also be forwarded when creating the Opensearch client.
     */
    private final Map<String, String> userConfig;

    /**
     * The function that is used to construct multiple {@link ActionRequest ActionRequests} from
     * each incoming element.
     */
    private final OpensearchSinkFunction<T> opensearchSinkFunction;

    /** User-provided handler for failed {@link ActionRequest ActionRequests}. */
    private final ActionRequestFailureHandler failureHandler;

    /**
     * If true, the producer will wait until all outstanding action requests have been sent to
     * Opensearch.
     */
    private boolean flushOnCheckpoint = true;

    /**
     * Provided to the user via the {@link OpensearchSinkFunction} to add {@link ActionRequest
     * ActionRequests}.
     */
    private transient RequestIndexer requestIndexer;

    /**
     * Provided to the {@link ActionRequestFailureHandler} to allow users to re-index failed
     * requests.
     */
    private transient BufferingNoOpRequestIndexer failureRequestIndexer;

    // ------------------------------------------------------------------------
    //  Internals for the Flink Opensearch Sink
    // ------------------------------------------------------------------------

    /** Opensearch client instance. */
    private transient RestHighLevelClient client;

    /**
     * Number of pending action requests not yet acknowledged by Opensearch. This value is
     * maintained only if {@link OpensearchSink#flushOnCheckpoint} is {@code true}.
     *
     * <p>This is incremented whenever the user adds (or re-adds through the {@link
     * ActionRequestFailureHandler}) requests to the {@link RequestIndexer}. It is decremented for
     * each completed request of a bulk request, in {@link BulkProcessor.Listener#afterBulk(long,
     * BulkRequest, BulkResponse)} and {@link BulkProcessor.Listener#afterBulk(long, BulkRequest,
     * Throwable)}.
     */
    private AtomicLong numPendingRequests = new AtomicLong(0);

    /** User-provided HTTP Host. */
    private final List<HttpHost> httpHosts;

    /** The factory to configure the rest client. */
    private final RestClientFactory restClientFactory;

    /** Bulk processor to buffer and send requests to Opensearch, created using the client. */
    private transient BulkProcessor bulkProcessor;

    /**
     * This is set from inside the {@link BulkProcessor.Listener} if a {@link Throwable} was thrown
     * in callbacks and the user considered it should fail the sink via the {@link
     * ActionRequestFailureHandler#onFailure(ActionRequest, Throwable, int, RequestIndexer)} method.
     *
     * <p>Errors will be checked and rethrown before processing each input element, and when the
     * sink is closed.
     */
    private final AtomicReference<Throwable> failureThrowable = new AtomicReference<>();

    /**
     * Creates the sink. Bulk-flush related keys are extracted (and removed) from {@code
     * userConfig}; the remaining entries are kept for client configuration.
     *
     * @param userConfig bulk flush settings plus any client configuration; must not be null
     * @param httpHosts hosts the {@link RestHighLevelClient} connects to; must be non-empty
     * @param opensearchSinkFunction turns incoming elements into {@link ActionRequest}s
     * @param failureHandler invoked for action requests that failed
     * @param restClientFactory customizes the underlying {@link RestClientBuilder}
     */
    private OpensearchSink(
            Map<String, String> userConfig,
            List<HttpHost> httpHosts,
            OpensearchSinkFunction<T> opensearchSinkFunction,
            ActionRequestFailureHandler failureHandler,
            RestClientFactory restClientFactory) {
        checkArgument(httpHosts != null && !httpHosts.isEmpty());
        this.httpHosts = httpHosts;
        this.restClientFactory = checkNotNull(restClientFactory);
        this.opensearchSinkFunction = checkNotNull(opensearchSinkFunction);
        this.failureHandler = checkNotNull(failureHandler);
        // we eagerly check if the user-provided sink function and failure handler is serializable;
        // otherwise, if they aren't serializable, users will merely get a non-informative error
        // message
        // "OpensearchSink is not serializable"

        checkArgument(
                InstantiationUtil.isSerializable(opensearchSinkFunction),
                "The implementation of the provided OpensearchSinkFunction is not serializable. "
                        + "The object probably contains or references non-serializable fields.");

        checkArgument(
                InstantiationUtil.isSerializable(failureHandler),
                "The implementation of the provided ActionRequestFailureHandler is not serializable. "
                        + "The object probably contains or references non-serializable fields.");

        // extract and remove bulk processor related configuration from the user-provided config,
        // so that the resulting user config only contains configuration related to the
        // Opensearch client.

        checkNotNull(userConfig);

        // copy config so we can remove entries without side-effects
        userConfig = new HashMap<>(userConfig);

        ParameterTool params = ParameterTool.fromMap(userConfig);

        if (params.has(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS)) {
            bulkProcessorFlushMaxActions = params.getInt(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS);
            userConfig.remove(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS);
        } else {
            bulkProcessorFlushMaxActions = null;
        }

        if (params.has(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB)) {
            bulkProcessorFlushMaxSizeMb = params.getInt(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB);
            userConfig.remove(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB);
        } else {
            bulkProcessorFlushMaxSizeMb = null;
        }

        if (params.has(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS)) {
            bulkProcessorFlushIntervalMillis = params.getLong(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
            userConfig.remove(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
        } else {
            bulkProcessorFlushIntervalMillis = null;
        }

        boolean bulkProcessorFlushBackoffEnable =
                params.getBoolean(CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, true);
        userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE);

        if (bulkProcessorFlushBackoffEnable) {
            this.bulkProcessorFlushBackoffPolicy = new BulkFlushBackoffPolicy();

            if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE)) {
                bulkProcessorFlushBackoffPolicy.setBackoffType(
                        FlushBackoffType.valueOf(params.get(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE)));
                userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE);
            }

            if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES)) {
                bulkProcessorFlushBackoffPolicy.setMaxRetryCount(
                        params.getInt(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES));
                userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES);
            }

            if (params.has(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY)) {
                bulkProcessorFlushBackoffPolicy.setDelayMillis(
                        params.getLong(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY));
                userConfig.remove(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY);
            }

        } else {
            bulkProcessorFlushBackoffPolicy = null;
        }

        this.userConfig = userConfig;
    }

    /**
     * Disable flushing on checkpoint. When disabled, the sink will not wait for all pending action
     * requests to be acknowledged by Opensearch on checkpoints.
     *
     * <p>NOTE: If flushing on checkpoint is disabled, the Flink Opensearch Sink does NOT provide
     * any strong guarantees for at-least-once delivery of action requests.
     */
    public void disableFlushOnCheckpoint() {
        this.flushOnCheckpoint = false;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        client = createClient();
        bulkProcessor = buildBulkProcessor(new BulkProcessorListener());
        requestIndexer =
                new OpensearchBulkProcessorIndexer(
                        bulkProcessor, flushOnCheckpoint, numPendingRequests);
        failureRequestIndexer = new BufferingNoOpRequestIndexer();
        opensearchSinkFunction.open();
    }

    @Override
    public void invoke(T value, Context context) throws Exception {
        // surface async failures / re-index buffered failed requests before accepting new input
        checkAsyncErrorsAndRequests();
        opensearchSinkFunction.process(value, getRuntimeContext(), requestIndexer);
    }

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // no initialization needed
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        checkAsyncErrorsAndRequests();

        if (flushOnCheckpoint) {
            // loop until every in-flight request has been acknowledged; flush() is blocking
            // because the bulk processor is configured with zero concurrent requests
            while (numPendingRequests.get() != 0) {
                bulkProcessor.flush();
                checkAsyncErrorsAndRequests();
            }
        }
    }

    @Override
    public void close() throws Exception {
        opensearchSinkFunction.close();
        if (bulkProcessor != null) {
            bulkProcessor.close();
            bulkProcessor = null;
        }

        if (client != null) {
            client.close();
            client = null;
        }

        // make sure any errors from callbacks are rethrown
        checkErrorAndRethrow();
    }

    /**
     * Build the {@link BulkProcessor} on top of the already created {@link #client}, applying the
     * flush settings extracted in the constructor.
     *
     * @param listener receives before/after callbacks for every bulk request.
     * @return the configured bulk processor.
     */
    private BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
        checkNotNull(listener);

        BulkProcessor.Builder bulkProcessorBuilder =
                BulkProcessor.builder(
                        (request, bulkListener) ->
                                client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                        listener);

        // This makes flush() blocking
        bulkProcessorBuilder.setConcurrentRequests(0);

        if (bulkProcessorFlushMaxActions != null) {
            bulkProcessorBuilder.setBulkActions(bulkProcessorFlushMaxActions);
        }

        if (bulkProcessorFlushMaxSizeMb != null) {
            configureBulkSize(bulkProcessorBuilder);
        }

        if (bulkProcessorFlushIntervalMillis != null) {
            configureFlushInterval(bulkProcessorBuilder);
        }

        // if backoff retrying is disabled, bulkProcessorFlushBackoffPolicy will be null
        configureBulkProcessorBackoff(bulkProcessorBuilder, bulkProcessorFlushBackoffPolicy);

        return bulkProcessorBuilder.build();
    }

    /**
     * Creates an Opensearch client implementing {@link AutoCloseable}, configured through the
     * user-provided {@link RestClientFactory}, and verifies the connection with a ping.
     *
     * @return The created client.
     * @throws IOException if the connectivity ping to the cluster fails.
     */
    private RestHighLevelClient createClient() throws IOException {
        RestClientBuilder builder =
                RestClient.builder(httpHosts.toArray(new HttpHost[httpHosts.size()]));
        restClientFactory.configureRestClientBuilder(builder);

        RestHighLevelClient rhlClient = new RestHighLevelClient(builder);
        verifyClientConnection(rhlClient);

        return rhlClient;
    }

    /**
     * Verify the client connection by making a test request/ping to the Opensearch cluster.
     *
     * <p>Called by {@link OpensearchSink#open(org.apache.flink.configuration.Configuration)} after
     * creating the client. This makes sure the underlying client is closed if the connection is not
     * successful and preventing thread leak.
     *
     * @param client the Opensearch client.
     */
    private void verifyClientConnection(RestHighLevelClient client) throws IOException {
        if (LOG.isInfoEnabled()) {
            LOG.info("Pinging Opensearch cluster via hosts {} ...", httpHosts);
        }

        if (!client.ping(RequestOptions.DEFAULT)) {
            throw new RuntimeException("There are no reachable Opensearch nodes!");
        }

        if (LOG.isInfoEnabled()) {
            LOG.info("Opensearch RestHighLevelClient is connected to {}", httpHosts.toString());
        }
    }

    /**
     * Set backoff-related configurations on the provided {@link BulkProcessor.Builder}. The builder
     * will be later on used to instantiate the actual {@link BulkProcessor}.
     *
     * @param builder the {@link BulkProcessor.Builder} to configure.
     * @param flushBackoffPolicy user-provided backoff retry settings ({@code null} if the user
     *     disabled backoff retries).
     */
    private static void configureBulkProcessorBackoff(
            BulkProcessor.Builder builder,
            @Nullable OpensearchSink.BulkFlushBackoffPolicy flushBackoffPolicy) {
        BackoffPolicy backoffPolicy;
        if (flushBackoffPolicy != null) {
            switch (flushBackoffPolicy.getBackoffType()) {
                case CONSTANT:
                    backoffPolicy =
                            BackoffPolicy.constantBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
                    break;
                case EXPONENTIAL:
                default:
                    backoffPolicy =
                            BackoffPolicy.exponentialBackoff(
                                    new TimeValue(flushBackoffPolicy.getDelayMillis()),
                                    flushBackoffPolicy.getMaxRetryCount());
            }
        } else {
            backoffPolicy = BackoffPolicy.noBackoff();
        }
        builder.setBackoffPolicy(backoffPolicy);
    }

    private void configureBulkSize(BulkProcessor.Builder bulkProcessorBuilder) {
        final ByteSizeUnit sizeUnit;
        if (bulkProcessorFlushMaxSizeMb == -1) {
            // bulk size can be disabled with -1, however the ByteSizeValue constructor accepts -1
            // only with BYTES as the size unit
            sizeUnit = ByteSizeUnit.BYTES;
        } else {
            sizeUnit = ByteSizeUnit.MB;
        }
        bulkProcessorBuilder.setBulkSize(new ByteSizeValue(bulkProcessorFlushMaxSizeMb, sizeUnit));
    }

    private void configureFlushInterval(BulkProcessor.Builder bulkProcessorBuilder) {
        // -1 means "disable interval-based flushing"; the BulkProcessor accepts null for that
        if (bulkProcessorFlushIntervalMillis == -1) {
            bulkProcessorBuilder.setFlushInterval(null);
        } else {
            bulkProcessorBuilder.setFlushInterval(
                    TimeValue.timeValueMillis(bulkProcessorFlushIntervalMillis));
        }
    }

    private void checkErrorAndRethrow() {
        Throwable cause = failureThrowable.get();
        if (cause != null) {
            throw new RuntimeException("An error occurred in OpensearchSink.", cause);
        }
    }

    private void checkAsyncErrorsAndRequests() {
        checkErrorAndRethrow();
        failureRequestIndexer.processBufferedRequests(requestIndexer);
    }

    /**
     * Listener invoked by the {@link BulkProcessor} after every bulk request. Dispatches failed
     * items to the {@link ActionRequestFailureHandler} and keeps {@link #numPendingRequests} in
     * sync.
     */
    private class BulkProcessorListener implements BulkProcessor.Listener {

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {}

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            if (response.hasFailures()) {
                BulkItemResponse itemResponse;
                Throwable failure;
                RestStatus restStatus;
                DocWriteRequest actionRequest;

                try {
                    for (int i = 0; i < response.getItems().length; i++) {
                        itemResponse = response.getItems()[i];
                        failure = extractFailureCauseFromBulkItemResponse(itemResponse);
                        if (failure != null) {
                            restStatus = itemResponse.getFailure().getStatus();
                            actionRequest = request.requests().get(i);
                            if (restStatus == null) {
                                // no REST status available; report -1 to the failure handler
                                if (actionRequest instanceof ActionRequest) {
                                    failureHandler.onFailure(
                                            (ActionRequest) actionRequest,
                                            failure,
                                            -1,
                                            failureRequestIndexer);
                                } else {
                                    throw new UnsupportedOperationException(
                                            "The sink currently only supports ActionRequests");
                                }
                            } else {
                                if (actionRequest instanceof ActionRequest) {
                                    failureHandler.onFailure(
                                            (ActionRequest) actionRequest,
                                            failure,
                                            restStatus.getStatus(),
                                            failureRequestIndexer);
                                } else {
                                    throw new UnsupportedOperationException(
                                            "The sink currently only supports ActionRequests");
                                }
                            }
                        }
                    }
                } catch (Throwable t) {
                    // fail the sink and skip the rest of the items
                    // if the failure handler decides to throw an exception
                    failureThrowable.compareAndSet(null, t);
                }
            }

            if (flushOnCheckpoint) {
                numPendingRequests.getAndAdd(-request.numberOfActions());
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            try {
                for (DocWriteRequest writeRequest : request.requests()) {
                    if (writeRequest instanceof ActionRequest) {
                        failureHandler.onFailure(
                                (ActionRequest) writeRequest, failure, -1, failureRequestIndexer);
                    } else {
                        throw new UnsupportedOperationException(
                                "The sink currently only supports ActionRequests");
                    }
                }
            } catch (Throwable t) {
                // fail the sink and skip the rest of the items
                // if the failure handler decides to throw an exception
                failureThrowable.compareAndSet(null, t);
            }

            if (flushOnCheckpoint) {
                numPendingRequests.getAndAdd(-request.numberOfActions());
            }
        }
    }

    /**
     * Extracts the cause of failure of a bulk item action.
     *
     * @param bulkItemResponse the bulk item response to extract cause of failure
     * @return the extracted {@link Throwable} from the response ({@code null} is the response is
     *     successful).
     */
    private static Throwable extractFailureCauseFromBulkItemResponse(
            BulkItemResponse bulkItemResponse) {
        if (!bulkItemResponse.isFailed()) {
            return null;
        } else {
            return bulkItemResponse.getFailure().getCause();
        }
    }

    // visible for testing
    long getNumPendingRequests() {
        if (flushOnCheckpoint) {
            return numPendingRequests.get();
        } else {
            throw new UnsupportedOperationException(
                    "The number of pending requests is not maintained when flushing on checkpoint is disabled.");
        }
    }

    /**
     * A builder for creating an {@link OpensearchSink}.
     *
     * @param <T> Type of the elements handled by the sink this builder creates.
     * @deprecated This has been deprecated, please use {@link
     *     org.apache.flink.connector.opensearch.sink.OpensearchSinkBuilder}.
     */
    @Deprecated
    @PublicEvolving
    public static class Builder<T> {

        private final List<HttpHost> httpHosts;
        private final OpensearchSinkFunction<T> opensearchSinkFunction;

        private Map<String, String> bulkRequestsConfig = new HashMap<>();
        private ActionRequestFailureHandler failureHandler = new NoOpFailureHandler();
        private RestClientFactory restClientFactory = restClientBuilder -> {};

        /**
         * Creates a new {@code OpensearchSink} that connects to the cluster using a {@link
         * RestHighLevelClient}.
         *
         * @param httpHosts The list of {@link HttpHost} to which the {@link RestHighLevelClient}
         *     connects to.
         * @param opensearchSinkFunction This is used to generate multiple {@link ActionRequest}
         *     from the incoming element.
         */
        public Builder(List<HttpHost> httpHosts, OpensearchSinkFunction<T> opensearchSinkFunction) {
            this.httpHosts = Preconditions.checkNotNull(httpHosts);
            this.opensearchSinkFunction = Preconditions.checkNotNull(opensearchSinkFunction);
        }

        /**
         * Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
         * disable it.
         *
         * @param numMaxActions the maximum number of actions to buffer per bulk request.
         */
        public void setBulkFlushMaxActions(int numMaxActions) {
            Preconditions.checkArgument(
                    numMaxActions == -1 || numMaxActions > 0,
                    "Max number of buffered actions must be larger than 0.");

            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
        }

        /**
         * Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
         * disable it.
         *
         * @param maxSizeMb the maximum size of buffered actions, in mb.
         */
        public void setBulkFlushMaxSizeMb(int maxSizeMb) {
            Preconditions.checkArgument(
                    maxSizeMb == -1 || maxSizeMb > 0,
                    "Max size of buffered actions must be larger than 0.");

            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, String.valueOf(maxSizeMb));
        }

        /**
         * Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
         *
         * @param intervalMillis the bulk flush interval, in milliseconds.
         */
        public void setBulkFlushInterval(long intervalMillis) {
            Preconditions.checkArgument(
                    intervalMillis == -1 || intervalMillis >= 0,
                    "Interval (in milliseconds) between each flush must be larger than or equal to 0.");

            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, String.valueOf(intervalMillis));
        }

        /**
         * Sets whether or not to enable bulk flush backoff behaviour.
         *
         * @param enabled whether or not to enable backoffs.
         */
        public void setBulkFlushBackoff(boolean enabled) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, String.valueOf(enabled));
        }

        /**
         * Sets the type of back of to use when flushing bulk requests.
         *
         * @param flushBackoffType the backoff type to use.
         */
        public void setBulkFlushBackoffType(FlushBackoffType flushBackoffType) {
            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE,
                    Preconditions.checkNotNull(flushBackoffType).toString());
        }

        /**
         * Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
         *
         * @param maxRetries the maximum number of retries for a backoff attempt when flushing bulk
         *     requests
         */
        public void setBulkFlushBackoffRetries(int maxRetries) {
            Preconditions.checkArgument(
                    maxRetries > 0, "Max number of backoff attempts must be larger than 0.");

            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, String.valueOf(maxRetries));
        }

        /**
         * Sets the amount of delay between each backoff attempt when flushing bulk requests, in
         * milliseconds.
         *
         * @param delayMillis the amount of delay between each backoff attempt when flushing bulk
         *     requests, in milliseconds.
         */
        public void setBulkFlushBackoffDelay(long delayMillis) {
            Preconditions.checkArgument(
                    delayMillis >= 0,
                    "Delay (in milliseconds) between each backoff attempt must be larger than or equal to 0.");

            this.bulkRequestsConfig.put(
                    CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, String.valueOf(delayMillis));
        }

        /**
         * Sets a failure handler for action requests.
         *
         * @param failureHandler This is used to handle failed {@link ActionRequest}.
         */
        public void setFailureHandler(ActionRequestFailureHandler failureHandler) {
            this.failureHandler = Preconditions.checkNotNull(failureHandler);
        }

        /**
         * Sets a REST client factory for custom client configuration.
         *
         * @param restClientFactory the factory that configures the rest client.
         */
        public void setRestClientFactory(RestClientFactory restClientFactory) {
            this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
        }

        /**
         * Creates the Opensearch sink.
         *
         * @return the created Opensearch sink.
         */
        public OpensearchSink<T> build() {
            return new OpensearchSink<>(
                    bulkRequestsConfig,
                    httpHosts,
                    opensearchSinkFunction,
                    failureHandler,
                    restClientFactory);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Builder<?> builder = (Builder<?>) o;
            return Objects.equals(httpHosts, builder.httpHosts)
                    && Objects.equals(opensearchSinkFunction, builder.opensearchSinkFunction)
                    && Objects.equals(bulkRequestsConfig, builder.bulkRequestsConfig)
                    && Objects.equals(failureHandler, builder.failureHandler)
                    && Objects.equals(restClientFactory, builder.restClientFactory);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                    httpHosts,
                    opensearchSinkFunction,
                    bulkRequestsConfig,
                    failureHandler,
                    restClientFactory);
        }
    }
}
2,903
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/RequestIndexer.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.connectors.opensearch; import org.apache.flink.annotation.PublicEvolving; import org.opensearch.action.ActionRequest; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.update.UpdateRequest; /** * Users add multiple delete, index or update requests to a {@link RequestIndexer} to prepare them * for sending to an Opensearch cluster. * * @deprecated This has been deprecated and will be removed in the future. */ @Deprecated @PublicEvolving public interface RequestIndexer { /** * Add multiple {@link ActionRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param actionRequests The multiple {@link ActionRequest} to add. * @deprecated use the {@link DeleteRequest}, {@link IndexRequest} or {@link UpdateRequest} */ @Deprecated default void add(ActionRequest... 
actionRequests) { for (ActionRequest actionRequest : actionRequests) { if (actionRequest instanceof IndexRequest) { add((IndexRequest) actionRequest); } else if (actionRequest instanceof DeleteRequest) { add((DeleteRequest) actionRequest); } else if (actionRequest instanceof UpdateRequest) { add((UpdateRequest) actionRequest); } else { throw new IllegalArgumentException( "RequestIndexer only supports Index, Delete and Update requests"); } } } /** * Add multiple {@link DeleteRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param deleteRequests The multiple {@link DeleteRequest} to add. */ void add(DeleteRequest... deleteRequests); /** * Add multiple {@link IndexRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param indexRequests The multiple {@link IndexRequest} to add. */ void add(IndexRequest... indexRequests); /** * Add multiple {@link UpdateRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param updateRequests The multiple {@link UpdateRequest} to add. */ void add(UpdateRequest... updateRequests); }
2,904
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/OpensearchBulkProcessorIndexer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.connectors.opensearch;

import org.apache.flink.annotation.Internal;

import org.opensearch.action.ActionRequest;
import org.opensearch.action.bulk.BulkProcessor;
import org.opensearch.action.delete.DeleteRequest;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.update.UpdateRequest;

import java.util.concurrent.atomic.AtomicLong;

import static org.apache.flink.util.Preconditions.checkNotNull;

/**
 * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}. {@link ActionRequest
 * ActionRequests} will be buffered before sending a bulk request to the Opensearch cluster.
 *
 * <p>Note: This class is binary compatible to Opensearch 1.x.
 */
@Internal
class OpensearchBulkProcessorIndexer implements RequestIndexer {

    private final BulkProcessor bulkProcessor;
    private final boolean flushOnCheckpoint;
    private final AtomicLong numPendingRequestsRef;

    OpensearchBulkProcessorIndexer(
            BulkProcessor bulkProcessor,
            boolean flushOnCheckpoint,
            AtomicLong numPendingRequestsRef) {
        this.bulkProcessor = checkNotNull(bulkProcessor);
        this.flushOnCheckpoint = flushOnCheckpoint;
        this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
    }

    /** Counts one more in-flight request, but only when checkpoint flushing is enabled. */
    private void trackPendingRequest() {
        if (flushOnCheckpoint) {
            numPendingRequestsRef.getAndIncrement();
        }
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        for (DeleteRequest request : deleteRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        for (IndexRequest request : indexRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        for (UpdateRequest request : updateRequests) {
            trackPendingRequest();
            bulkProcessor.add(request);
        }
    }
}
2,905
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/BufferingNoOpRequestIndexer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.connectors.opensearch;

import org.apache.flink.annotation.Internal;

import org.opensearch.action.ActionRequest;
import org.opensearch.action.delete.DeleteRequest;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.update.UpdateRequest;

import javax.annotation.concurrent.NotThreadSafe;

import java.util.Collections;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * Implementation of a {@link RequestIndexer} that buffers {@link ActionRequest ActionRequests}
 * before re-sending them to the Opensearch cluster upon request.
 */
@Internal
@NotThreadSafe
class BufferingNoOpRequestIndexer implements RequestIndexer {

    // FIFO buffer of requests awaiting re-submission to the real indexer
    private ConcurrentLinkedQueue<ActionRequest> bufferedRequests;

    BufferingNoOpRequestIndexer() {
        this.bufferedRequests = new ConcurrentLinkedQueue<ActionRequest>();
    }

    @Override
    public void add(DeleteRequest... deleteRequests) {
        Collections.addAll(bufferedRequests, deleteRequests);
    }

    @Override
    public void add(IndexRequest... indexRequests) {
        Collections.addAll(bufferedRequests, indexRequests);
    }

    @Override
    public void add(UpdateRequest... updateRequests) {
        Collections.addAll(bufferedRequests, updateRequests);
    }

    /**
     * Drains the buffer, handing each buffered request to the given indexer. The buffer is empty
     * when this method returns.
     *
     * @param actualIndexer the indexer that should receive the buffered requests.
     */
    void processBufferedRequests(RequestIndexer actualIndexer) {
        ActionRequest pending;
        while ((pending = bufferedRequests.poll()) != null) {
            if (pending instanceof IndexRequest) {
                actualIndexer.add((IndexRequest) pending);
            } else if (pending instanceof DeleteRequest) {
                actualIndexer.add((DeleteRequest) pending);
            } else if (pending instanceof UpdateRequest) {
                actualIndexer.add((UpdateRequest) pending);
            }
        }
    }
}
2,906
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/OpensearchSinkFunction.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.connectors.opensearch;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.functions.Function;
import org.apache.flink.api.common.functions.RuntimeContext;

import org.opensearch.action.ActionRequest;

import java.io.Serializable;

/**
 * Creates multiple {@link ActionRequest ActionRequests} from an element in a stream.
 *
 * <p>This is used by sinks to prepare elements for sending them to Opensearch.
 *
 * <p>Implementations must be serializable (the interface extends {@link Serializable}).
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class TestOpensearchSinkFunction implements
 *     OpensearchSinkFunction<Tuple2<Integer, String>> {
 *
 *     public IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
 *         Map<String, Object> json = new HashMap<>();
 *         json.put("data", element.f1);
 *
 *         return Requests.indexRequest()
 *             .index("my-index")
 *             .id(element.f0.toString())
 *             .source(json);
 *     }
 *
 *     public void process(Tuple2<Integer, String> element, RuntimeContext ctx, RequestIndexer indexer) {
 *         indexer.add(createIndexRequest(element));
 *     }
 * }
 *
 * }</pre>
 *
 * @param <T> The type of the element handled by this {@code OpensearchSinkFunction}
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface OpensearchSinkFunction<T> extends Serializable, Function {

    /**
     * Initialization method for the function. It is called once before the actual working process
     * methods. The default implementation is a no-op.
     */
    default void open() throws Exception {}

    /**
     * Initialization method for the function. It is called once before the actual working process
     * methods. The default implementation ignores the context and delegates to {@link #open()}, so
     * implementations that do not need runtime information only have to override the no-arg
     * variant.
     *
     * @param ctx runtime context containing information about the sink instance
     */
    default void open(RuntimeContext ctx) throws Exception {
        open();
    }

    /** Tear-down method for the function. It is called when the sink closes. */
    default void close() throws Exception {}

    /**
     * Process the incoming element to produce multiple {@link ActionRequest ActionsRequests}. The
     * produced requests should be added to the provided {@link RequestIndexer}.
     *
     * @param element incoming element to process
     * @param ctx runtime context containing information about the sink instance
     * @param indexer request indexer that {@code ActionRequest} should be added to
     */
    void process(T element, RuntimeContext ctx, RequestIndexer indexer);
}
2,907
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/ActionRequestFailureHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.connectors.opensearch;

import org.apache.flink.annotation.PublicEvolving;

import org.opensearch.action.ActionRequest;

import java.io.Serializable;

/**
 * An implementation of {@link ActionRequestFailureHandler} is provided by the user to define how
 * failed {@link ActionRequest ActionRequests} should be handled, e.g. dropping them, reprocessing
 * malformed documents, or simply requesting them to be sent to Opensearch again if the failure is
 * only temporary.
 *
 * <p>Implementations must be serializable (the interface extends {@link Serializable}).
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class ExampleActionRequestFailureHandler implements ActionRequestFailureHandler {
 *
 *     @Override
 *     void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
 *         if (ExceptionUtils.findThrowable(failure, OpenSearchRejectedExecutionException.class).isPresent()) {
 *             // full queue; re-add document for indexing
 *             indexer.add(action);
 *         } else if (ExceptionUtils.findThrowable(failure, OpensearchParseException.class).isPresent()) {
 *             // malformed document; simply drop request without failing sink
 *         } else {
 *             // for all other failures, fail the sink;
 *             // here the failure is simply rethrown, but users can also choose to throw custom exceptions
 *             throw failure;
 *         }
 *     }
 * }
 *
 * }</pre>
 *
 * <p>The above example will let the sink re-add requests that failed due to queue capacity
 * saturation and drop requests with malformed documents, without failing the sink. For all other
 * failures, the sink will fail.
 *
 * @deprecated This has been deprecated and will be removed in the future.
 */
@Deprecated
@PublicEvolving
public interface ActionRequestFailureHandler extends Serializable {

    /**
     * Handle a failed {@link ActionRequest}. Throwing from this method fails the sink; returning
     * normally means the failure is considered handled (dropped or re-queued via {@code indexer}).
     *
     * @param action the {@link ActionRequest} that failed due to the failure
     * @param failure the cause of failure
     * @param restStatusCode the REST status code of the failure (-1 if none can be retrieved)
     * @param indexer request indexer to re-add the failed action, if intended to do so
     * @throws Throwable if the sink should fail on this failure, the implementation should rethrow
     *     the exception or a custom one
     */
    void onFailure(
            ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer)
            throws Throwable;
}
2,908
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/RestClientFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.connectors.opensearch; import org.apache.flink.annotation.PublicEvolving; import org.opensearch.client.RestClientBuilder; import java.io.Serializable; /** * A factory that is used to configure the {@link org.opensearch.client.RestHighLevelClient} * internally used in the {@link OpensearchSink}. */ @PublicEvolving public interface RestClientFactory extends Serializable { /** * Configures the rest client builder. * * @param restClientBuilder the configured rest client builder. */ void configureRestClientBuilder(RestClientBuilder restClientBuilder); }
2,909
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/streaming/connectors/opensearch/util/NoOpFailureHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.connectors.opensearch.util; import org.apache.flink.annotation.Internal; import org.apache.flink.streaming.connectors.opensearch.ActionRequestFailureHandler; import org.apache.flink.streaming.connectors.opensearch.RequestIndexer; import org.opensearch.action.ActionRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** An {@link ActionRequestFailureHandler} that simply fails the sink on any failures. */ @Internal public class NoOpFailureHandler implements ActionRequestFailureHandler { private static final long serialVersionUID = 737941343410827885L; private static final Logger LOG = LoggerFactory.getLogger(NoOpFailureHandler.class); @Override public void onFailure( ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable { LOG.error("Failed Opensearch item request: {}", failure.getMessage(), failure); // simply fail the sink throw failure; } @Override public boolean equals(Object o) { return o instanceof NoOpFailureHandler; } @Override public int hashCode() { return NoOpFailureHandler.class.hashCode(); } }
2,910
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/OpensearchSink.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.connector.base.DeliveryGuarantee;

import org.apache.http.HttpHost;

import java.io.IOException;
import java.util.List;

import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;

/**
 * Flink Sink to insert or update data in an Opensearch index. The sink supports the following
 * delivery guarantees.
 *
 * <ul>
 *   <li>{@link DeliveryGuarantee#NONE} does not provide any guarantees: actions are flushed to
 *       Opensearch only depending on the configurations of the bulk processor. In case of a
 *       failure, it might happen that actions are lost if the bulk processor still has buffered
 *       actions.
 *   <li>{@link DeliveryGuarantee#AT_LEAST_ONCE} on a checkpoint the sink will wait until all
 *       buffered actions are flushed to and acknowledged by Opensearch. No actions will be lost but
 *       actions might be sent to Opensearch multiple times when Flink restarts. These additional
 *       requests may cause inconsistent data in Opensearch right after the restart, but eventually
 *       everything will be consistent again.
 * </ul>
 *
 * @param <IN> type of the records converted to Opensearch actions
 * @see OpensearchSinkBuilder on how to construct a OpensearchSink
 */
@PublicEvolving
public class OpensearchSink<IN> implements Sink<IN> {

    private final List<HttpHost> hosts;
    private final OpensearchEmitter<? super IN> emitter;
    private final BulkProcessorConfig bulkProcessorConfig;
    private final NetworkClientConfig networkClientConfig;
    private final DeliveryGuarantee deliveryGuarantee;
    private final RestClientFactory restClientFactory;
    private final FailureHandler failureHandler;

    /**
     * Creates the sink. Package-private: instances are built via {@link OpensearchSinkBuilder}.
     * All arguments must be non-null and {@code hosts} must contain at least one entry.
     */
    OpensearchSink(
            List<HttpHost> hosts,
            OpensearchEmitter<? super IN> emitter,
            DeliveryGuarantee deliveryGuarantee,
            BulkProcessorConfig bulkProcessorConfig,
            NetworkClientConfig networkClientConfig,
            RestClientFactory restClientFactory,
            FailureHandler failureHandler) {
        this.hosts = checkNotNull(hosts);
        checkArgument(!hosts.isEmpty(), "Hosts cannot be empty.");
        this.emitter = checkNotNull(emitter);
        this.deliveryGuarantee = checkNotNull(deliveryGuarantee);
        this.bulkProcessorConfig = checkNotNull(bulkProcessorConfig);
        this.networkClientConfig = checkNotNull(networkClientConfig);
        this.restClientFactory = checkNotNull(restClientFactory);
        this.failureHandler = checkNotNull(failureHandler);
    }

    /**
     * Creates a writer for one parallel subtask. AT_LEAST_ONCE is implemented by telling the
     * writer to flush all buffered actions on every checkpoint.
     */
    @Override
    public SinkWriter<IN> createWriter(InitContext context) throws IOException {
        final boolean flushOnCheckpoint = deliveryGuarantee == DeliveryGuarantee.AT_LEAST_ONCE;
        return new OpensearchWriter<>(
                hosts,
                emitter,
                flushOnCheckpoint,
                bulkProcessorConfig,
                networkClientConfig,
                context.metricGroup(),
                context.getMailboxExecutor(),
                restClientFactory,
                failureHandler);
    }

    @VisibleForTesting
    DeliveryGuarantee getDeliveryGuarantee() {
        return deliveryGuarantee;
    }
}
2,911
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/NetworkClientConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import javax.annotation.Nullable;

import java.io.Serializable;
import java.util.Optional;

/**
 * Immutable, serializable bundle of the network-level settings used when connecting to an
 * Opensearch cluster: optional basic-auth credentials, an optional connection path prefix,
 * optional timeouts (in the units expected by the HTTP client configuration), and an optional
 * flag to trust all certificates.
 *
 * <p>Every field may be {@code null}, which means "not configured; use the client default".
 */
class NetworkClientConfig implements Serializable {

    // Explicit serialVersionUID so that adding fields later does not silently break
    // serialization compatibility of sinks shipped with a job graph.
    private static final long serialVersionUID = 1L;

    @Nullable private final String username;
    @Nullable private final String password;
    @Nullable private final String connectionPathPrefix;
    @Nullable private final Integer connectionRequestTimeout;
    @Nullable private final Integer connectionTimeout;
    @Nullable private final Integer socketTimeout;
    @Nullable private final Boolean allowInsecure;

    /**
     * Creates the config; any argument may be {@code null} to fall back to the client default.
     *
     * @param username basic-auth user name, or {@code null}
     * @param password basic-auth password, or {@code null}
     * @param connectionPathPrefix path prefix prepended to every request, or {@code null}
     * @param connectionRequestTimeout timeout for requesting a connection from the pool
     * @param connectionTimeout timeout for establishing a connection
     * @param socketTimeout socket read timeout
     * @param allowInsecure whether to trust all SSL certificates, or {@code null} for the default
     */
    NetworkClientConfig(
            @Nullable String username,
            @Nullable String password,
            @Nullable String connectionPathPrefix,
            @Nullable Integer connectionRequestTimeout,
            @Nullable Integer connectionTimeout,
            @Nullable Integer socketTimeout,
            @Nullable Boolean allowInsecure) {
        this.username = username;
        this.password = password;
        this.connectionPathPrefix = connectionPathPrefix;
        this.connectionRequestTimeout = connectionRequestTimeout;
        this.connectionTimeout = connectionTimeout;
        this.socketTimeout = socketTimeout;
        this.allowInsecure = allowInsecure;
    }

    @Nullable
    public String getUsername() {
        return username;
    }

    @Nullable
    public String getPassword() {
        return password;
    }

    @Nullable
    public Integer getConnectionRequestTimeout() {
        return connectionRequestTimeout;
    }

    @Nullable
    public Integer getConnectionTimeout() {
        return connectionTimeout;
    }

    @Nullable
    public Integer getSocketTimeout() {
        return socketTimeout;
    }

    @Nullable
    public String getConnectionPathPrefix() {
        return connectionPathPrefix;
    }

    /** Returns the insecure-SSL flag, empty when not configured. */
    public Optional<Boolean> isAllowInsecure() {
        return Optional.ofNullable(allowInsecure);
    }
}
2,912
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/DefaultRestClientFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.conn.ssl.TrustAllStrategy;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.ssl.SSLContexts;

import org.opensearch.client.RestClientBuilder;

import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;

/** Provides the default implementation for {@link RestClientFactory}. */
public class DefaultRestClientFactory implements RestClientFactory {

    private static final long serialVersionUID = 1L;

    /**
     * Applies the path prefix, the HTTP client customizations, and the request timeouts from
     * {@code networkClientConfig} to the given builder.
     */
    @Override
    public void configureRestClientBuilder(
            RestClientBuilder builder, RestClientConfig networkClientConfig) {
        final String pathPrefix = networkClientConfig.getConnectionPathPrefix();
        if (pathPrefix != null) {
            builder.setPathPrefix(pathPrefix);
        }
        builder.setHttpClientConfigCallback(
                httpClientBuilder -> {
                    configureHttpClientBuilder(httpClientBuilder, networkClientConfig);
                    return httpClientBuilder;
                });
        configureRequestTimeouts(builder, networkClientConfig);
    }

    /** Registers a request-config callback for every timeout that is actually configured. */
    private static void configureRequestTimeouts(
            RestClientBuilder builder, RestClientConfig networkClientConfig) {
        final Integer connectionRequestTimeout =
                networkClientConfig.getConnectionRequestTimeout();
        final Integer connectionTimeout = networkClientConfig.getConnectionTimeout();
        final Integer socketTimeout = networkClientConfig.getSocketTimeout();
        if (connectionRequestTimeout == null
                && connectionTimeout == null
                && socketTimeout == null) {
            // Nothing configured; keep the client defaults by not installing a callback.
            return;
        }
        builder.setRequestConfigCallback(
                requestConfigBuilder -> {
                    if (connectionRequestTimeout != null) {
                        requestConfigBuilder.setConnectionRequestTimeout(
                                connectionRequestTimeout);
                    }
                    if (connectionTimeout != null) {
                        requestConfigBuilder.setConnectTimeout(connectionTimeout);
                    }
                    if (socketTimeout != null) {
                        requestConfigBuilder.setSocketTimeout(socketTimeout);
                    }
                    return requestConfigBuilder;
                });
    }

    /**
     * Customizes the async HTTP client: installs basic-auth credentials when both user name and
     * password are set, and a trust-all SSL context when insecure connections are allowed.
     * Protected so subclasses can extend the customization.
     */
    protected void configureHttpClientBuilder(
            HttpAsyncClientBuilder httpClientBuilder, RestClientConfig networkClientConfig) {
        final String username = networkClientConfig.getUsername();
        final String password = networkClientConfig.getPassword();
        if (password != null && username != null) {
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY, new UsernamePasswordCredentials(username, password));
            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
        }
        if (networkClientConfig.isAllowInsecure().orElse(false)) {
            try {
                httpClientBuilder.setSSLContext(
                        SSLContexts.custom().loadTrustMaterial(new TrustAllStrategy()).build());
            } catch (final NoSuchAlgorithmException
                    | KeyStoreException
                    | KeyManagementException ex) {
                throw new IllegalStateException("Unable to create custom SSL context", ex);
            }
        }
    }
}
2,913
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/FlushBackoffType.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.sink; import org.apache.flink.annotation.PublicEvolving; /** * Used to control whether the sink should retry failed requests at all or with which kind back off * strategy. */ @PublicEvolving public enum FlushBackoffType { /** After every failure, it waits a configured time until the retries are exhausted. */ CONSTANT, /** * After every failure, it waits initially the configured time and increases the waiting time * exponentially until the retries are exhausted. */ EXPONENTIAL, /** The failure is not retried. */ NONE, }
2,914
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/RequestIndexer.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.sink; import org.apache.flink.annotation.PublicEvolving; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.update.UpdateRequest; /** * Users add multiple delete, index or update requests to a {@link RequestIndexer} to prepare them * for sending to an Opensearch cluster. */ @PublicEvolving public interface RequestIndexer { /** * Add multiple {@link DeleteRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param deleteRequests The multiple {@link DeleteRequest} to add. */ void add(DeleteRequest... deleteRequests); /** * Add multiple {@link IndexRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param indexRequests The multiple {@link IndexRequest} to add. */ void add(IndexRequest... indexRequests); /** * Add multiple {@link UpdateRequest} to the indexer to prepare for sending requests to * Opensearch. * * @param updateRequests The multiple {@link UpdateRequest} to add. */ void add(UpdateRequest... updateRequests); }
2,915
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/OpensearchWriter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.sink; import org.apache.flink.annotation.VisibleForTesting; import org.apache.flink.api.common.operators.MailboxExecutor; import org.apache.flink.api.connector.sink2.SinkWriter; import org.apache.flink.metrics.Counter; import org.apache.flink.metrics.groups.SinkWriterMetricGroup; import org.apache.flink.util.FlinkRuntimeException; import org.apache.flink.util.function.ThrowingRunnable; import org.apache.http.HttpHost; import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkProcessor; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientBuilder; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.unit.ByteSizeUnit; import 
org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.rest.RestStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; import static org.apache.flink.util.ExceptionUtils.firstOrSuppressed; import static org.apache.flink.util.Preconditions.checkNotNull; class OpensearchWriter<IN> implements SinkWriter<IN> { private static final Logger LOG = LoggerFactory.getLogger(OpensearchWriter.class); public static final FailureHandler DEFAULT_FAILURE_HANDLER = ex -> { throw new FlinkRuntimeException(ex); }; private final OpensearchEmitter<? super IN> emitter; private final MailboxExecutor mailboxExecutor; private final boolean flushOnCheckpoint; private final BulkProcessor bulkProcessor; private final RestHighLevelClient client; private final RequestIndexer requestIndexer; private final Counter numBytesOutCounter; private final FailureHandler failureHandler; private long pendingActions = 0; private boolean checkpointInProgress = false; private volatile long lastSendTime = 0; private volatile long ackTime = Long.MAX_VALUE; private volatile boolean closed = false; /** * Constructor creating an Opensearch writer. * * @param hosts the reachable Opensearch cluster nodes * @param emitter converting incoming records to Opensearch actions * @param flushOnCheckpoint if true all until now received records are flushed after every * checkpoint * @param bulkProcessorConfig describing the flushing and failure handling of the used {@link * BulkProcessor} * @param networkClientConfig describing properties of the network connection used to connect to * the Opensearch cluster * @param metricGroup for the sink writer * @param mailboxExecutor Flink's mailbox executor * @param restClientFactory Flink's mailbox executor */ OpensearchWriter( List<HttpHost> hosts, OpensearchEmitter<? 
super IN> emitter, boolean flushOnCheckpoint, BulkProcessorConfig bulkProcessorConfig, NetworkClientConfig networkClientConfig, SinkWriterMetricGroup metricGroup, MailboxExecutor mailboxExecutor, RestClientFactory restClientFactory, FailureHandler failureHandler) { this.emitter = checkNotNull(emitter); this.flushOnCheckpoint = flushOnCheckpoint; this.mailboxExecutor = checkNotNull(mailboxExecutor); final RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); checkNotNull(restClientFactory) .configureRestClientBuilder( builder, new DefaultRestClientConfig(networkClientConfig)); this.client = new RestHighLevelClient(builder); this.bulkProcessor = createBulkProcessor(bulkProcessorConfig); this.requestIndexer = new DefaultRequestIndexer(metricGroup.getNumRecordsSendCounter()); checkNotNull(metricGroup); metricGroup.setCurrentSendTimeGauge(() -> ackTime - lastSendTime); this.numBytesOutCounter = metricGroup.getIOMetricGroup().getNumBytesOutCounter(); try { emitter.open(); } catch (Exception e) { throw new FlinkRuntimeException("Failed to open the OpensearchEmitter", e); } this.failureHandler = failureHandler; } @Override public void write(IN element, Context context) throws IOException, InterruptedException { // do not allow new bulk writes until all actions are flushed while (checkpointInProgress) { mailboxExecutor.yield(); } emitter.emit(element, context, requestIndexer); } @Override public void flush(boolean endOfInput) throws IOException, InterruptedException { checkpointInProgress = true; while (pendingActions != 0 && (flushOnCheckpoint || endOfInput)) { bulkProcessor.flush(); LOG.info("Waiting for the response of {} pending actions.", pendingActions); mailboxExecutor.yield(); } checkpointInProgress = false; } @VisibleForTesting void blockingFlushAllActions() throws InterruptedException { while (pendingActions != 0) { bulkProcessor.flush(); LOG.info("Waiting for the response of {} pending actions.", pendingActions); mailboxExecutor.yield(); 
} } @Override public void close() throws Exception { closed = true; emitter.close(); bulkProcessor.close(); client.close(); } private BulkProcessor createBulkProcessor(BulkProcessorConfig bulkProcessorConfig) { final BulkProcessor.Builder builder = BulkProcessor.builder( new BulkRequestConsumerFactory() { // This cannot be inlined as a // lambda because then // deserialization fails @Override public void accept( BulkRequest bulkRequest, ActionListener<BulkResponse> bulkResponseActionListener) { client.bulkAsync( bulkRequest, RequestOptions.DEFAULT, bulkResponseActionListener); } }, new BulkListener()); if (bulkProcessorConfig.getBulkFlushMaxActions() != -1) { builder.setBulkActions(bulkProcessorConfig.getBulkFlushMaxActions()); } if (bulkProcessorConfig.getBulkFlushMaxMb() != -1) { builder.setBulkSize( new ByteSizeValue(bulkProcessorConfig.getBulkFlushMaxMb(), ByteSizeUnit.MB)); } if (bulkProcessorConfig.getBulkFlushInterval() != -1) { builder.setFlushInterval(new TimeValue(bulkProcessorConfig.getBulkFlushInterval())); } BackoffPolicy backoffPolicy; final TimeValue backoffDelay = new TimeValue(bulkProcessorConfig.getBulkFlushBackOffDelay()); final int maxRetryCount = bulkProcessorConfig.getBulkFlushBackoffRetries(); switch (bulkProcessorConfig.getFlushBackoffType()) { case CONSTANT: backoffPolicy = BackoffPolicy.constantBackoff(backoffDelay, maxRetryCount); break; case EXPONENTIAL: backoffPolicy = BackoffPolicy.exponentialBackoff(backoffDelay, maxRetryCount); break; case NONE: backoffPolicy = BackoffPolicy.noBackoff(); break; default: throw new IllegalArgumentException( "Received unknown backoff policy type " + bulkProcessorConfig.getFlushBackoffType()); } builder.setBackoffPolicy(backoffPolicy); // This makes flush() blocking builder.setConcurrentRequests(0); return builder.build(); } private class BulkListener implements BulkProcessor.Listener { @Override public void beforeBulk(long executionId, BulkRequest request) { LOG.info("Sending bulk of {} actions to 
Opensearch.", request.numberOfActions()); lastSendTime = System.currentTimeMillis(); numBytesOutCounter.inc(request.estimatedSizeInBytes()); } @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { ackTime = System.currentTimeMillis(); enqueueActionInMailbox( () -> extractFailures(request, response), "opensearchSuccessCallback"); } @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { enqueueActionInMailbox( () -> { throw new FlinkRuntimeException("Complete bulk has failed.", failure); }, "opensearchErrorCallback"); } } private void enqueueActionInMailbox( ThrowingRunnable<? extends Exception> action, String actionName) { // If the writer is cancelled before the last bulk response (i.e. no flush on checkpoint // configured or shutdown without a final // checkpoint) the mailbox might already be shutdown, so we should not enqueue any // actions. if (isClosed()) { return; } mailboxExecutor.execute(action, actionName); } private void extractFailures(BulkRequest request, BulkResponse response) { if (!response.hasFailures()) { pendingActions -= request.numberOfActions(); return; } Throwable chainedFailures = null; for (int i = 0; i < response.getItems().length; i++) { final BulkItemResponse itemResponse = response.getItems()[i]; if (!itemResponse.isFailed()) { continue; } final Throwable failure = itemResponse.getFailure().getCause(); if (failure == null) { continue; } final RestStatus restStatus = itemResponse.getFailure().getStatus(); final DocWriteRequest<?> actionRequest = request.requests().get(i); chainedFailures = firstOrSuppressed( wrapException(restStatus, failure, actionRequest), chainedFailures); } if (chainedFailures == null) { return; } failureHandler.onFailure(chainedFailures); } private static Throwable wrapException( RestStatus restStatus, Throwable rootFailure, DocWriteRequest<?> actionRequest) { if (restStatus == null) { return new FlinkRuntimeException( String.format("Single 
action %s of bulk request failed.", actionRequest), rootFailure); } else { return new FlinkRuntimeException( String.format( "Single action %s of bulk request failed with status %s.", actionRequest, restStatus.getStatus()), rootFailure); } } private boolean isClosed() { if (closed) { LOG.warn("Writer was closed before all records were acknowledged by Opensearch."); } return closed; } private class DefaultRequestIndexer implements RequestIndexer { private final Counter numRecordsSendCounter; public DefaultRequestIndexer(Counter numRecordsSendCounter) { this.numRecordsSendCounter = checkNotNull(numRecordsSendCounter); } @Override public void add(DeleteRequest... deleteRequests) { for (final DeleteRequest deleteRequest : deleteRequests) { numRecordsSendCounter.inc(); pendingActions++; bulkProcessor.add(deleteRequest); } } @Override public void add(IndexRequest... indexRequests) { for (final IndexRequest indexRequest : indexRequests) { numRecordsSendCounter.inc(); pendingActions++; bulkProcessor.add(indexRequest); } } @Override public void add(UpdateRequest... updateRequests) { for (final UpdateRequest updateRequest : updateRequests) { numRecordsSendCounter.inc(); pendingActions++; bulkProcessor.add(updateRequest); } } } }
2,916
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/BulkProcessorConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import java.io.Serializable;

import static org.apache.flink.util.Preconditions.checkNotNull;

/**
 * Immutable holder for the bulk-flush settings of the Opensearch {@code BulkProcessor}: flush
 * triggers (max actions, max size in MB, time interval) and the back-off policy applied on bulk
 * failures.
 *
 * <p>A value of {@code -1} for any numeric setting means "not configured" and disables the
 * corresponding trigger (the consumer of this config skips the builder call for {@code -1}).
 *
 * <p>{@link Serializable} so the sink configuration can be shipped to the writers; do not rename
 * fields without considering serialization compatibility.
 */
class BulkProcessorConfig implements Serializable {

    // Maximum number of buffered actions before a bulk flush; -1 disables this trigger.
    private final int bulkFlushMaxActions;
    // Maximum size (in MB) of buffered actions before a bulk flush; -1 disables this trigger.
    private final int bulkFlushMaxMb;
    // Time-based flush interval in milliseconds; -1 disables this trigger.
    private final long bulkFlushInterval;
    // Back-off strategy for failed bulk requests; never null.
    private final FlushBackoffType flushBackoffType;
    // Maximum retries for a back-off attempt; only meaningful when flushBackoffType != NONE.
    private final int bulkFlushBackoffRetries;
    // Delay (in milliseconds) between back-off attempts; only meaningful when
    // flushBackoffType != NONE.
    private final long bulkFlushBackOffDelay;

    BulkProcessorConfig(
            int bulkFlushMaxActions,
            int bulkFlushMaxMb,
            long bulkFlushInterval,
            FlushBackoffType flushBackoffType,
            int bulkFlushBackoffRetries,
            long bulkFlushBackOffDelay) {
        this.bulkFlushMaxActions = bulkFlushMaxActions;
        this.bulkFlushMaxMb = bulkFlushMaxMb;
        this.bulkFlushInterval = bulkFlushInterval;
        // Only the backoff type is null-checked here; the numeric ranges are validated by the
        // builder before this object is constructed.
        this.flushBackoffType = checkNotNull(flushBackoffType);
        this.bulkFlushBackoffRetries = bulkFlushBackoffRetries;
        this.bulkFlushBackOffDelay = bulkFlushBackOffDelay;
    }

    public int getBulkFlushMaxActions() {
        return bulkFlushMaxActions;
    }

    public int getBulkFlushMaxMb() {
        return bulkFlushMaxMb;
    }

    public long getBulkFlushInterval() {
        return bulkFlushInterval;
    }

    public FlushBackoffType getFlushBackoffType() {
        return flushBackoffType;
    }

    public int getBulkFlushBackoffRetries() {
        return bulkFlushBackoffRetries;
    }

    public long getBulkFlushBackOffDelay() {
        return bulkFlushBackOffDelay;
    }
}
2,917
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/DefaultRestClientConfig.java
package org.apache.flink.connector.opensearch.sink; import javax.annotation.Nullable; import java.util.Optional; /** Provides the default implementation for {@link RestClientFactory.RestClientConfig}. */ class DefaultRestClientConfig implements RestClientFactory.RestClientConfig { private final NetworkClientConfig networkClientConfig; DefaultRestClientConfig(NetworkClientConfig networkClientConfig) { this.networkClientConfig = networkClientConfig; } @Override public @Nullable String getUsername() { return networkClientConfig.getUsername(); } @Override public @Nullable String getPassword() { return networkClientConfig.getPassword(); } @Override public @Nullable Integer getConnectionRequestTimeout() { return networkClientConfig.getConnectionRequestTimeout(); } @Override public @Nullable Integer getConnectionTimeout() { return networkClientConfig.getConnectionTimeout(); } @Override public @Nullable Integer getSocketTimeout() { return networkClientConfig.getSocketTimeout(); } @Override public @Nullable String getConnectionPathPrefix() { return networkClientConfig.getConnectionPathPrefix(); } @Override public Optional<Boolean> isAllowInsecure() { return networkClientConfig.isAllowInsecure(); } }
2,918
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/BulkRequestConsumerFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.Internal;

import org.opensearch.action.ActionListener;
import org.opensearch.action.bulk.BulkRequest;
import org.opensearch.action.bulk.BulkResponse;

import java.util.function.BiConsumer;

/**
 * {@link BulkRequestConsumerFactory} is used to bridge incompatible Opensearch Java API calls
 * across different Opensearch versions.
 *
 * <p>An implementation accepts the {@link BulkRequest} to execute together with the {@link
 * ActionListener} that must be notified with the resulting {@link BulkResponse}, and dispatches
 * the request using whatever client API the targeted Opensearch version provides.
 */
@Internal
interface BulkRequestConsumerFactory
        extends BiConsumer<BulkRequest, ActionListener<BulkResponse>> {}
2,919
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/OpensearchEmitter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.functions.Function;
import org.apache.flink.api.connector.sink2.SinkWriter;

import org.opensearch.action.ActionRequest;

/**
 * Creates none or multiple {@link ActionRequest ActionRequests} from the incoming elements.
 *
 * <p>This is used by sinks to prepare elements for sending them to Opensearch.
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class TestOpensearchEmitter implements OpensearchEmitter<Tuple2<Integer, String>> {
 *
 *     public IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
 *         Map<String, Object> document = new HashMap<>();
 *         document.put("data", element.f1);
 *
 *         return Requests.indexRequest()
 *             .index("my-index")
 *             .id(element.f0.toString())
 *             .source(document);
 *     }
 *
 *     public void emit(
 *             Tuple2<Integer, String> element, SinkWriter.Context context, RequestIndexer indexer) {
 *         indexer.add(createIndexRequest(element));
 *     }
 * }
 *
 * }</pre>
 *
 * @param <T> The type of the element handled by this {@link OpensearchEmitter}
 */
@PublicEvolving
public interface OpensearchEmitter<T> extends Function {

    /**
     * Initialization method for the function. It is called once before the actual working process
     * methods.
     *
     * @throws Exception if initialization fails; a failure is fatal for the sink writer
     */
    default void open() throws Exception {}

    /**
     * Tear-down method for the function. It is called when the sink closes.
     *
     * @throws Exception if the tear-down fails
     */
    default void close() throws Exception {}

    /**
     * Process the incoming element to produce multiple {@link ActionRequest ActionRequests}. The
     * produced requests should be added to the provided {@link RequestIndexer}.
     *
     * @param element incoming element to process
     * @param context to access additional information about the record
     * @param indexer request indexer that {@code ActionRequest} should be added to
     */
    void emit(T element, SinkWriter.Context context, RequestIndexer indexer);
}
2,920
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/OpensearchSinkBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.util.InstantiationUtil;

import org.apache.http.HttpHost;

import java.util.Arrays;
import java.util.List;

import static org.apache.flink.connector.opensearch.sink.OpensearchWriter.DEFAULT_FAILURE_HANDLER;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
import static org.apache.flink.util.Preconditions.checkState;

/**
 * Builder to construct an Opensearch compatible {@link OpensearchSink}.
 *
 * <p>The following example shows the minimal setup to create a OpensearchSink that submits actions
 * on checkpoint or the default number of actions was buffered (1000).
 *
 * <pre>{@code
 * OpensearchSink<String> sink = new OpensearchSinkBuilder<String>()
 *     .setHosts(new HttpHost("localhost", 9200))
 *     .setEmitter((element, context, indexer) -> {
 *         indexer.add(
 *             new IndexRequest("my-index")
 *                 .id(element.f0.toString())
 *                 .source(element.f1));
 *     })
 *     .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
 *     .build();
 * }</pre>
 *
 * @param <IN> type of the records converted to Opensearch actions
 */
@PublicEvolving
public class OpensearchSinkBuilder<IN> {

    // Bulk-flush triggers; -1 means "not configured" / trigger disabled.
    private int bulkFlushMaxActions = 1000;
    private int bulkFlushMaxMb = -1;
    private long bulkFlushInterval = -1;
    // Back-off policy applied to failed bulk requests; retries/delay only apply when != NONE.
    private FlushBackoffType bulkFlushBackoffType = FlushBackoffType.NONE;
    private int bulkFlushBackoffRetries = -1;
    private long bulkFlushBackOffDelay = -1;
    private DeliveryGuarantee deliveryGuarantee = DeliveryGuarantee.AT_LEAST_ONCE;
    // Mandatory settings, validated in build().
    private List<HttpHost> hosts;
    protected OpensearchEmitter<? super IN> emitter;
    // Optional network/security settings; null means "use the client default".
    private String username;
    private String password;
    private String connectionPathPrefix;
    private Integer connectionTimeout;
    private Integer connectionRequestTimeout;
    private Integer socketTimeout;
    private Boolean allowInsecure;
    private RestClientFactory restClientFactory;
    private FailureHandler failureHandler = DEFAULT_FAILURE_HANDLER;

    public OpensearchSinkBuilder() {
        restClientFactory = new DefaultRestClientFactory();
    }

    // Fluent-API helper: lets setEmitter re-type the builder while every other setter returns
    // the same instance. The unchecked cast is safe because only the IN type parameter varies.
    @SuppressWarnings("unchecked")
    protected <S extends OpensearchSinkBuilder<?>> S self() {
        return (S) this;
    }

    /**
     * Sets the emitter which is invoked on every record to convert it to Opensearch actions.
     *
     * @param emitter to process records into Opensearch actions; must be serializable
     * @param <T> type of the records the emitter (and thus the built sink) accepts
     * @return this builder
     */
    public <T extends IN> OpensearchSinkBuilder<T> setEmitter(
            OpensearchEmitter<? super T> emitter) {
        checkNotNull(emitter);
        checkState(
                InstantiationUtil.isSerializable(emitter),
                "The Opensearch emitter must be serializable.");
        final OpensearchSinkBuilder<T> self = self();
        self.emitter = emitter;
        return self;
    }

    /**
     * Sets the hosts where the Opensearch cluster nodes are reachable.
     *
     * @param hosts http addresses describing the node locations
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setHosts(HttpHost... hosts) {
        checkNotNull(hosts);
        checkState(hosts.length > 0, "Hosts cannot be empty.");
        this.hosts = Arrays.asList(hosts);
        return self();
    }

    /**
     * Sets the wanted {@link DeliveryGuarantee}. The default delivery guarantee is {@link
     * DeliveryGuarantee#AT_LEAST_ONCE}; {@link DeliveryGuarantee#EXACTLY_ONCE} is not supported.
     *
     * @param deliveryGuarantee which describes the record emission behaviour
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setDeliveryGuarantee(DeliveryGuarantee deliveryGuarantee) {
        checkState(
                deliveryGuarantee != DeliveryGuarantee.EXACTLY_ONCE,
                "Opensearch sink does not support the EXACTLY_ONCE guarantee.");
        this.deliveryGuarantee = checkNotNull(deliveryGuarantee);
        return self();
    }

    /**
     * Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
     * disable it. The default flush size 1000.
     *
     * @param numMaxActions the maximum number of actions to buffer per bulk request.
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setBulkFlushMaxActions(int numMaxActions) {
        checkState(
                numMaxActions == -1 || numMaxActions > 0,
                "Max number of buffered actions must be larger than 0.");
        this.bulkFlushMaxActions = numMaxActions;
        return self();
    }

    /**
     * Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
     * disable it.
     *
     * @param maxSizeMb the maximum size of buffered actions, in mb.
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setBulkFlushMaxSizeMb(int maxSizeMb) {
        checkState(
                maxSizeMb == -1 || maxSizeMb > 0,
                "Max size of buffered actions must be larger than 0.");
        this.bulkFlushMaxMb = maxSizeMb;
        return self();
    }

    /**
     * Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
     *
     * @param intervalMillis the bulk flush interval, in milliseconds.
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setBulkFlushInterval(long intervalMillis) {
        checkState(
                intervalMillis == -1 || intervalMillis >= 0,
                "Interval (in milliseconds) between each flush must be larger than "
                        + "or equal to 0.");
        this.bulkFlushInterval = intervalMillis;
        return self();
    }

    /**
     * Sets the type of back off to use when flushing bulk requests, together with the delay
     * between attempts and the maximum number of retries. The default bulk flush back off type is
     * {@link FlushBackoffType#NONE}, which must not be passed here (it needs no configuration).
     *
     * @param flushBackoffType the backoff type to use; must not be {@link FlushBackoffType#NONE}
     * @param maxRetries the maximum number of retries for a backoff attempt; must be positive
     * @param delayMillis the delay between each backoff attempt, in milliseconds; must be >= 0
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setBulkFlushBackoffStrategy(
            FlushBackoffType flushBackoffType, int maxRetries, long delayMillis) {
        this.bulkFlushBackoffType = checkNotNull(flushBackoffType);
        checkState(
                flushBackoffType != FlushBackoffType.NONE,
                "FlushBackoffType#NONE does not require a configuration it is the default, retries and delay are ignored.");
        checkState(maxRetries > 0, "Max number of backoff attempts must be larger than 0.");
        this.bulkFlushBackoffRetries = maxRetries;
        checkState(
                delayMillis >= 0,
                "Delay (in milliseconds) between each backoff attempt must be larger "
                        + "than or equal to 0.");
        this.bulkFlushBackOffDelay = delayMillis;
        return self();
    }

    /**
     * Sets the username used to authenticate the connection with the Opensearch cluster.
     *
     * @param username of the Opensearch cluster user
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setConnectionUsername(String username) {
        checkNotNull(username);
        this.username = username;
        return self();
    }

    /**
     * Sets the password used to authenticate the connection with the Opensearch cluster.
     *
     * @param password of the Opensearch cluster user
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setConnectionPassword(String password) {
        checkNotNull(password);
        this.password = password;
        return self();
    }

    /**
     * Sets a prefix which used for every REST communication to the Opensearch cluster.
     *
     * @param prefix for the communication
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setConnectionPathPrefix(String prefix) {
        checkNotNull(prefix);
        this.connectionPathPrefix = prefix;
        return self();
    }

    /**
     * Sets the timeout for requesting the connection of the Opensearch cluster from the connection
     * manager.
     *
     * @param timeout for the connection request
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setConnectionRequestTimeout(int timeout) {
        checkState(timeout >= 0, "Connection request timeout must be larger than or equal to 0.");
        this.connectionRequestTimeout = timeout;
        return self();
    }

    /**
     * Sets the timeout for establishing a connection of the Opensearch cluster.
     *
     * @param timeout for the connection
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setConnectionTimeout(int timeout) {
        checkState(timeout >= 0, "Connection timeout must be larger than or equal to 0.");
        this.connectionTimeout = timeout;
        return self();
    }

    /**
     * Sets the timeout for waiting for data or, put differently, a maximum period inactivity
     * between two consecutive data packets.
     *
     * @param timeout for the socket
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setSocketTimeout(int timeout) {
        checkState(timeout >= 0, "Socket timeout must be larger than or equal to 0.");
        this.socketTimeout = timeout;
        return self();
    }

    /**
     * Allows to bypass the certificates chain validation and connect to insecure network endpoints
     * (for example, servers which use self-signed certificates).
     *
     * @param allowInsecure allow or not to insecure network endpoints
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setAllowInsecure(boolean allowInsecure) {
        this.allowInsecure = allowInsecure;
        return self();
    }

    /**
     * Sets the {@link RestClientFactory} to be used for configuring the instance of the OpenSearch
     * REST client.
     *
     * @param restClientFactory the {@link RestClientFactory} instance
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setRestClientFactory(RestClientFactory restClientFactory) {
        this.restClientFactory = checkNotNull(restClientFactory);
        return self();
    }

    /**
     * Allows to set custom failure handler. If not set, then the DEFAULT_FAILURE_HANDLER will be
     * used which throws a runtime exception upon receiving a failure.
     *
     * @param failureHandler the custom handler
     * @return this builder
     */
    public OpensearchSinkBuilder<IN> setFailureHandler(FailureHandler failureHandler) {
        checkNotNull(failureHandler);
        this.failureHandler = failureHandler;
        return self();
    }

    /**
     * Constructs the {@link OpensearchSink} with the properties configured this builder.
     *
     * @return {@link OpensearchSink}
     * @throws NullPointerException if the emitter or the hosts have not been set
     */
    public OpensearchSink<IN> build() {
        checkNotNull(emitter);
        checkNotNull(hosts);

        NetworkClientConfig networkClientConfig = buildNetworkClientConfig();
        BulkProcessorConfig bulkProcessorConfig = buildBulkProcessorConfig();

        return new OpensearchSink<>(
                hosts,
                emitter,
                deliveryGuarantee,
                bulkProcessorConfig,
                networkClientConfig,
                restClientFactory,
                failureHandler);
    }

    // Bundles the optional network/security settings; nulls are passed through and interpreted
    // by the client configuration as "use defaults".
    private NetworkClientConfig buildNetworkClientConfig() {
        checkArgument(!hosts.isEmpty(), "Hosts cannot be empty.");
        return new NetworkClientConfig(
                username,
                password,
                connectionPathPrefix,
                connectionRequestTimeout,
                connectionTimeout,
                socketTimeout,
                allowInsecure);
    }

    // Bundles the bulk-flush trigger and back-off settings (-1 meaning "disabled").
    private BulkProcessorConfig buildBulkProcessorConfig() {
        return new BulkProcessorConfig(
                bulkFlushMaxActions,
                bulkFlushMaxMb,
                bulkFlushInterval,
                bulkFlushBackoffType,
                bulkFlushBackoffRetries,
                bulkFlushBackOffDelay);
    }

    @Override
    public String toString() {
        return "OpensearchSinkBuilder{"
                + "bulkFlushMaxActions="
                + bulkFlushMaxActions
                + ", bulkFlushMaxMb="
                + bulkFlushMaxMb
                + ", bulkFlushInterval="
                + bulkFlushInterval
                + ", bulkFlushBackoffType="
                + bulkFlushBackoffType
                + ", bulkFlushBackoffRetries="
                + bulkFlushBackoffRetries
                + ", bulkFlushBackOffDelay="
                + bulkFlushBackOffDelay
                + ", deliveryGuarantee="
                + deliveryGuarantee
                + ", hosts="
                + hosts
                + ", emitter="
                + emitter
                + ", username='"
                + username
                + '\''
                + ", password='"
                + password
                + '\''
                + ", connectionPathPrefix='"
                + connectionPathPrefix
                + '\''
                + ", allowInsecure='"
                + allowInsecure
                + '\''
                + '}';
    }
}
2,921
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/RestClientFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.PublicEvolving;

import org.opensearch.client.RestClientBuilder;

import javax.annotation.Nullable;

import java.io.Serializable;
import java.util.Optional;

/**
 * A factory that is used to configure the {@link org.opensearch.client.RestHighLevelClient}
 * internally used in the {@link OpensearchSink}.
 *
 * <p>{@link Serializable} because the factory travels with the sink configuration to the writers;
 * implementations must not capture non-serializable state.
 */
@PublicEvolving
public interface RestClientFactory extends Serializable {

    /**
     * The REST client configuration. All values are supplied by the sink builder; a {@code null}
     * value means the setting was not configured and the client default should be used.
     */
    @PublicEvolving
    interface RestClientConfig {
        /**
         * Gets the configured username.
         *
         * @return the configured username
         */
        @Nullable
        String getUsername();

        /**
         * Gets the configured password.
         *
         * @return the configured password
         */
        @Nullable
        String getPassword();

        /**
         * Gets the configured connection request timeout.
         *
         * @return the configured connection request timeout
         */
        @Nullable
        Integer getConnectionRequestTimeout();

        /**
         * Gets the configured connection timeout.
         *
         * @return the configured connection timeout
         */
        @Nullable
        Integer getConnectionTimeout();

        /**
         * Gets the configured socket timeout.
         *
         * @return the configured socket timeout
         */
        @Nullable
        Integer getSocketTimeout();

        /**
         * Gets the configured connection path prefix.
         *
         * @return the configured connection path prefix
         */
        @Nullable
        String getConnectionPathPrefix();

        /**
         * Returns if the insecure HTTPS connections are allowed or not (self-signed certificates,
         * etc).
         *
         * @return if the insecure HTTPS connections are allowed or not
         */
        Optional<Boolean> isAllowInsecure();
    }

    /**
     * Configures the rest client builder.
     *
     * @param restClientBuilder the configured REST client builder.
     * @param clientConfig the client network configuration
     */
    void configureRestClientBuilder(
            RestClientBuilder restClientBuilder, RestClientConfig clientConfig);
}
2,922
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/sink/FailureHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.sink;

import org.apache.flink.annotation.PublicEvolving;

import java.io.Serializable;

/**
 * Handler to process failures. {@link Serializable} because it is part of the sink configuration
 * shipped to the writers.
 */
@PublicEvolving
@FunctionalInterface
public interface FailureHandler extends Serializable {

    /**
     * Called with the failure of a bulk request (possibly several item failures chained via
     * suppression). Implementations that do not throw effectively swallow the failure.
     *
     * @param failure the cause of the failed bulk items
     */
    void onFailure(Throwable failure);
}
2,923
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchValidationUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.annotation.Internal; import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.logical.DistinctType; import org.apache.flink.table.types.logical.LogicalTypeFamily; import org.apache.flink.table.types.logical.LogicalTypeRoot; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; import java.util.stream.Collectors; /** Utility methods for validating Opensearch properties. 
*/ @Internal class OpensearchValidationUtils { private static final Set<LogicalTypeRoot> ALLOWED_PRIMARY_KEY_TYPES = new LinkedHashSet<>(); static { ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.CHAR); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.VARCHAR); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.BOOLEAN); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DECIMAL); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TINYINT); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.SMALLINT); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTEGER); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.BIGINT); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.FLOAT); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DOUBLE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DATE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTERVAL_YEAR_MONTH); ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTERVAL_DAY_TIME); } /** * Checks that the table does not have a primary key defined on illegal types. In Opensearch the * primary key is used to calculate the Opensearch document id, which is a string of up to 512 * bytes. It cannot have whitespaces. As of now it is calculated by concatenating the fields. * Certain types do not have a good string representation to be used in this scenario. The * illegal types are mostly {@link LogicalTypeFamily#COLLECTION} types and {@link * LogicalTypeRoot#RAW} type. 
*/ public static void validatePrimaryKey(DataType primaryKeyDataType) { List<DataType> fieldDataTypes = DataType.getFieldDataTypes(primaryKeyDataType); List<LogicalTypeRoot> illegalTypes = fieldDataTypes.stream() .map(DataType::getLogicalType) .map( logicalType -> { if (logicalType.is(LogicalTypeRoot.DISTINCT_TYPE)) { return ((DistinctType) logicalType) .getSourceType() .getTypeRoot(); } else { return logicalType.getTypeRoot(); } }) .filter(t -> !ALLOWED_PRIMARY_KEY_TYPES.contains(t)) .collect(Collectors.toList()); if (!illegalTypes.isEmpty()) { throw new ValidationException( String.format( "The table has a primary key on columns of illegal types: %s.", illegalTypes)); } } private OpensearchValidationUtils() {} }
2,924
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/KeyExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.annotation.Internal; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.logical.DistinctType; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.util.function.SerializableFunction; import java.io.Serializable; import java.time.Duration; import java.time.LocalDate; import java.time.LocalTime; import java.time.Period; import java.util.List; /** An extractor for a Opensearch key from a {@link RowData}. 
 */
@Internal
class KeyExtractor implements SerializableFunction<RowData, String> {
    // One formatter per primary-key field, in primary-key declaration order.
    private final FieldFormatter[] fieldFormatters;
    // Separator placed between consecutive field values in the generated document id.
    private final String keyDelimiter;

    /** Serializable per-field conversion from a {@link RowData} field to its key string. */
    private interface FieldFormatter extends Serializable {
        String format(RowData rowData);
    }

    private KeyExtractor(FieldFormatter[] fieldFormatters, String keyDelimiter) {
        this.fieldFormatters = fieldFormatters;
        this.keyDelimiter = keyDelimiter;
    }

    /**
     * Builds the Opensearch document id for the given row by formatting each primary-key field and
     * joining the results with {@code keyDelimiter}.
     */
    @Override
    public String apply(RowData rowData) {
        final StringBuilder builder = new StringBuilder();
        for (int i = 0; i < fieldFormatters.length; i++) {
            if (i > 0) {
                builder.append(keyDelimiter);
            }
            final String value = fieldFormatters[i].format(rowData);
            builder.append(value);
        }
        return builder.toString();
    }

    /**
     * Creates a key extractor for the given primary-key fields. When no primary key is defined,
     * the returned function always yields {@code null}, which lets Opensearch auto-generate
     * document ids.
     *
     * @param primaryKeyTypesWithIndex logical types and row positions of the primary-key columns
     * @param keyDelimiter separator used between key field values
     */
    public static SerializableFunction<RowData, String> createKeyExtractor(
            List<LogicalTypeWithIndex> primaryKeyTypesWithIndex, String keyDelimiter) {
        if (!primaryKeyTypesWithIndex.isEmpty()) {
            FieldFormatter[] formatters =
                    primaryKeyTypesWithIndex.stream()
                            .map(
                                    logicalTypeWithIndex ->
                                            toFormatter(
                                                    logicalTypeWithIndex.index,
                                                    logicalTypeWithIndex.logicalType))
                            .toArray(FieldFormatter[]::new);
            return new KeyExtractor(formatters, keyDelimiter);
        } else {
            return (row) -> null;
        }
    }

    // Chooses a string representation per logical type. The produced strings become part of
    // externally visible document ids, so these mappings must stay stable across versions.
    private static FieldFormatter toFormatter(int index, LogicalType type) {
        switch (type.getTypeRoot()) {
            case DATE:
                // DATE is stored internally as days since the epoch.
                return (row) -> LocalDate.ofEpochDay(row.getInt(index)).toString();
            case TIME_WITHOUT_TIME_ZONE:
                // TIME is stored as milliseconds of the day; LocalTime expects nanoseconds.
                return (row) ->
                        LocalTime.ofNanoOfDay((long) row.getInt(index) * 1_000_000L).toString();
            case INTERVAL_YEAR_MONTH:
                // NOTE(review): Flink encodes INTERVAL_YEAR_MONTH as a month count, yet the value
                // is rendered via Period.ofDays (yielding "P<n>D"). Deterministic, so ids remain
                // stable, but semantically odd — confirm intent before changing, as a "fix" would
                // alter the ids of already-written documents.
                return (row) -> Period.ofDays(row.getInt(index)).toString();
            case INTERVAL_DAY_TIME:
                // Day-time intervals are stored as milliseconds.
                return (row) -> Duration.ofMillis(row.getLong(index)).toString();
            case DISTINCT_TYPE:
                // Unwrap user-defined distinct types and format by their source type.
                return toFormatter(index, ((DistinctType) type).getSourceType());
            default:
                // Generic fallback: the field's Object representation. NOTE(review): this throws
                // NPE if the key field is null at runtime; primary-key columns are presumably
                // guaranteed non-null upstream — verify.
                RowData.FieldGetter fieldGetter = RowData.createFieldGetter(type, index);
                return (row) -> fieldGetter.getFieldOrNull(row).toString();
        }
    }
}
2,925
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/LogicalTypeWithIndex.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.table.types.logical.LogicalType;

/**
 * Immutable pairing of a column's {@link LogicalType} with its position in the physical row. Used
 * to describe primary-key columns for key extraction.
 */
class LogicalTypeWithIndex {
    // Zero-based position of the column within the row.
    public final int index;
    // Logical type of the column at that position.
    public final LogicalType logicalType;

    LogicalTypeWithIndex(int index, LogicalType logicalType) {
        this.index = index;
        this.logicalType = logicalType;
    }
}
2,926
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchConfiguration.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.annotation.Internal; import org.apache.flink.configuration.MemorySize; import org.apache.flink.configuration.ReadableConfig; import org.apache.flink.connector.base.DeliveryGuarantee; import org.apache.flink.connector.opensearch.sink.FlushBackoffType; import org.apache.flink.table.api.ValidationException; import org.apache.http.HttpHost; import java.time.Duration; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.ALLOW_INSECURE; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION; import static 
org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_MAX_SIZE_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_PATH_PREFIX_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_REQUEST_TIMEOUT; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_TIMEOUT; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.DELIVERY_GUARANTEE_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.HOSTS_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.INDEX_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.KEY_DELIMITER_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.PASSWORD_OPTION; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.SOCKET_TIMEOUT; import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.USERNAME_OPTION; import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM; import static org.apache.flink.util.Preconditions.checkNotNull; /** Opensearch base configuration. 
*/ @Internal class OpensearchConfiguration { protected final ReadableConfig config; OpensearchConfiguration(ReadableConfig config) { this.config = checkNotNull(config); } public int getBulkFlushMaxActions() { return config.get(BULK_FLUSH_MAX_ACTIONS_OPTION); } public MemorySize getBulkFlushMaxByteSize() { return config.get(BULK_FLUSH_MAX_SIZE_OPTION); } public long getBulkFlushInterval() { return config.get(BULK_FLUSH_INTERVAL_OPTION).toMillis(); } public DeliveryGuarantee getDeliveryGuarantee() { return config.get(DELIVERY_GUARANTEE_OPTION); } public Optional<String> getUsername() { return config.getOptional(USERNAME_OPTION); } public Optional<String> getPassword() { return config.getOptional(PASSWORD_OPTION); } public Optional<FlushBackoffType> getBulkFlushBackoffType() { return config.getOptional(BULK_FLUSH_BACKOFF_TYPE_OPTION); } public Optional<Integer> getBulkFlushBackoffRetries() { return config.getOptional(BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION); } public Optional<Long> getBulkFlushBackoffDelay() { return config.getOptional(BULK_FLUSH_BACKOFF_DELAY_OPTION).map(Duration::toMillis); } public String getIndex() { return config.get(INDEX_OPTION); } public String getKeyDelimiter() { return config.get(KEY_DELIMITER_OPTION); } public Optional<String> getPathPrefix() { return config.getOptional(CONNECTION_PATH_PREFIX_OPTION); } public Optional<Duration> getConnectionRequestTimeout() { return config.getOptional(CONNECTION_REQUEST_TIMEOUT); } public Optional<Duration> getConnectionTimeout() { return config.getOptional(CONNECTION_TIMEOUT); } public Optional<Duration> getSocketTimeout() { return config.getOptional(SOCKET_TIMEOUT); } public List<HttpHost> getHosts() { return config.get(HOSTS_OPTION).stream() .map(OpensearchConfiguration::validateAndParseHostsString) .collect(Collectors.toList()); } public Optional<Integer> getParallelism() { return config.getOptional(SINK_PARALLELISM); } public Optional<Boolean> isAllowInsecure() { return 
config.getOptional(ALLOW_INSECURE); } private static HttpHost validateAndParseHostsString(String host) { try { HttpHost httpHost = HttpHost.create(host); if (httpHost.getPort() < 0) { throw new ValidationException( String.format( "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing port.", host, HOSTS_OPTION.key())); } if (httpHost.getSchemeName() == null) { throw new ValidationException( String.format( "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing scheme.", host, HOSTS_OPTION.key())); } return httpHost; } catch (Exception e) { throw new ValidationException( String.format( "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'.", host, HOSTS_OPTION.key()), e); } } }
2,927
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchDynamicSink.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.annotation.Internal; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.connector.opensearch.sink.FlushBackoffType; import org.apache.flink.connector.opensearch.sink.OpensearchSink; import org.apache.flink.connector.opensearch.sink.OpensearchSinkBuilder; import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.connector.ChangelogMode; import org.apache.flink.table.connector.format.EncodingFormat; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.sink.SinkV2Provider; import org.apache.flink.table.data.RowData; import org.apache.flink.table.types.DataType; import org.apache.flink.types.RowKind; import org.apache.flink.util.StringUtils; import org.apache.http.HttpHost; import org.opensearch.common.xcontent.XContentType; import java.time.ZoneId; import java.util.List; import java.util.Objects; import java.util.function.Function; import static org.apache.flink.util.Preconditions.checkNotNull; /** * A {@link DynamicTableSink} that describes how to create a {@link OpensearchSink} from a logical * 
description. */ @Internal class OpensearchDynamicSink implements DynamicTableSink { final EncodingFormat<SerializationSchema<RowData>> format; final DataType physicalRowDataType; final List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex; final OpensearchConfiguration config; final ZoneId localTimeZoneId; final String summaryString; final boolean isDynamicIndexWithSystemTime; OpensearchDynamicSink( EncodingFormat<SerializationSchema<RowData>> format, OpensearchConfiguration config, List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex, DataType physicalRowDataType, String summaryString, ZoneId localTimeZoneId) { this.format = checkNotNull(format); this.physicalRowDataType = checkNotNull(physicalRowDataType); this.primaryKeyLogicalTypesWithIndex = checkNotNull(primaryKeyLogicalTypesWithIndex); this.config = checkNotNull(config); this.summaryString = checkNotNull(summaryString); this.localTimeZoneId = localTimeZoneId; this.isDynamicIndexWithSystemTime = isDynamicIndexWithSystemTime(); } public boolean isDynamicIndexWithSystemTime() { IndexGeneratorFactory.IndexHelper indexHelper = new IndexGeneratorFactory.IndexHelper(); return indexHelper.checkIsDynamicIndexWithSystemTimeFormat(config.getIndex()); } Function<RowData, String> createKeyExtractor() { return KeyExtractor.createKeyExtractor( primaryKeyLogicalTypesWithIndex, config.getKeyDelimiter()); } IndexGenerator createIndexGenerator() { return IndexGeneratorFactory.createIndexGenerator( config.getIndex(), DataType.getFieldNames(physicalRowDataType), DataType.getFieldDataTypes(physicalRowDataType), localTimeZoneId); } @Override public ChangelogMode getChangelogMode(ChangelogMode requestedMode) { ChangelogMode.Builder builder = ChangelogMode.newBuilder(); for (RowKind kind : requestedMode.getContainedKinds()) { if (kind != RowKind.UPDATE_BEFORE) { builder.addContainedKind(kind); } } if (isDynamicIndexWithSystemTime && !requestedMode.containsOnly(RowKind.INSERT)) { throw new ValidationException( "Dynamic 
indexing based on system time only works on append only stream."); } return builder.build(); } @Override public SinkRuntimeProvider getSinkRuntimeProvider(Context context) { SerializationSchema<RowData> format = this.format.createRuntimeEncoder(context, physicalRowDataType); final RowOpensearchEmitter rowOpensearchEmitter = new RowOpensearchEmitter( createIndexGenerator(), format, XContentType.JSON, createKeyExtractor()); final OpensearchSinkBuilder<RowData> builder = new OpensearchSinkBuilder<>(); builder.setEmitter(rowOpensearchEmitter); builder.setHosts(config.getHosts().toArray(new HttpHost[0])); builder.setDeliveryGuarantee(config.getDeliveryGuarantee()); builder.setBulkFlushMaxActions(config.getBulkFlushMaxActions()); builder.setBulkFlushMaxSizeMb(config.getBulkFlushMaxByteSize().getMebiBytes()); builder.setBulkFlushInterval(config.getBulkFlushInterval()); if (config.getBulkFlushBackoffType().isPresent()) { FlushBackoffType backoffType = config.getBulkFlushBackoffType().get(); int backoffMaxRetries = config.getBulkFlushBackoffRetries().get(); long backoffDelayMs = config.getBulkFlushBackoffDelay().get(); builder.setBulkFlushBackoffStrategy(backoffType, backoffMaxRetries, backoffDelayMs); } if (config.getUsername().isPresent() && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) { builder.setConnectionUsername(config.getUsername().get()); } if (config.getPassword().isPresent() && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) { builder.setConnectionPassword(config.getPassword().get()); } if (config.getPathPrefix().isPresent() && !StringUtils.isNullOrWhitespaceOnly(config.getPathPrefix().get())) { builder.setConnectionPathPrefix(config.getPathPrefix().get()); } if (config.getConnectionRequestTimeout().isPresent()) { builder.setConnectionRequestTimeout( (int) config.getConnectionRequestTimeout().get().getSeconds()); } if (config.getConnectionTimeout().isPresent()) { builder.setConnectionTimeout((int) 
config.getConnectionTimeout().get().getSeconds()); } if (config.getSocketTimeout().isPresent()) { builder.setSocketTimeout((int) config.getSocketTimeout().get().getSeconds()); } if (config.isAllowInsecure().isPresent()) { builder.setAllowInsecure(config.isAllowInsecure().get()); } return SinkV2Provider.of(builder.build(), config.getParallelism().orElse(null)); } @Override public DynamicTableSink copy() { return new OpensearchDynamicSink( format, config, primaryKeyLogicalTypesWithIndex, physicalRowDataType, summaryString, localTimeZoneId); } @Override public String asSummaryString() { return summaryString; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } OpensearchDynamicSink that = (OpensearchDynamicSink) o; return Objects.equals(format, that.format) && Objects.equals(physicalRowDataType, that.physicalRowDataType) && Objects.equals( primaryKeyLogicalTypesWithIndex, that.primaryKeyLogicalTypesWithIndex) && Objects.equals(config, that.config) && Objects.equals(summaryString, that.summaryString); } @Override public int hashCode() { return Objects.hash( format, physicalRowDataType, primaryKeyLogicalTypesWithIndex, config, summaryString); } }
2,928
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/IndexGeneratorBase.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.Internal;

import java.util.Objects;

/**
 * Base class for {@link IndexGenerator} implementations. Equality is defined solely by the
 * configured index pattern, deliberately using an {@code instanceof} check so that different
 * generator subclasses built from the same pattern compare equal.
 */
@Internal
public abstract class IndexGeneratorBase implements IndexGenerator {

    private static final long serialVersionUID = 1L;

    // The raw index pattern this generator was created from.
    protected final String index;

    public IndexGeneratorBase(String index) {
        this.index = index;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o instanceof IndexGeneratorBase) {
            return index.equals(((IndexGeneratorBase) o).index);
        }
        return false;
    }

    @Override
    public int hashCode() {
        return Objects.hash(index);
    }
}
2,929
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/IndexGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.Row;

import java.io.Serializable;

/** This interface is responsible for generating an index name from a given {@link RowData} record. */
@Internal
interface IndexGenerator extends Serializable {

    /**
     * Initialize the index generator, this will be called only once before {@link
     * #generate(RowData)} is called.
     */
    default void open() {}

    /** Generate index name according to the given row. */
    String generate(RowData row);
}
2,930
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/RowOpensearchEmitter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.api.connector.sink2.SinkWriter; import org.apache.flink.connector.opensearch.sink.OpensearchEmitter; import org.apache.flink.connector.opensearch.sink.RequestIndexer; import org.apache.flink.metrics.MetricGroup; import org.apache.flink.metrics.groups.UnregisteredMetricsGroup; import org.apache.flink.table.api.TableException; import org.apache.flink.table.data.RowData; import org.apache.flink.util.FlinkRuntimeException; import org.apache.flink.util.SimpleUserCodeClassLoader; import org.apache.flink.util.UserCodeClassLoader; import org.opensearch.action.ActionRequest; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.xcontent.XContentType; import java.util.function.Function; import static org.apache.flink.util.Preconditions.checkNotNull; /** Sink function for converting upserts into Opensearch {@link ActionRequest}s. 
*/ class RowOpensearchEmitter implements OpensearchEmitter<RowData> { private final IndexGenerator indexGenerator; private final SerializationSchema<RowData> serializationSchema; private final XContentType contentType; private final Function<RowData, String> createKey; public RowOpensearchEmitter( IndexGenerator indexGenerator, SerializationSchema<RowData> serializationSchema, XContentType contentType, Function<RowData, String> createKey) { this.indexGenerator = checkNotNull(indexGenerator); this.serializationSchema = checkNotNull(serializationSchema); this.contentType = checkNotNull(contentType); this.createKey = checkNotNull(createKey); } @Override public void open() throws Exception { try { serializationSchema.open( new SerializationSchema.InitializationContext() { @Override public MetricGroup getMetricGroup() { return new UnregisteredMetricsGroup(); } @Override public UserCodeClassLoader getUserCodeClassLoader() { return SimpleUserCodeClassLoader.create( RowOpensearchEmitter.class.getClassLoader()); } }); } catch (Exception e) { throw new FlinkRuntimeException("Failed to initialize serialization schema.", e); } indexGenerator.open(); } @Override public void emit(RowData element, SinkWriter.Context context, RequestIndexer indexer) { switch (element.getRowKind()) { case INSERT: case UPDATE_AFTER: processUpsert(element, indexer); break; case UPDATE_BEFORE: case DELETE: processDelete(element, indexer); break; default: throw new TableException("Unsupported message kind: " + element.getRowKind()); } } private void processUpsert(RowData row, RequestIndexer indexer) { final byte[] document = serializationSchema.serialize(row); final String key = createKey.apply(row); if (key != null) { final UpdateRequest updateRequest = new UpdateRequest(indexGenerator.generate(row), key) .doc(document, contentType) .upsert(document, contentType); indexer.add(updateRequest); } else { final IndexRequest indexRequest = new IndexRequest(indexGenerator.generate(row)) .id(key) 
.source(document, contentType); indexer.add(indexRequest); } } private void processDelete(RowData row, RequestIndexer indexer) { final String key = createKey.apply(row); final DeleteRequest deleteRequest = new DeleteRequest(indexGenerator.generate(row), key); indexer.add(deleteRequest); } }
2,931
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/AbstractTimeIndexGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.Internal;

import java.time.format.DateTimeFormatter;

/**
 * Abstract class for time related {@link IndexGenerator}. Holds the date-time pattern string and
 * materializes the corresponding {@link DateTimeFormatter} for subclasses.
 */
@Internal
abstract class AbstractTimeIndexGenerator extends IndexGeneratorBase {

    // The pattern string is kept (and serialized) instead of the formatter itself.
    private final String dateTimeFormat;
    // Transient: rebuilt from dateTimeFormat in open() on each task instance.
    protected transient DateTimeFormatter dateTimeFormatter;

    public AbstractTimeIndexGenerator(String index, String dateTimeFormat) {
        super(index);
        this.dateTimeFormat = dateTimeFormat;
    }

    @Override
    public void open() {
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(dateTimeFormat);
    }
}
2,932
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/IndexGeneratorFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;

import javax.annotation.Nonnull;

import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Factory of {@link IndexGenerator}.
 *
 * <p>Flink supports both static index and dynamic index.
 *
 * <p>If you want to have a static index, this option value should be a plain string, e.g.
 * 'myusers', all the records will be consistently written into "myusers" index.
 *
 * <p>If you want to have a dynamic index, you can use '{field_name}' to reference a field value in
 * the record to dynamically generate a target index. You can also use
 * '{field_name|date_format_string}' to convert a field value of TIMESTAMP/DATE/TIME type into the
 * format specified by date_format_string. The date_format_string is compatible with {@link
 * java.text.SimpleDateFormat}. For example, if the option value is 'myusers_{log_ts|yyyy-MM-dd}',
 * then a record with log_ts field value 2020-03-27 12:25:55 will be written into
 * "myusers_2020-03-27" index.
 */
@Internal
final class IndexGeneratorFactory {

    // Static factory holder; never instantiated.
    private IndexGeneratorFactory() {}

    /**
     * Creates an {@link IndexGenerator} for the given index pattern.
     *
     * <p>Returns a {@link StaticIndexGenerator} when the pattern contains no '{...}' placeholder,
     * otherwise a runtime generator that derives the index name per record (or from system time).
     *
     * @param index index pattern, possibly containing one '{...}' placeholder
     * @param fieldNames physical field names of the written rows, positionally aligned with
     *     {@code dataTypes}
     * @param dataTypes physical field types of the written rows
     * @param localTimeZoneId zone used when the placeholder references system time (now()/
     *     CURRENT_TIMESTAMP)
     */
    public static IndexGenerator createIndexGenerator(
            String index,
            List<String> fieldNames,
            List<DataType> dataTypes,
            ZoneId localTimeZoneId) {
        final IndexHelper indexHelper = new IndexHelper();
        if (indexHelper.checkIsDynamicIndex(index)) {
            return createRuntimeIndexGenerator(
                    index,
                    fieldNames.toArray(new String[0]),
                    dataTypes.toArray(new DataType[0]),
                    indexHelper,
                    localTimeZoneId);
        } else {
            return new StaticIndexGenerator(index);
        }
    }

    /** Convenience overload that resolves system-time placeholders in the JVM's default zone. */
    public static IndexGenerator createIndexGenerator(
            String index, List<String> fieldNames, List<DataType> dataTypes) {
        return createIndexGenerator(index, fieldNames, dataTypes, ZoneId.systemDefault());
    }

    /**
     * Formats a non-null field value with the given {@link DateTimeFormatter}; implementations are
     * chosen per logical type in {@link #createFormatFunction}. Serializable because the generator
     * is shipped to the sink tasks.
     */
    interface DynamicFormatter extends Serializable {
        String format(@Nonnull Object fieldValue, DateTimeFormatter formatter);
    }

    /**
     * Builds the runtime generator for a dynamic index pattern. The pattern is split into
     * {@code prefix + '{...}' + suffix}; the placeholder is resolved either from system time, from
     * a time-typed field with a date format, or from a field's raw string value.
     */
    private static IndexGenerator createRuntimeIndexGenerator(
            String index,
            String[] fieldNames,
            DataType[] fieldTypes,
            IndexHelper indexHelper,
            ZoneId localTimeZoneId) {
        final String dynamicIndexPatternStr = indexHelper.extractDynamicIndexPatternStr(index);
        final String indexPrefix = index.substring(0, index.indexOf(dynamicIndexPatternStr));
        final String indexSuffix =
                index.substring(indexPrefix.length() + dynamicIndexPatternStr.length());

        // Case 1: '{now()|...}' style placeholder — index derives from wall-clock time at write
        // time, formatted in the configured local time zone.
        if (indexHelper.checkIsDynamicIndexWithSystemTimeFormat(index)) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(
                            index, LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    return indexPrefix
                            .concat(LocalDateTime.now(localTimeZoneId).format(dateTimeFormatter))
                            .concat(indexSuffix);
                }
            };
        }

        final boolean isDynamicIndexWithFormat = indexHelper.checkIsDynamicIndexWithFormat(index);
        final int indexFieldPos =
                indexHelper.extractIndexFieldPos(index, fieldNames, isDynamicIndexWithFormat);
        final LogicalType indexFieldType = fieldTypes[indexFieldPos].getLogicalType();
        final LogicalTypeRoot indexFieldLogicalTypeRoot = indexFieldType.getTypeRoot();

        // validate index field type
        indexHelper.validateIndexFieldType(indexFieldLogicalTypeRoot);

        // time extract dynamic index pattern
        final RowData.FieldGetter fieldGetter =
                RowData.createFieldGetter(indexFieldType, indexFieldPos);

        // Case 2: '{field|format}' style placeholder — a time-typed field value is formatted
        // per record.
        if (isDynamicIndexWithFormat) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(index, indexFieldLogicalTypeRoot);
            DynamicFormatter formatFunction =
                    createFormatFunction(indexFieldType, indexFieldLogicalTypeRoot);

            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    Object fieldOrNull = fieldGetter.getFieldOrNull(row);
                    final String formattedField;
                    // TODO we can possibly optimize it to use the nullability of the field
                    if (fieldOrNull != null) {
                        formattedField = formatFunction.format(fieldOrNull, dateTimeFormatter);
                    } else {
                        // A null field value produces the literal "null" in the index name.
                        formattedField = "null";
                    }
                    return indexPrefix.concat(formattedField).concat(indexSuffix);
                }
            };
        }
        // general dynamic index pattern ('{field}' — raw toString of the field value)
        return new IndexGeneratorBase(index) {
            @Override
            public String generate(RowData row) {
                Object indexField = fieldGetter.getFieldOrNull(row);
                return indexPrefix
                        .concat(indexField == null ? "null" : indexField.toString())
                        .concat(indexSuffix);
            }
        };
    }

    /**
     * Returns the formatter for a time-typed index field, based on the field's internal
     * representation (DATE = epoch days as int, TIME = millis-of-day as int, TIMESTAMP* =
     * {@link TimestampData}).
     *
     * @throws TableException for non time-related types
     */
    private static DynamicFormatter createFormatFunction(
            LogicalType indexFieldType, LogicalTypeRoot indexFieldLogicalTypeRoot) {
        switch (indexFieldLogicalTypeRoot) {
            case DATE:
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    return LocalDate.ofEpochDay(indexField).format(dateTimeFormatter);
                };
            case TIME_WITHOUT_TIME_ZONE:
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    // TIME is stored as milliseconds of day; convert to nanos for LocalTime.
                    return LocalTime.ofNanoOfDay(indexField * 1_000_000L).format(dateTimeFormatter);
                };
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    return indexField.toLocalDateTime().format(dateTimeFormatter);
                };
            case TIMESTAMP_WITH_TIME_ZONE:
                throw new UnsupportedOperationException(
                        "TIMESTAMP_WITH_TIME_ZONE is not supported yet");
            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    // Rendered at UTC, not the session time zone.
                    return indexField.toInstant().atZone(ZoneOffset.UTC).format(dateTimeFormatter);
                };
            default:
                throw new TableException(
                        String.format(
                                "Unsupported type '%s' found in Opensearch dynamic index field, "
                                        + "time-related pattern only support types are: DATE,TIME,TIMESTAMP.",
                                indexFieldType));
        }
    }

    /**
     * Helper class for {@link IndexGeneratorFactory}; validates index field types and parses the
     * index format out of the pattern string.
     */
    static class IndexHelper {
        // Matches a single '{...}' placeholder (closing brace optional, matching upstream
        // behavior).
        private static final Pattern dynamicIndexPattern = Pattern.compile("\\{[^\\{\\}]+\\}?");
        // Matches '{field|format}' style placeholders.
        private static final Pattern dynamicIndexTimeExtractPattern =
                Pattern.compile(".*\\{.+\\|.*\\}.*");
        // Matches '{now()|...}' / '{CURRENT_TIMESTAMP|...}' style placeholders.
        private static final Pattern dynamicIndexSystemTimeExtractPattern =
                Pattern.compile(
                        ".*\\{\\s*(now\\(\\s*\\)|NOW\\(\\s*\\)|current_timestamp|CURRENT_TIMESTAMP)\\s*\\|.*\\}.*");
        private static final List<LogicalTypeRoot> supportedTypes = new ArrayList<>();
        private static final Map<LogicalTypeRoot, String> defaultFormats = new HashMap<>();

        static {
            // time related types
            supportedTypes.add(LogicalTypeRoot.DATE);
            supportedTypes.add(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            // general types
            supportedTypes.add(LogicalTypeRoot.VARCHAR);
            supportedTypes.add(LogicalTypeRoot.CHAR);
            supportedTypes.add(LogicalTypeRoot.TINYINT);
            supportedTypes.add(LogicalTypeRoot.INTEGER);
            supportedTypes.add(LogicalTypeRoot.BIGINT);
        }

        static {
            // Fallback formats applied when '{field|}' gives an empty format string.
            defaultFormats.put(LogicalTypeRoot.DATE, "yyyy_MM_dd");
            defaultFormats.put(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE, "HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(
                    LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE, "yyyy_MM_dd_HH_mm_ssX");
        }

        /**
         * Validates the index field type.
         *
         * @throws IllegalArgumentException if the type is not in {@link #supportedTypes}
         */
        void validateIndexFieldType(LogicalTypeRoot logicalType) {
            if (!supportedTypes.contains(logicalType)) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unsupported type %s of index field, " + "Supported types are: %s",
                                logicalType, supportedTypes));
            }
        }

        /** Gets the default date format for the given time-related logical type. */
        String getDefaultFormat(LogicalTypeRoot logicalType) {
            return defaultFormats.get(logicalType);
        }

        /**
         * Checks whether a general dynamic index is enabled by the index pattern.
         *
         * @return true iff the pattern contains exactly one '{...}' placeholder
         * @throws TableException if more than one placeholder is present (chaining unsupported)
         */
        boolean checkIsDynamicIndex(String index) {
            final Matcher matcher = dynamicIndexPattern.matcher(index);
            int count = 0;
            while (matcher.find()) {
                count++;
            }
            if (count > 1) {
                throw new TableException(
                        String.format(
                                "Chaining dynamic index pattern %s is not supported,"
                                        + " only support single dynamic index pattern.",
                                index));
            }
            return count == 1;
        }

        /** Checks whether time-extract dynamic index ('{field|format}') is enabled. */
        boolean checkIsDynamicIndexWithFormat(String index) {
            return dynamicIndexTimeExtractPattern.matcher(index).matches();
        }

        /** Extracts the '{...}' placeholder substring (braces included) from the pattern. */
        String extractDynamicIndexPatternStr(String index) {
            int start = index.indexOf("{");
            int end = index.lastIndexOf("}");
            return index.substring(start, end + 1);
        }

        /** Checks whether the dynamic index is generated from system time. */
        boolean checkIsDynamicIndexWithSystemTimeFormat(String index) {
            return dynamicIndexSystemTimeExtractPattern.matcher(index).matches();
        }

        /**
         * Extracts the referenced field's position in {@code fieldNames}.
         *
         * @throws TableException if the referenced field name does not exist
         */
        int extractIndexFieldPos(
                String index, String[] fieldNames, boolean isDynamicIndexWithFormat) {
            List<String> fieldList = Arrays.asList(fieldNames);
            String indexFieldName;
            if (isDynamicIndexWithFormat) {
                // '{field|format}' — the name ends at the '|'.
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("|"));
            } else {
                // '{field}' — the name ends at the '}'.
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("}"));
            }
            if (!fieldList.contains(indexFieldName)) {
                throw new TableException(
                        String.format(
                                "Unknown field '%s' in index pattern '%s', please check the field name.",
                                indexFieldName, index));
            }
            return fieldList.indexOf(indexFieldName);
        }

        /**
         * Extracts the dateTime format from the pattern ('{field|format}'); falls back to the
         * type's default format when the format part is empty.
         */
        private String extractDateFormat(String index, LogicalTypeRoot logicalType) {
            String format = index.substring(index.indexOf("|") + 1, index.indexOf("}"));
            if ("".equals(format)) {
                format = getDefaultFormat(logicalType);
            }
            return format;
        }
    }
}
2,933
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchDynamicSinkFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.SerializationFormatFactory;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.util.StringUtils;

import java.time.ZoneId;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.ALLOW_INSECURE;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.BULK_FLUSH_MAX_SIZE_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_PATH_PREFIX_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_REQUEST_TIMEOUT;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.CONNECTION_TIMEOUT;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.DELIVERY_GUARANTEE_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.FORMAT_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.HOSTS_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.INDEX_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.KEY_DELIMITER_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.SOCKET_TIMEOUT;
import static org.apache.flink.connector.opensearch.table.OpensearchConnectorOptions.USERNAME_OPTION;
import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM;
import static org.opensearch.common.Strings.capitalize;

/** A {@link DynamicTableSinkFactory} for discovering OpensearchDynamicSink. */
@Internal
public class OpensearchDynamicSinkFactory implements DynamicTableSinkFactory {
    // Value of the 'connector' option that selects this factory.
    private static final String FACTORY_IDENTIFIER = "opensearch";

    /**
     * Builds the Opensearch sink from table options: resolves the primary key (used for document
     * ids), discovers the encoding format, validates all options, and assembles the sink.
     */
    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex =
                getPrimaryKeyLogicalTypesWithIndex(context);

        final FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // The format must be discovered before helper.validate() so format options are consumed.
        EncodingFormat<SerializationSchema<RowData>> format =
                helper.discoverEncodingFormat(SerializationFormatFactory.class, FORMAT_OPTION);
        OpensearchConfiguration config = getConfiguration(helper);
        helper.validate();
        validateConfiguration(config);

        return new OpensearchDynamicSink(
                format,
                config,
                primaryKeyLogicalTypesWithIndex,
                context.getPhysicalRowDataType(),
                capitalize(FACTORY_IDENTIFIER),
                getLocalTimeZoneId(context.getConfiguration()));
    }

    /**
     * Resolves the session's local time zone; the option's default value means "use the JVM's
     * system default zone".
     */
    private static ZoneId getLocalTimeZoneId(ReadableConfig readableConfig) {
        final String zone = readableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
        final ZoneId zoneId =
                TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                        ? ZoneId.systemDefault()
                        : ZoneId.of(zone);

        return zoneId;
    }

    /** Wraps the helper's options into the connector's typed configuration. */
    private static OpensearchConfiguration getConfiguration(FactoryUtil.TableFactoryHelper helper) {
        return new OpensearchConfiguration(helper.getOptions());
    }

    /**
     * Validates cross-option constraints that FactoryUtil cannot express: non-empty hosts/index,
     * bulk-flush bounds, and that username/password are set together.
     *
     * @throws ValidationException on the first violated constraint
     */
    private static void validateConfiguration(OpensearchConfiguration config) {
        config.getHosts(); // validate hosts
        validate(
                config.getIndex().length() >= 1,
                () -> String.format("'%s' must not be empty", INDEX_OPTION.key()));
        int maxActions = config.getBulkFlushMaxActions();
        // -1 disables action-count-based flushing.
        validate(
                maxActions == -1 || maxActions >= 1,
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_MAX_ACTIONS_OPTION.key(), maxActions));
        long maxSize = config.getBulkFlushMaxByteSize().getBytes();
        long mb1 = 1024 * 1024;
        // -1 disables size-based flushing; otherwise the size must be a whole number of MB.
        validate(
                maxSize == -1 || (maxSize >= mb1 && maxSize % mb1 == 0),
                () ->
                        String.format(
                                "'%s' must be in MB granularity. Got: %s",
                                BULK_FLUSH_MAX_SIZE_OPTION.key(),
                                config.getBulkFlushMaxByteSize().toHumanReadableString()));
        validate(
                config.getBulkFlushBackoffRetries().map(retries -> retries >= 1).orElse(true),
                () ->
                        String.format(
                                "'%s' must be at least 1. Got: %s",
                                BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(),
                                config.getBulkFlushBackoffRetries().get()));
        if (config.getUsername().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) {
            validate(
                    config.getPassword().isPresent()
                            && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get()),
                    () ->
                            String.format(
                                    "'%s' and '%s' must be set at the same time. Got: username '%s' and password '%s'",
                                    USERNAME_OPTION.key(),
                                    PASSWORD_OPTION.key(),
                                    config.getUsername().get(),
                                    config.getPassword().orElse("")));
        }
    }

    /** Throws a {@link ValidationException} with the lazily-built message when the check fails. */
    private static void validate(boolean condition, Supplier<String> message) {
        if (!condition) {
            throw new ValidationException(message.get());
        }
    }

    /**
     * Returns the primary-key columns as (index, logical type) pairs, after validating that their
     * types are usable as document-id components. Empty list when the table has no primary key.
     */
    private static List<LogicalTypeWithIndex> getPrimaryKeyLogicalTypesWithIndex(Context context) {
        DataType physicalRowDataType = context.getPhysicalRowDataType();
        int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
        if (primaryKeyIndexes.length != 0) {
            DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);

            OpensearchValidationUtils.validatePrimaryKey(pkDataType);
        }

        ResolvedSchema resolvedSchema = context.getCatalogTable().getResolvedSchema();
        return Arrays.stream(primaryKeyIndexes)
                .mapToObj(
                        index -> {
                            Optional<Column> column = resolvedSchema.getColumn(index);
                            if (!column.isPresent()) {
                                throw new IllegalStateException(
                                        String.format(
                                                "No primary key column found with index '%s'.",
                                                index));
                            }
                            LogicalType logicalType = column.get().getDataType().getLogicalType();
                            return new LogicalTypeWithIndex(index, logicalType);
                        })
                .collect(Collectors.toList());
    }

    /** Options that must always be provided in the table definition. */
    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return Stream.of(HOSTS_OPTION, INDEX_OPTION).collect(Collectors.toSet());
    }

    /** Options that may be forwarded (are safe to pass through) during plan persistence. */
    @Override
    public Set<ConfigOption<?>> forwardOptions() {
        return Stream.of(
                        HOSTS_OPTION,
                        INDEX_OPTION,
                        PASSWORD_OPTION,
                        USERNAME_OPTION,
                        KEY_DELIMITER_OPTION,
                        BULK_FLUSH_MAX_ACTIONS_OPTION,
                        BULK_FLUSH_MAX_SIZE_OPTION,
                        BULK_FLUSH_INTERVAL_OPTION,
                        BULK_FLUSH_BACKOFF_TYPE_OPTION,
                        BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
                        BULK_FLUSH_BACKOFF_DELAY_OPTION,
                        CONNECTION_PATH_PREFIX_OPTION,
                        CONNECTION_REQUEST_TIMEOUT,
                        CONNECTION_TIMEOUT,
                        SOCKET_TIMEOUT,
                        ALLOW_INSECURE)
                .collect(Collectors.toSet());
    }

    /** Options that may be omitted from the table definition. */
    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return Stream.of(
                        KEY_DELIMITER_OPTION,
                        BULK_FLUSH_MAX_SIZE_OPTION,
                        BULK_FLUSH_MAX_ACTIONS_OPTION,
                        BULK_FLUSH_INTERVAL_OPTION,
                        BULK_FLUSH_BACKOFF_TYPE_OPTION,
                        BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
                        BULK_FLUSH_BACKOFF_DELAY_OPTION,
                        CONNECTION_PATH_PREFIX_OPTION,
                        CONNECTION_REQUEST_TIMEOUT,
                        CONNECTION_TIMEOUT,
                        SOCKET_TIMEOUT,
                        FORMAT_OPTION,
                        DELIVERY_GUARANTEE_OPTION,
                        PASSWORD_OPTION,
                        USERNAME_OPTION,
                        SINK_PARALLELISM,
                        ALLOW_INSECURE)
                .collect(Collectors.toSet());
    }

    @Override
    public String factoryIdentifier() {
        return FACTORY_IDENTIFIER;
    }
}
2,934
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchSinkBuilderSupplier.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.connector.opensearch.sink.OpensearchSinkBuilder;

import java.util.function.Supplier;

/**
 * A {@link Supplier} that produces fresh {@link OpensearchSinkBuilder} instances.
 *
 * @param <T> type of the elements handled by the supplied builder
 */
@FunctionalInterface
interface OpensearchSinkBuilderSupplier<T> extends Supplier<OpensearchSinkBuilder<T>> {}
2,935
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/OpensearchConnectorOptions.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.connector.opensearch.table;

import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.opensearch.sink.FlushBackoffType;

import java.time.Duration;
import java.util.List;

/**
 * Base options for the Opensearch connector. Needs to be public so that the {@link
 * org.apache.flink.table.api.TableDescriptor} can access it.
 */
@PublicEvolving
public class OpensearchConnectorOptions {

    // Constant holder; only instantiable within the package.
    OpensearchConnectorOptions() {}

    // --------------------------------------------------------------------------------------------
    // Connectivity and authentication
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<List<String>> HOSTS_OPTION =
            ConfigOptions.key("hosts")
                    .stringType()
                    .asList()
                    .noDefaultValue()
                    .withDescription("Opensearch hosts to connect to.");

    public static final ConfigOption<String> INDEX_OPTION =
            ConfigOptions.key("index")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Opensearch index for every record.");

    public static final ConfigOption<String> PASSWORD_OPTION =
            ConfigOptions.key("password")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Password used to connect to Opensearch instance.");

    public static final ConfigOption<String> USERNAME_OPTION =
            ConfigOptions.key("username")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Username used to connect to Opensearch instance.");

    // --------------------------------------------------------------------------------------------
    // Document id and bulk-flush tuning
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> KEY_DELIMITER_OPTION =
            ConfigOptions.key("document-id.key-delimiter")
                    .stringType()
                    .defaultValue("_")
                    .withDescription(
                            "Delimiter for composite keys e.g., \"$\" would result in IDs \"KEY1$KEY2$KEY3\".");

    public static final ConfigOption<Integer> BULK_FLUSH_MAX_ACTIONS_OPTION =
            ConfigOptions.key("sink.bulk-flush.max-actions")
                    .intType()
                    .defaultValue(1000)
                    .withDescription("Maximum number of actions to buffer for each bulk request.");

    public static final ConfigOption<MemorySize> BULK_FLUSH_MAX_SIZE_OPTION =
            ConfigOptions.key("sink.bulk-flush.max-size")
                    .memoryType()
                    .defaultValue(MemorySize.parse("2mb"))
                    .withDescription("Maximum size of buffered actions per bulk request");

    public static final ConfigOption<Duration> BULK_FLUSH_INTERVAL_OPTION =
            ConfigOptions.key("sink.bulk-flush.interval")
                    .durationType()
                    .defaultValue(Duration.ofSeconds(1))
                    .withDescription("Bulk flush interval");

    public static final ConfigOption<FlushBackoffType> BULK_FLUSH_BACKOFF_TYPE_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.strategy")
                    .enumType(FlushBackoffType.class)
                    .noDefaultValue()
                    .withDescription("Backoff strategy");

    public static final ConfigOption<Integer> BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.max-retries")
                    .intType()
                    .noDefaultValue()
                    .withDescription("Maximum number of retries.");

    public static final ConfigOption<Duration> BULK_FLUSH_BACKOFF_DELAY_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.delay")
                    .durationType()
                    .noDefaultValue()
                    .withDescription("Delay between each backoff attempt.");

    // --------------------------------------------------------------------------------------------
    // HTTP client behavior
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> CONNECTION_PATH_PREFIX_OPTION =
            ConfigOptions.key("connection.path-prefix")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Prefix string to be added to every REST communication.");

    public static final ConfigOption<Duration> CONNECTION_REQUEST_TIMEOUT =
            ConfigOptions.key("connection.request-timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription(
                            "The timeout for requesting a connection from the connection manager.");

    public static final ConfigOption<Duration> CONNECTION_TIMEOUT =
            ConfigOptions.key("connection.timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription("The timeout for establishing a connection.");

    public static final ConfigOption<Duration> SOCKET_TIMEOUT =
            ConfigOptions.key("socket.timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription(
                            "The socket timeout (SO_TIMEOUT) for waiting for data or, put differently,"
                                    + "a maximum period inactivity between two consecutive data packets.");

    // --------------------------------------------------------------------------------------------
    // Serialization, delivery semantics, TLS
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> FORMAT_OPTION =
            ConfigOptions.key("format")
                    .stringType()
                    .defaultValue("json")
                    .withDescription(
                            "The format must produce a valid JSON document. "
                                    + "Please refer to the documentation on formats for more details.");

    public static final ConfigOption<DeliveryGuarantee> DELIVERY_GUARANTEE_OPTION =
            ConfigOptions.key("sink.delivery-guarantee")
                    .enumType(DeliveryGuarantee.class)
                    .defaultValue(DeliveryGuarantee.AT_LEAST_ONCE)
                    .withDescription("Optional delivery guarantee when committing.");

    public static final ConfigOption<Boolean> ALLOW_INSECURE =
            ConfigOptions.key("allow-insecure")
                    .booleanType()
                    .defaultValue(false)
                    .withDescription(
                            "Allow insecure connections to HTTPS endpoints (disable certificates validation)");
}
2,936
0
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch
Create_ds/flink-connector-opensearch/flink-connector-opensearch/src/main/java/org/apache/flink/connector/opensearch/table/StaticIndexGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.connector.opensearch.table; import org.apache.flink.annotation.Internal; import org.apache.flink.table.data.RowData; /** A static {@link IndexGenerator} which generate fixed index name. */ @Internal final class StaticIndexGenerator extends IndexGeneratorBase { public StaticIndexGenerator(String index) { super(index); } public String generate(RowData row) { return index; } }
2,937
0
Create_ds/bahir/streaming-twitter/examples/src/main/java/org/apache/spark/examples/streaming
Create_ds/bahir/streaming-twitter/examples/src/main/java/org/apache/spark/examples/streaming/twitter/JavaTwitterHashTagJoinSentiments.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.streaming.twitter;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.twitter.TwitterUtils;
import scala.Tuple2;
import twitter4j.Status;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * Displays the most positive hash tags by joining the streaming Twitter data with a static RDD of
 * the AFINN word list (http://neuro.imm.dtu.dk/wiki/AFINN)
 *
 * <p>Usage: four OAuth credentials are required (consumer key/secret, access token/secret);
 * any further arguments are used as stream filters.
 */
public class JavaTwitterHashTagJoinSentiments {

  public static void main(String[] args) {
    if (args.length < 4) {
      System.err.println("Usage: JavaTwitterHashTagJoinSentiments <consumer key>" +
        " <consumer secret> <access token> <access token secret> [<filters>]");
      System.exit(1);
    }

    // Set logging level if log4j not configured (override by adding log4j.properties to classpath)
    if (!Logger.getRootLogger().getAllAppenders().hasMoreElements()) {
      Logger.getRootLogger().setLevel(Level.WARN);
    }

    String consumerKey = args[0];
    String consumerSecret = args[1];
    String accessToken = args[2];
    String accessTokenSecret = args[3];
    String[] filters = Arrays.copyOfRange(args, 4, args.length);

    // Set the system properties so that Twitter4j library used by Twitter stream
    // can use them to generate OAuth credentials
    System.setProperty("twitter4j.oauth.consumerKey", consumerKey);
    System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret);
    System.setProperty("twitter4j.oauth.accessToken", accessToken);
    System.setProperty("twitter4j.oauth.accessTokenSecret", accessTokenSecret);

    SparkConf sparkConf = new SparkConf().setAppName("JavaTwitterHashTagJoinSentiments");

    // check Spark configuration for master URL, set it to local if not configured
    if (!sparkConf.contains("spark.master")) {
      sparkConf.setMaster("local[2]");
    }

    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));
    JavaReceiverInputDStream<Status> stream = TwitterUtils.createStream(jssc, filters);

    // Split every tweet into whitespace-separated words.
    JavaDStream<String> words = stream.flatMap(new FlatMapFunction<Status, String>() {
      @Override
      public Iterator<String> call(Status s) {
        return Arrays.asList(s.getText().split(" ")).iterator();
      }
    });

    // Keep only hash tags.
    JavaDStream<String> hashTags = words.filter(new Function<String, Boolean>() {
      @Override
      public Boolean call(String word) {
        return word.startsWith("#");
      }
    });

    // Read in the word-sentiment list and create a static RDD from it.
    // Each line is "<word>\t<score>".
    String wordSentimentFilePath = "streaming-twitter/examples/data/AFINN-111.txt";
    final JavaPairRDD<String, Double> wordSentiments = jssc.sparkContext()
      .textFile(wordSentimentFilePath)
      .mapToPair(new PairFunction<String, String, Double>(){
        @Override
        public Tuple2<String, Double> call(String line) {
          String[] columns = line.split("\t");
          return new Tuple2<>(columns[0], Double.parseDouble(columns[1]));
        }
      });

    // Count each hash tag occurrence once (key without the leading '#').
    JavaPairDStream<String, Integer> hashTagCount = hashTags.mapToPair(
      new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
          // leave out the # character
          return new Tuple2<>(s.substring(1), 1);
        }
      });

    // Total counts over a sliding 10 second window.
    JavaPairDStream<String, Integer> hashTagTotals = hashTagCount.reduceByKeyAndWindow(
      new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer a, Integer b) {
          return a + b;
        }
      }, new Duration(10000));

    // Determine the hash tags with the highest sentiment values by joining the streaming RDD
    // with the static RDD inside the transform() method and then multiplying
    // the frequency of the hash tag by its sentiment value
    JavaPairDStream<String, Tuple2<Double, Integer>> joinedTuples =
      hashTagTotals.transformToPair(new Function<JavaPairRDD<String, Integer>,
        JavaPairRDD<String, Tuple2<Double, Integer>>>() {
          @Override
          public JavaPairRDD<String, Tuple2<Double, Integer>> call(
              JavaPairRDD<String, Integer> topicCount) {
            return wordSentiments.join(topicCount);
          }
        });

    // happiness = sentiment score * occurrence count.
    JavaPairDStream<String, Double> topicHappiness = joinedTuples.mapToPair(
      new PairFunction<Tuple2<String, Tuple2<Double, Integer>>, String, Double>() {
        @Override
        public Tuple2<String, Double> call(
            Tuple2<String, Tuple2<Double, Integer>> topicAndTuplePair) {
          Tuple2<Double, Integer> happinessAndCount = topicAndTuplePair._2();
          return new Tuple2<>(topicAndTuplePair._1(),
            happinessAndCount._1() * happinessAndCount._2());
        }
      });

    // Swap to (happiness, topic) so we can sort by the score.
    // Note: parameter renamed from 'topicHappiness' to avoid shadowing the outer DStream.
    JavaPairDStream<Double, String> happinessTopicPairs = topicHappiness.mapToPair(
      new PairFunction<Tuple2<String, Double>, Double, String>() {
        @Override
        public Tuple2<Double, String> call(Tuple2<String, Double> topicAndHappiness) {
          return new Tuple2<>(topicAndHappiness._2(), topicAndHappiness._1());
        }
      });

    // Sort descending by happiness score.
    JavaPairDStream<Double, String> happiest10 = happinessTopicPairs.transformToPair(
      new Function<JavaPairRDD<Double, String>, JavaPairRDD<Double, String>>() {
        @Override
        public JavaPairRDD<Double, String> call(
            JavaPairRDD<Double, String> happinessAndTopics) {
          return happinessAndTopics.sortByKey(false);
        }
      }
    );

    // Print hash tags with the most positive sentiment values
    happiest10.foreachRDD(new VoidFunction<JavaPairRDD<Double, String>>() {
      @Override
      public void call(JavaPairRDD<Double, String> sortedPairs) {
        List<Tuple2<Double, String>> topList = sortedPairs.take(10);
        System.out.println(
          String.format("\nHappiest topics in last 10 seconds (%s total):",
            sortedPairs.count()));
        for (Tuple2<Double, String> pair : topList) {
          System.out.println(
            String.format("%s (%s happiness)", pair._2(), pair._1()));
        }
      }
    });

    jssc.start();
    try {
      jssc.awaitTermination();
    } catch (InterruptedException e) {
      // Restore the interrupt status instead of swallowing it, so the JVM can
      // shut down cleanly when the driver thread is interrupted.
      Thread.currentThread().interrupt();
    }
  }
}
2,938
0
Create_ds/bahir/streaming-twitter/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-twitter/src/test/java/org/apache/spark/streaming/twitter/JavaTwitterStreamSuite.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.twitter; import org.junit.Test; import twitter4j.FilterQuery; import twitter4j.Status; import twitter4j.auth.Authorization; import twitter4j.auth.NullAuthorization; import org.apache.spark.storage.StorageLevel; import org.apache.spark.streaming.LocalJavaStreamingContext; import org.apache.spark.streaming.api.java.JavaDStream; public class JavaTwitterStreamSuite extends LocalJavaStreamingContext { @Test public void testTwitterStream() { String[] filters = { "filter1", "filter2" }; Authorization auth = NullAuthorization.getInstance(); FilterQuery query = new FilterQuery().language("en,es"); // tests the API, does not actually test data receiving JavaDStream<Status> test1 = TwitterUtils.createStream(ssc); JavaDStream<Status> test2 = TwitterUtils.createStream(ssc, filters); JavaDStream<Status> test3 = TwitterUtils.createStream( ssc, filters, StorageLevel.MEMORY_AND_DISK_SER_2()); JavaDStream<Status> test4 = TwitterUtils.createStream(ssc, auth); JavaDStream<Status> test5 = TwitterUtils.createStream(ssc, auth, filters); JavaDStream<Status> test6 = TwitterUtils.createStream(ssc, auth, filters, StorageLevel.MEMORY_AND_DISK_SER_2()); JavaDStream<Status> test7 = 
TwitterUtils.createFilteredStream(ssc, auth, query, StorageLevel.MEMORY_AND_DISK_SER_2()); } }
2,939
0
Create_ds/bahir/sql-streaming-sqs/src/main/java/org/apache/spark/sql/streaming
Create_ds/bahir/sql-streaming-sqs/src/main/java/org/apache/spark/sql/streaming/sqs/InstanceProfileCredentialsProviderWithRetries.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming.sqs; import com.amazonaws.AmazonClientException; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.InstanceProfileCredentialsProvider; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; public class InstanceProfileCredentialsProviderWithRetries extends InstanceProfileCredentialsProvider { private static final Log LOG = LogFactory.getLog( InstanceProfileCredentialsProviderWithRetries.class); public AWSCredentials getCredentials() { int retries = 10; int sleep = 500; while(retries > 0) { try { return super.getCredentials(); } catch (RuntimeException re) { LOG.error("Got an exception while fetching credentials " + re); --retries; try { Thread.sleep(sleep); } catch (InterruptedException ie) { // Do nothing } if (sleep < 10000) { sleep *= 2; } } catch (Error error) { LOG.error("Got an exception while fetching credentials " + error); --retries; try { Thread.sleep(sleep); } catch (InterruptedException ie) { // Do nothing } if (sleep < 10000) { sleep *= 2; } } } throw new AmazonClientException("Unable to load credentials."); } }
2,940
0
Create_ds/bahir/sql-streaming-sqs/src/main/java/org/apache/spark/sql/streaming
Create_ds/bahir/sql-streaming-sqs/src/main/java/org/apache/spark/sql/streaming/sqs/BasicAWSCredentialsProvider.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming.sqs; import com.amazonaws.AmazonClientException; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.AWSCredentials; import org.apache.commons.lang.StringUtils; public class BasicAWSCredentialsProvider implements AWSCredentialsProvider { private final String accessKey; private final String secretKey; public BasicAWSCredentialsProvider(String accessKey, String secretKey) { this.accessKey = accessKey; this.secretKey = secretKey; } public AWSCredentials getCredentials() { if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) { return new BasicAWSCredentials(accessKey, secretKey); } throw new AmazonClientException( "Access key or secret key is null"); } public void refresh() {} @Override public String toString() { return getClass().getSimpleName(); } }
2,941
0
Create_ds/bahir/streaming-pubsub/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-pubsub/src/test/java/org/apache/spark/streaming/pubsub/JavaPubsubStreamSuite.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.pubsub; import org.apache.spark.storage.StorageLevel; import org.apache.spark.streaming.LocalJavaStreamingContext; import org.apache.spark.streaming.api.java.JavaReceiverInputDStream; import org.junit.Test; public class JavaPubsubStreamSuite extends LocalJavaStreamingContext { @Test public void testPubsubStream() { // tests the API, does not actually test data receiving JavaReceiverInputDStream<SparkPubsubMessage> stream1 = PubsubUtils.createStream( ssc, "project", "subscription", new SparkGCPCredentials.Builder().build(), StorageLevel.MEMORY_AND_DISK_SER_2()); JavaReceiverInputDStream<SparkPubsubMessage> stream2 = PubsubUtils.createStream( ssc, "project", "topic", "subscription", new SparkGCPCredentials.Builder().build(), StorageLevel.MEMORY_AND_DISK_SER_2()); } }
2,942
0
Create_ds/bahir/common/src/test/java/org/apache/spark
Create_ds/bahir/common/src/test/java/org/apache/spark/streaming/LocalJavaStreamingContext.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.streaming;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.junit.After;
import org.junit.Before;

/**
 * Base class for Java streaming tests: creates a fresh local
 * {@link JavaStreamingContext} with a manual clock before each test and
 * stops it afterwards.
 */
public abstract class LocalJavaStreamingContext {

    protected transient JavaStreamingContext ssc;

    @Before
    public void setUp() {
        SparkConf conf = new SparkConf();
        conf.setMaster("local[2]");
        conf.setAppName("test");
        // Manual clock lets tests advance streaming time deterministically.
        conf.set("spark.streaming.clock", "org.apache.spark.util.ManualClock");
        ssc = new JavaStreamingContext(conf, new Duration(1000));
        ssc.checkpoint("checkpoint");
    }

    @After
    public void tearDown() {
        ssc.stop();
        ssc = null;
    }
}
2,943
0
Create_ds/bahir/sql-streaming-jdbc/examples/src/main/java/org/apache/bahir/examples/sql/streaming
Create_ds/bahir/sql-streaming-jdbc/examples/src/main/java/org/apache/bahir/examples/sql/streaming/jdbc/JavaJdbcSinkDemo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.examples.sql.streaming.jdbc;

import java.io.Serializable;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions;
import org.apache.spark.sql.streaming.OutputMode;
import org.apache.spark.sql.streaming.StreamingQuery;
import org.apache.spark.sql.streaming.Trigger;

/**
 * Mock using rate source, change the log to a simple Person
 * object with name and age property, and write to jdbc.
 *
 * Usage: JdbcSinkDemo &lt;jdbcUrl&gt; &lt;tableName&gt; &lt;username&gt; &lt;password&gt;
 */
public class JavaJdbcSinkDemo {

    public static void main(String[] args) throws Exception {
        if (args.length < 4) {
            System.err.println("Usage: JdbcSinkDemo <jdbcUrl> <tableName> <username> <password>");
            System.exit(1);
        }
        String jdbcUrl = args[0];
        String tableName = args[1];
        String username = args[2];
        String password = args[3];

        SparkConf sparkConf = new SparkConf().setAppName("JavaJdbcSinkDemo");

        SparkSession spark = SparkSession.builder()
                .config(sparkConf)
                .getOrCreate();

        // Load the mock "rate" data source: a monotonically increasing Long per row.
        Dataset<Long> lines = spark
                .readStream()
                .format("rate")
                .option("numPartitions", "5")
                .option("rowsPerSecond", "100")
                .load().select("value").as(Encoders.LONG());

        // Change each input value to a Person object.
        DemoMapFunction demoFunction = new DemoMapFunction();
        Dataset<Person> result = lines.map(demoFunction, Encoders.javaSerialization(Person.class));

        // Print schema for debug.
        result.printSchema();

        // Single outputMode call: the original chained both the string form
        // ("append") and OutputMode.Append(); the second call simply overwrote
        // the first, so only one is kept.
        StreamingQuery query = result
                .writeStream()
                .format("streaming-jdbc")
                .outputMode(OutputMode.Append())
                .option(JDBCOptions.JDBC_URL(), jdbcUrl)
                .option(JDBCOptions.JDBC_TABLE_NAME(), tableName)
                .option(JDBCOptions.JDBC_DRIVER_CLASS(), "com.mysql.jdbc.Driver")
                .option(JDBCOptions.JDBC_BATCH_INSERT_SIZE(), "5")
                .option("user", username)
                .option("password", password)
                .trigger(Trigger.ProcessingTime("10 seconds"))
                .start();

        query.awaitTermination();
    }

    /**
     * Simple bean carried through the stream. Must implement
     * {@link Serializable}: {@code Encoders.javaSerialization(Class)} requires
     * its type parameter to be serializable, and the original class was not.
     */
    private static class Person implements Serializable {
        private static final long serialVersionUID = 1L;

        private String name;
        private int age;

        Person(String name, int age) {
            this.name = name;
            this.age = age;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getAge() {
            return age;
        }

        public void setAge(int age) {
            this.age = age;
        }
    }

    /** Maps a rate-source value to a synthetic Person (age cycles 0-29). */
    private static class DemoMapFunction implements MapFunction<Long, Person> {
        @Override
        public Person call(Long value) throws Exception {
            return new Person("name_" + value, value.intValue() % 30);
        }
    }
}
2,944
0
Create_ds/bahir/sql-cloudant/src/main/java/org/apache/bahir/cloudant
Create_ds/bahir/sql-cloudant/src/main/java/org/apache/bahir/cloudant/common/ChangesRow.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.cloudant.common;

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;

import java.util.List;

/**
 * Class representing a single row in a changes feed. Structure:
 *
 * {
 *   last_seq": 5
 *   "results": [
 *     ---*** This next items is the ChangesRow ***---
 *     {
 *       "changes": [ {"rev": "2-eec205a9d413992850a6e32678485900"}, ... ],
 *       "deleted": true,
 *       "id": "deleted",
 *       "seq": 5,
 *       "doc": ... structure ...
 *     }
 *   ]
 * }
 *
 * Instances are populated reflectively by Gson, so fields without visible
 * assignments are set during deserialization.
 */
public class ChangesRow {

    /** A single revision entry inside the "changes" array. */
    public class Rev {
        private String rev;

        public String getRev() {
            return rev;
        }
    }

    private List<Rev> changes;
    public Boolean deleted;
    private String id;
    private JsonElement seq;
    private JsonObject doc;

    public List<Rev> getChanges() {
        return changes;
    }

    /**
     * Returns the raw JSON text of the "seq" field, or {@code null} if the
     * field is absent or JSON null.
     */
    public String getSeq() {
        // Gson leaves the field null when "seq" is missing from the document;
        // the original code only handled JsonNull and would NPE in that case.
        if (seq == null || seq.isJsonNull()) {
            return null;
        }
        return seq.toString();
    }

    public String getId() {
        return id;
    }

    public JsonObject getDoc() {
        return doc;
    }
}
2,945
0
Create_ds/bahir/sql-cloudant/src/main/java/org/apache/bahir/cloudant
Create_ds/bahir/sql-cloudant/src/main/java/org/apache/bahir/cloudant/common/ChangesRowScanner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.cloudant.common;

import com.google.gson.Gson;

import java.io.BufferedReader;
import java.io.IOException;

/**
 * This scanner will read through a _changes stream until it finds the
 * next meaningful row, either a change entry or the closing line with
 * the lastSeq and, perhaps, pending changes (for normal/longpoll feeds).
 */
public class ChangesRowScanner {

    private static final Gson gson = new Gson();

    /**
     * Reads up to the next meaningful line from the changes feed and parses
     * it. Works for all styles of changes feed (normal, longpoll, continuous).
     *
     * @param changesReader reader positioned on the changes-feed stream
     * @return the next parsed {@link ChangesRow}, or {@code null} when the
     *         feed is exhausted (end of stream or the trailing last_seq line)
     * @throws IOException if there's a problem reading the stream
     */
    public static ChangesRow readRowFromReader(BufferedReader changesReader)
            throws IOException {
        String line;
        // Skip structural noise: empty lines are heartbeats; the "{"results":"
        // and "]," lines are just the wrapper of the result set in
        // normal/longpoll mode. A null line means end of stream.
        while ((line = changesReader.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            if (line.startsWith("{\"results\":")) {
                continue;
            } else if (line.startsWith("],")) {
                continue;
            }
            break;
        }

        if (line == null) {
            return null; // End of stream.
        }

        // Both forms of the trailing sequence line mark the end of the feed.
        if (line.startsWith("\"last_seq\":") || line.startsWith("{\"last_seq\":")) {
            return null;
        }

        // Rows inside the results array carry a trailing comma; strip it so
        // the line is a standalone JSON object before parsing.
        if (line.endsWith(",")) {
            line = line.substring(0, line.length() - 1);
        }
        return gson.fromJson(line, ChangesRow.class);
    }
}
2,946
0
Create_ds/bahir/streaming-pubnub/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-pubnub/src/test/java/org/apache/spark/streaming/pubnub/JavaPubNubStreamSuite.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.streaming.pubnub;

import com.pubnub.api.PNConfiguration;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.LocalJavaStreamingContext;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.junit.Test;

import java.util.HashSet;

/**
 * Java API-compatibility test for {@code PubNubUtils}: the stream factory is
 * invoked once for compile coverage; no PubNub connection is made and no data
 * is received.
 */
public class JavaPubNubStreamSuite extends LocalJavaStreamingContext {
    @Test
    public void testPubNubStream() {
        final PNConfiguration configuration = new PNConfiguration();
        final HashSet<String> channels = new HashSet<>();
        final HashSet<String> channelGroups = new HashSet<>();

        JavaReceiverInputDStream<SparkPubNubMessage> receiverStream =
            PubNubUtils.createStream(
                ssc, configuration, channels, channelGroups, null,
                StorageLevel.MEMORY_AND_DISK_SER_2()
            );
    }
}
2,947
0
Create_ds/bahir/streaming-zeromq/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-zeromq/src/test/java/org/apache/spark/streaming/zeromq/JavaZeroMQStreamSuite.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.streaming.zeromq;

import org.junit.Test;

import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.LocalJavaStreamingContext;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import zmq.ZMQ;

import java.util.Arrays;

/**
 * Java API-compatibility test for {@code ZeroMQUtils}: both factory methods
 * are invoked once; no messages are exchanged with any ZeroMQ endpoint.
 */
public class JavaZeroMQStreamSuite extends LocalJavaStreamingContext {
  @Test
  public void testZeroMQAPICompatibility() {
    // Test the API, but do not exchange any messages.
    final String publishUrl = "tcp://localhost:5555";
    final String topic = "topic1";

    // Converts a multipart message into a single String. Frame 0 is the topic
    // name, so only frame 1 is decoded; each message is assumed to carry a
    // single payload frame.
    final Function<byte[][], Iterable<String>> frameDecoder =
        new Function<byte[][], Iterable<String>>() {
          @Override
          public Iterable<String> call(byte[][] frames) throws Exception {
            return Arrays.asList(new String(frames[1], ZMQ.CHARSET));
          }
        };

    JavaReceiverInputDStream<String> customConvertedStream = ZeroMQUtils.createJavaStream(
        ssc, publishUrl, true, Arrays.asList(topic.getBytes()),
        frameDecoder, StorageLevel.MEMORY_AND_DISK_SER_2()
    );
    JavaReceiverInputDStream<String> textStream = ZeroMQUtils.createTextJavaStream(
        ssc, publishUrl, true, Arrays.asList(topic.getBytes()),
        StorageLevel.MEMORY_AND_DISK_SER_2()
    );
  }
}
2,948
0
Create_ds/bahir/sql-streaming-akka/examples/src/main/java/org/apache/bahir/examples/sql/streaming
Create_ds/bahir/sql-streaming-akka/examples/src/main/java/org/apache/bahir/examples/sql/streaming/akka/JavaAkkaStreamWordCount.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.examples.sql.streaming.akka;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.streaming.StreamingQuery;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Counts words in UTF8 encoded, '\n' delimited text received from Akka Feeder Actor system.
 *
 * Usage: AkkaStreamWordCount &lt;urlOfPublisher&gt;
 *   &lt;urlOfPublisher&gt; provides the uri of the publisher or feeder actor that
 *   Structured Streaming would connect to receive data.
 *
 * To run this on your local machine, a Feeder Actor System should be up and running.
 */
public final class JavaAkkaStreamWordCount {

  public static void main(String[] args) throws Exception {
    if (args.length < 1) {
      System.err.println("Usage: JavaAkkaStreamWordCount <urlOfPublisher>");
      System.exit(1);
    }

    // Quieten log output unless logging has been configured explicitly.
    if (!Logger.getRootLogger().getAllAppenders().hasMoreElements()) {
      Logger.getRootLogger().setLevel(Level.WARN);
    }

    final String urlOfPublisher = args[0];

    SparkConf sparkConf = new SparkConf().setAppName("JavaAkkaStreamWordCount");
    // Check Spark configuration for master URL, set it to local if not configured.
    if (!sparkConf.contains("spark.master")) {
      sparkConf.setMaster("local[4]");
    }

    SparkSession spark = SparkSession.builder()
        .config(sparkConf)
        .getOrCreate();

    // Stream of input lines received from the publisher/feeder actor.
    Dataset<String> inputLines = spark
        .readStream()
        .format("org.apache.bahir.sql.streaming.akka.AkkaStreamSourceProvider")
        .option("urlOfPublisher", urlOfPublisher)
        .load().select("value").as(Encoders.STRING());

    // Tokenize each line on single spaces.
    Dataset<String> tokens = inputLines.flatMap(new FlatMapFunction<String, String>() {
      @Override
      public Iterator<String> call(String line) throws Exception {
        return Arrays.asList(line.split(" ")).iterator();
      }
    }, Encoders.STRING());

    // Running count per distinct word.
    Dataset<Row> wordCounts = tokens.groupBy("value").count();

    // Emit the full running counts to the console on every trigger.
    StreamingQuery query = wordCounts.writeStream()
        .outputMode("complete")
        .format("console")
        .start();

    query.awaitTermination();
  }
}
2,949
0
Create_ds/bahir/streaming-mqtt/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-mqtt/src/test/java/org/apache/spark/streaming/mqtt/JavaMQTTStreamSuite.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.mqtt; import org.apache.spark.storage.StorageLevel; import org.apache.spark.streaming.LocalJavaStreamingContext; import org.apache.spark.streaming.api.java.JavaReceiverInputDStream; import org.junit.Test; import scala.Tuple2; public class JavaMQTTStreamSuite extends LocalJavaStreamingContext { @Test public void testMQTTStream() { String brokerUrl = "abc"; String topic = "def"; String[] topics = {"def1","def2"}; // tests the API, does not actually test data receiving JavaReceiverInputDStream<String> test1 = MQTTUtils.createStream(ssc, brokerUrl, topic); JavaReceiverInputDStream<String> test2 = MQTTUtils.createStream(ssc, brokerUrl, topic, StorageLevel.MEMORY_AND_DISK_SER_2()); JavaReceiverInputDStream<String> test3 = MQTTUtils.createStream(ssc, brokerUrl, topic, StorageLevel.MEMORY_AND_DISK_SER_2(), "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<String> test4 = MQTTUtils.createStream(ssc, brokerUrl, topic, "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<String> test5 = MQTTUtils.createStream(ssc, brokerUrl, topic, "testid", "user", "password", true); JavaReceiverInputDStream<Tuple2<String, String>> test6 = 
MQTTUtils.createPairedStream(ssc, brokerUrl, topics); JavaReceiverInputDStream<Tuple2<String, String>> test7 = MQTTUtils.createPairedStream(ssc, brokerUrl, topics, StorageLevel.MEMORY_AND_DISK_SER_2()); JavaReceiverInputDStream<Tuple2<String, String>> test8 = MQTTUtils.createPairedStream(ssc, brokerUrl, topics, StorageLevel.MEMORY_AND_DISK_SER_2(), "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<Tuple2<String, String>> test9 = MQTTUtils.createPairedStream(ssc, brokerUrl, topics, "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<Tuple2<String, String>> test10 = MQTTUtils.createPairedStream(ssc, brokerUrl, topics, "testid", "user", "password", true); JavaReceiverInputDStream<Tuple2<String, byte[]>> test11 = MQTTUtils.createPairedByteArrayStream(ssc, brokerUrl, topics); JavaReceiverInputDStream<Tuple2<String, byte[]>> test12 = MQTTUtils.createPairedByteArrayStream(ssc, brokerUrl, topics, StorageLevel.MEMORY_AND_DISK_SER_2()); JavaReceiverInputDStream<Tuple2<String, byte[]>> test13 = MQTTUtils.createPairedByteArrayStream(ssc, brokerUrl, topics, StorageLevel.MEMORY_AND_DISK_SER_2(), "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<Tuple2<String, byte[]>> test14 = MQTTUtils.createPairedByteArrayStream(ssc, brokerUrl, topics, "testid", "user", "password", true, 1, 10, 30, 3); JavaReceiverInputDStream<Tuple2<String, byte[]>> test15 = MQTTUtils.createPairedByteArrayStream(ssc, brokerUrl, topics, "testid", "user", "password", true); } }
2,950
0
Create_ds/bahir/sql-streaming-mqtt/examples/src/main/java/org/apache/bahir/examples/sql/streaming
Create_ds/bahir/sql-streaming-mqtt/examples/src/main/java/org/apache/bahir/examples/sql/streaming/mqtt/JavaMQTTStreamWordCount.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.examples.sql.streaming.mqtt;

import org.apache.log4j.Logger;
import org.apache.log4j.Level;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.streaming.StreamingQuery;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Counts words in UTF8 encoded, '\n' delimited text received from MQTT Server.
 *
 * Usage: JavaMQTTStreamWordCount &lt;brokerUrl&gt; &lt;topic&gt;
 *   &lt;brokerUrl&gt; and &lt;topic&gt; describe the MQTT server that Structured
 *   Streaming would connect to receive data.
 *
 * To run this on your local machine, a MQTT Server should be up and running.
 */
public final class JavaMQTTStreamWordCount {

  public static void main(String[] args) throws Exception {
    if (args.length < 2) {
      System.err.println("Usage: JavaMQTTStreamWordCount <brokerUrl> <topic>");
      System.exit(1);
    }

    // Quieten log output unless logging has been configured explicitly.
    if (!Logger.getRootLogger().getAllAppenders().hasMoreElements()) {
      Logger.getRootLogger().setLevel(Level.WARN);
    }

    final String brokerUrl = args[0];
    final String topic = args[1];

    SparkConf sparkConf = new SparkConf().setAppName("JavaMQTTStreamWordCount");
    // Check Spark configuration for master URL, set it to local if not configured.
    if (!sparkConf.contains("spark.master")) {
      sparkConf.setMaster("local[4]");
    }

    SparkSession spark = SparkSession.builder()
        .config(sparkConf)
        .getOrCreate();

    // Stream of input lines read from the MQTT topic; the raw payload bytes
    // are cast to a string column.
    Dataset<String> inputLines = spark
        .readStream()
        .format("org.apache.bahir.sql.streaming.mqtt.MQTTStreamSourceProvider")
        .option("topic", topic)
        .load(brokerUrl).selectExpr("CAST(payload AS STRING)").as(Encoders.STRING());

    // Tokenize each line on single spaces.
    Dataset<String> tokens = inputLines.flatMap(new FlatMapFunction<String, String>() {
      @Override
      public Iterator<String> call(String line) {
        return Arrays.asList(line.split(" ")).iterator();
      }
    }, Encoders.STRING());

    // Running count per distinct word.
    Dataset<Row> wordCounts = tokens.groupBy("value").count();

    // Emit the full running counts to the console on every trigger.
    StreamingQuery query = wordCounts.writeStream()
        .outputMode("complete")
        .format("console")
        .start();

    query.awaitTermination();
  }
}
2,951
0
Create_ds/bahir/sql-streaming-mqtt/examples/src/main/java/org/apache/bahir/examples/sql/streaming
Create_ds/bahir/sql-streaming-mqtt/examples/src/main/java/org/apache/bahir/examples/sql/streaming/mqtt/JavaMQTTSinkWordCount.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.bahir.examples.sql.streaming.mqtt;

import java.io.File;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.io.FileUtils;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.streaming.StreamingQuery;

/**
 * Counts words in UTF-8 encoded, '\n' delimited text received from local socket
 * and publishes results on MQTT topic.
 *
 * Usage: JavaMQTTSinkWordCount &lt;port&gt; &lt;brokerUrl&gt; &lt;topic&gt;
 * &lt;port&gt; represents local network port on which program is listening for input.
 * &lt;brokerUrl&gt; and &lt;topic&gt; describe the MQTT server that structured streaming
 * would connect and send data.
 *
 * To run example on your local machine, a MQTT Server should be up and running.
 * Linux users may leverage 'nc -lk &lt;port&gt;' to listen on local port and wait
 * for Spark socket connection.
 */
public class JavaMQTTSinkWordCount {

    public static void main(String[] args) throws Exception {
        // Three positional arguments are required: args[0] (port), args[1]
        // (broker URL) and args[2] (topic). The original check of
        // `args.length < 2` allowed a two-argument invocation to crash with
        // ArrayIndexOutOfBoundsException when args[2] was read below.
        if (args.length < 3) {
            System.err.println("Usage: JavaMQTTSinkWordCount <port> <brokerUrl> <topic>");
            System.exit(1);
        }

        String checkpointDir = System.getProperty("java.io.tmpdir") + "/mqtt-example/";
        // Remove checkpoint directory so the example starts from a clean state.
        FileUtils.deleteDirectory(new File(checkpointDir));

        Integer port = Integer.valueOf(args[0]);
        String brokerUrl = args[1];
        String topic = args[2];

        SparkSession spark = SparkSession.builder()
            .appName("JavaMQTTSinkWordCount").master("local[4]")
            .getOrCreate();

        // Create DataFrame representing the stream of input lines from local network socket.
        Dataset<String> lines = spark.readStream()
            .format("socket")
            .option("host", "localhost").option("port", port)
            .load().select("value").as(Encoders.STRING());

        // Split the lines into words.
        Dataset<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String x) {
                return Arrays.asList(x.split(" ")).iterator();
            }
        }, Encoders.STRING());

        // Generate running word count.
        Dataset<Row> wordCounts = words.groupBy("value").count();

        // Start publishing the counts to MQTT server; checkpoint and local
        // storage both live under the temp checkpoint directory.
        StreamingQuery query = wordCounts.writeStream()
            .format("org.apache.bahir.sql.streaming.mqtt.MQTTStreamSinkProvider")
            .option("checkpointLocation", checkpointDir)
            .outputMode("complete")
            .option("topic", topic)
            .option("localStorage", checkpointDir)
            .start(brokerUrl);

        query.awaitTermination();
    }
}
2,952
0
Create_ds/bahir/streaming-akka/examples/src/main/java/org/apache/spark/examples/streaming
Create_ds/bahir/streaming-akka/examples/src/main/java/org/apache/spark/examples/streaming/akka/JavaActorWordCount.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.streaming.akka;

import java.util.Arrays;
import java.util.Iterator;

import scala.Tuple2;

import akka.actor.ActorSelection;
import akka.actor.Props;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.akka.AkkaUtils;
import org.apache.spark.streaming.akka.JavaActorReceiver;

/**
 * A sample actor as receiver, is also simplest. This receiver actor
 * goes and subscribe to a typical publisher/feeder actor and receives
 * data.
 *
 * @see [[org.apache.spark.examples.streaming.akka.FeederActor]]
 */
class JavaSampleActorReceiver<T> extends JavaActorReceiver {

    /** Akka path of the remote feeder actor this receiver subscribes to. */
    private final String urlOfPublisher;

    JavaSampleActorReceiver(String urlOfPublisher) {
        this.urlOfPublisher = urlOfPublisher;
    }

    private ActorSelection remotePublisher;

    /** Subscribe to the feeder actor when this receiver actor starts. */
    @Override
    public void preStart() {
        remotePublisher = getContext().actorSelection(urlOfPublisher);
        remotePublisher.tell(new SubscribeReceiver(getSelf()), getSelf());
    }

    /**
     * Forward every received message into Spark's block store. The cast is
     * unchecked: type safety between the feeder and this receiver is the
     * caller's responsibility (see the note in JavaActorWordCount.main).
     */
    @Override
    public void onReceive(Object msg) throws Exception {
        @SuppressWarnings("unchecked")
        T msgT = (T) msg;
        store(msgT);
    }

    /** Unsubscribe from the feeder actor when this receiver actor stops. */
    @Override
    public void postStop() {
        remotePublisher.tell(new UnsubscribeReceiver(getSelf()), getSelf());
    }
}

/**
 * A sample word count program demonstrating the use of plugging in
 * Actor as Receiver
 * Usage: JavaActorWordCount &lt;hostname&gt; &lt;port&gt;
 * &lt;hostname&gt; and &lt;port&gt; describe the AkkaSystem that Spark Sample feeder is running on.
 *
 * To run this example locally, you may run Feeder Actor as
 * <code><pre>
 *     $ bin/run-example org.apache.spark.examples.streaming.akka.FeederActor localhost 9999
 * </pre></code>
 * and then run the example
 * <code><pre>
 *     $ bin/run-example org.apache.spark.examples.streaming.akka.JavaActorWordCount localhost 9999
 * </pre></code>
 */
public class JavaActorWordCount {

    public static void main(String[] args) {
        if (args.length < 2) {
            System.err.println("Usage: JavaActorWordCount <hostname> <port>");
            System.exit(1);
        }

        // Set logging level if log4j not configured (override by adding log4j.properties to classpath)
        if (!Logger.getRootLogger().getAllAppenders().hasMoreElements()) {
            Logger.getRootLogger().setLevel(Level.WARN);
        }

        final String host = args[0];
        final String port = args[1];
        SparkConf sparkConf = new SparkConf().setAppName("JavaActorWordCount");
        // check Spark configuration for master URL, set it to local if not configured
        if (!sparkConf.contains("spark.master")) {
            sparkConf.setMaster("local[2]");
        }

        // Create the context and set the batch size
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

        String feederActorURI = "akka.tcp://test@" + host + ":" + port + "/user/FeederActor";

        /*
         * Following is the use of AkkaUtils.createStream to plug in custom actor as receiver
         *
         * An important point to note:
         * Since Actor may exist outside the spark framework, It is thus user's responsibility
         * to ensure the type safety, i.e type of data received and InputDstream
         * should be same.
         *
         * For example: Both AkkaUtils.createStream and JavaSampleActorReceiver are parameterized
         * to same type to ensure type safety.
         */
        JavaDStream<String> lines = AkkaUtils.createStream(
            jssc,
            Props.create(JavaSampleActorReceiver.class, feederActorURI),
            "SampleReceiver");

        // compute wordcount
        lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String s) {
                return Arrays.asList(s.split("\\s+")).iterator();
            }
        }).mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) {
                return new Tuple2<>(s, 1);
            }
        }).reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer i1, Integer i2) {
                return i1 + i2;
            }
        }).print();

        jssc.start();
        try {
            jssc.awaitTermination();
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers up the stack can still
            // observe the interruption (original code swallowed it after
            // printing the trace).
            e.printStackTrace();
            Thread.currentThread().interrupt();
        }
    }
}
2,953
0
Create_ds/bahir/streaming-akka/src/test/java/org/apache/spark/streaming
Create_ds/bahir/streaming-akka/src/test/java/org/apache/spark/streaming/akka/JavaAkkaUtilsSuite.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.streaming.akka;

import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.util.Timeout;
import org.junit.Test;

import org.apache.spark.api.java.function.Function0;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.LocalJavaStreamingContext;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;

import java.util.concurrent.TimeUnit;

/**
 * Compile-time/API smoke test for the Java-facing {@code AkkaUtils.createStream}
 * overloads. It only verifies that each overload is callable from Java with the
 * expected argument and type-parameter shapes; no data is actually received.
 */
public class JavaAkkaUtilsSuite extends LocalJavaStreamingContext {
  @Test
  public void testAkkaUtils() {
    // tests the API, does not actually test data receiving.
    // The local variables are intentionally unused: assigning each result to a
    // JavaReceiverInputDStream<String> is itself the assertion that the
    // overload exists and returns the right type.

    // Minimal overload: context, actor Props, receiver name.
    JavaReceiverInputDStream<String> test1 = AkkaUtils.<String>createStream(
      ssc, Props.create(JavaTestActor.class), "test");
    // Overload with an explicit storage level.
    JavaReceiverInputDStream<String> test2 = AkkaUtils.<String>createStream(
      ssc, Props.create(JavaTestActor.class), "test", StorageLevel.MEMORY_AND_DISK_SER_2());
    // Full overload: custom ActorSystem factory and supervisor strategy.
    JavaReceiverInputDStream<String> test3 = AkkaUtils.<String>createStream(
      ssc,
      Props.create(JavaTestActor.class),
      "test",
      StorageLevel.MEMORY_AND_DISK_SER_2(),
      new ActorSystemCreatorForTest(),
      SupervisorStrategy.defaultStrategy());
  }
}

/**
 * Test-only ActorSystem factory. Returning null is acceptable here because the
 * stream is never started, so the factory is never invoked for real work.
 */
class ActorSystemCreatorForTest implements Function0<ActorSystem> {
  @Override
  public ActorSystem call() {
    return null;
  }
}

/**
 * Test-only receiver actor exercising both {@code store} overloads
 * (fire-and-forget and with an ack timeout).
 */
class JavaTestActor extends JavaActorReceiver {
  @Override
  public void onReceive(Object message) throws Exception {
    store((String) message);
    store((String) message, new Timeout(1000, TimeUnit.MILLISECONDS));
  }
}
2,954
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/VirtualClassItem.java
/********************************************************************************
 * Copyright (c) 2020 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.jifa.common.util.EscapeUtil;
import org.eclipse.mat.SnapshotException;
import org.eclipse.mat.query.Bytes;
import org.eclipse.mat.query.IContextObjectSet;
import org.eclipse.mat.query.IStructuredResult;
import org.eclipse.mat.snapshot.ISnapshot;
import org.eclipse.jifa.common.util.UseAccessor;
import org.eclipse.jifa.hda.api.AnalysisException;

import static org.eclipse.jifa.hda.api.Model.DominatorTree;

/**
 * A dominator-tree "class" row that lazily reads its values from an underlying
 * MAT {@link IStructuredResult} row instead of storing them. Each getter pulls
 * one column from the result row; the column indexes below mirror the column
 * layout of the MAT query this result came from.
 */
@UseAccessor
public class VirtualClassItem extends DominatorTree.ClassItem {

    // Column layout of the backing structured result for class rows.
    static final int COLUMN_LABEL = 0;
    static final int COLUMN_OBJECTS = 1;
    static final int COLUMN_SHALLOW = 2;
    static final int COLUMN_RETAINED = 3;
    static final int COLUMN_PERCENT = 4;

    // transient: these MAT handles must not be serialized with the item.
    transient final ISnapshot snapshot;
    transient final IStructuredResult results;
    // The raw result-row object this item wraps.
    transient final Object e;

    public VirtualClassItem(final ISnapshot snapshot, final IStructuredResult results,
                            final Object e) {
        this.snapshot = snapshot;
        this.results = results;
        this.e = e;
        // Cache the object id up front; all other values stay lazy.
        this.objectId = results.getContext(e).getObjectId();
    }

    /** Class rows carry no GC-root suffix, so this is always null. */
    @Override
    public String getSuffix() {
        return null;
    }

    @Override
    public int getObjectId() {
        return objectId;
    }

    /** Maps the heap object behind this row to a JavaObject type constant. */
    @Override
    public int getObjectType() {
        try {
            return HeapDumpAnalyzerImpl.typeOf(snapshot.getObject(objectId));
        } catch (SnapshotException se) {
            // Wrap the checked MAT exception in the analyzer's unchecked type.
            throw new AnalysisException(se);
        }
    }

    @Override
    public boolean isGCRoot() {
        return snapshot.isGCRoot(objectId);
    }

    /** Display label, unescaped from the HTML-escaped form MAT stores. */
    @Override
    public String getLabel() {
        return EscapeUtil.unescapeLabel((String) results.getColumnValue(e, COLUMN_LABEL));
    }

    /** Number of object instances aggregated under this class row. */
    @Override
    public int getObjects() {
        return (Integer) results.getColumnValue(e, COLUMN_OBJECTS);
    }

    /** Ids of all objects aggregated under this row. */
    @Override
    public int[] getObjectIds() {
        return ((IContextObjectSet) results.getContext(e)).getObjectIds();
    }

    @Override
    public long getShallowSize() {
        return ((Bytes) results.getColumnValue(e, COLUMN_SHALLOW)).getValue();
    }

    @Override
    public long getRetainedSize() {
        return ((Bytes) results.getColumnValue(e, COLUMN_RETAINED)).getValue();
    }

    /** Retained size as a percentage of the heap, as computed by MAT. */
    @Override
    public double getPercent() {
        return (Double) results.getColumnValue(e, COLUMN_PERCENT);
    }
}
2,955
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/VirtualDefaultItem.java
/********************************************************************************
 * Copyright (c) 2020 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.jifa.common.util.EscapeUtil;
import org.eclipse.mat.SnapshotException;
import org.eclipse.mat.query.Bytes;
import org.eclipse.mat.query.IStructuredResult;
import org.eclipse.mat.snapshot.ISnapshot;
import org.eclipse.mat.snapshot.model.IObject;
import org.eclipse.jifa.common.util.UseAccessor;
import org.eclipse.jifa.hda.api.AnalysisException;

import static org.eclipse.jifa.hda.api.Model.DominatorTree;

/**
 * A dominator-tree "object" row backed lazily by a MAT
 * {@link IStructuredResult} row. Unlike {@link VirtualClassItem}, this row
 * represents a single heap object, so its column layout has no object-count
 * column and its suffix reflects the object's GC-root status.
 */
@UseAccessor
public class VirtualDefaultItem extends DominatorTree.DefaultItem {

    // Column layout of the backing structured result for single-object rows.
    static final int COLUMN_LABEL = 0;
    static final int COLUMN_SHALLOW = 1;
    static final int COLUMN_RETAINED = 2;
    static final int COLUMN_PERCENT = 3;

    // transient: MAT handles must not be serialized with the item.
    transient final ISnapshot snapshot;
    transient final IStructuredResult results;
    // The raw result-row object this item wraps.
    transient final Object e;

    public VirtualDefaultItem(final ISnapshot snapshot, final IStructuredResult results,
                              final Object e) {
        this.snapshot = snapshot;
        this.results = results;
        this.e = e;
        // Cache the object id up front; all other values stay lazy.
        this.objectId = results.getContext(e).getObjectId();
    }

    /** GC-root type description for this object, or an empty string if none. */
    @Override
    public String getSuffix() {
        try {
            IObject object = snapshot.getObject(objectId);
            return Helper.suffix(object.getGCRootInfo());
        } catch (SnapshotException se) {
            // Wrap the checked MAT exception in the analyzer's unchecked type.
            throw new AnalysisException(se);
        }
    }

    @Override
    public int getObjectId() {
        return objectId;
    }

    /** Maps the heap object behind this row to a JavaObject type constant. */
    @Override
    public int getObjectType() {
        try {
            return HeapDumpAnalyzerImpl.typeOf(snapshot.getObject(objectId));
        } catch (SnapshotException se) {
            throw new AnalysisException(se);
        }
    }

    @Override
    public boolean isGCRoot() {
        return snapshot.isGCRoot(objectId);
    }

    /** Display label, unescaped from the HTML-escaped form MAT stores. */
    @Override
    public String getLabel() {
        return EscapeUtil.unescapeLabel((String) results.getColumnValue(e, COLUMN_LABEL));
    }

    @Override
    public long getShallowSize() {
        return ((Bytes) results.getColumnValue(e, COLUMN_SHALLOW)).getValue();
    }

    @Override
    public long getRetainedSize() {
        return ((Bytes) results.getColumnValue(e, COLUMN_RETAINED)).getValue();
    }

    /** Retained size as a percentage of the heap, as computed by MAT. */
    @Override
    public double getPercent() {
        return (Double) results.getColumnValue(e, COLUMN_PERCENT);
    }
}
2,956
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/AnalysisContext.java
/********************************************************************************
 * Copyright (c) 2021 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.jifa.hda.api.Model;
import org.eclipse.mat.query.IResult;
import org.eclipse.mat.query.IResultTree;
import org.eclipse.mat.query.refined.RefinedTable;
import org.eclipse.mat.snapshot.ISnapshot;

import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Per-snapshot analysis state: wraps the MAT {@link ISnapshot} plus
 * soft-referenced caches of expensive derived results (class-loader explorer,
 * direct byte buffers, leak report) so they can be recomputed after a GC
 * reclaims them rather than pinned in memory.
 */
public class AnalysisContext {

    final ISnapshot snapshot;

    // Caches are volatile SoftReferences: volatile for safe publication across
    // request threads, soft so the heavy MAT results can be dropped under
    // memory pressure and rebuilt on demand. They start out "empty"
    // (referent null) rather than as a null reference.
    volatile SoftReference<ClassLoaderExplorerData> classLoaderExplorerData = new SoftReference<>(null);

    volatile SoftReference<DirectByteBufferData> directByteBufferData = new SoftReference<>(null);

    volatile SoftReference<LeakReportData> leakReportData = new SoftReference<>(null);

    AnalysisContext(ISnapshot snapshot) {
        this.snapshot = snapshot;
    }

    /** Cached results backing the class-loader explorer view. */
    static class ClassLoaderExplorerData {
        IResultTree result;

        // classloader object Id -> record
        Map<Integer, Object> classLoaderIdMap;

        // Top-level rows of the explorer tree.
        List<?> items;

        // Aggregate totals across all class loaders.
        int definedClasses;
        int numberOfInstances;
    }

    /** Cached results backing the direct-byte-buffer view. */
    static class DirectByteBufferData {

        // OQL query selecting live (cleaner != null) DirectByteBuffers with
        // their position/limit/capacity fields.
        static final String OQL = "SELECT s.@displayName as label, s.position as position, s.limit as limit, s.capacity as " +
                                  "capacity FROM java.nio.DirectByteBuffer s where s.cleaner != null";

        // Argument map handed to the MAT OQL query runner.
        static final Map<String, Object> ARGS = new HashMap<>(1);

        static {
            ARGS.put("queryString", OQL);
        }

        RefinedTable resultContext;

        Model.DirectByteBuffer.Summary summary;

        // Column accessors; indexes match the SELECT order in OQL above.
        public String label(Object row) {
            return (String) resultContext.getColumnValue(row, 0);
        }

        public int position(Object row) {
            return (Integer) resultContext.getColumnValue(row, 1);
        }

        public int limit(Object row) {
            return (Integer) resultContext.getColumnValue(row, 2);
        }

        public int capacity(Object row) {
            return (Integer) resultContext.getColumnValue(row, 3);
        }
    }

    /** Cached result backing the leak-suspects report. */
    static class LeakReportData {
        IResult result;
    }

    // Two contexts are equal iff they wrap the same snapshot.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AnalysisContext that = (AnalysisContext) o;
        return Objects.equals(snapshot, that.snapshot);
    }

    @Override
    public int hashCode() {
        return Objects.hash(snapshot);
    }
}
2,957
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/ExoticTreeFinder.java
/********************************************************************************
 * Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.mat.query.IResultTree;

import java.util.List;
import java.util.function.Function;

import static org.eclipse.jifa.common.util.Assertion.ASSERT;

// find elements in this exotic tree
// MAT's APIs really astonished me, I'm climbing the s*** mountains of unbelievable awful smell;
// 2020-12-11
/**
 * Generic breadth-then-depth search over a MAT {@link IResultTree} whose node
 * identity and child enumeration are supplied by caller-provided callbacks
 * ({@link #setPredicate} maps a node to its id, {@link #setGetChildrenCallback}
 * enumerates children). Lookups are best-effort: any exception during
 * traversal is printed and the search returns null instead of propagating.
 */
public class ExoticTreeFinder {

    private final IResultTree tree;

    // Maps (tree, node) -> node id, or null when the node has no usable id.
    private BinFunction<IResultTree, Object, Integer> predicate;

    // Enumerates the children of a node; may return null for leaves.
    private Function<Object, List<?>> getChildrenCallback;

    public ExoticTreeFinder(IResultTree tree) {
        ASSERT.notNull(tree);
        this.tree = tree;
    }

    public ExoticTreeFinder setGetChildrenCallback(Function<Object, List<?>> getChildrenCallback) {
        this.getChildrenCallback = getChildrenCallback;
        return this;
    }

    public ExoticTreeFinder setPredicate(BinFunction<IResultTree, Object, Integer> predicate) {
        this.predicate = predicate;
        return this;
    }

    /**
     * Returns the children of the node with the given id, or null if the node
     * is not found or traversal fails (deliberately best-effort).
     */
    public List<?> findChildrenOf(int parentNodeId) {
        Object targetParentNode = null;
        try {
            targetParentNode = findTargetParentNodeImpl(tree.getElements(), parentNodeId);
        } catch (Exception e) {
            // Best-effort contract: log and fall through to the null return.
            e.printStackTrace();
        }
        if (targetParentNode != null) {
            return getChildrenCallback.apply(targetParentNode);
        }
        return null;
    }

    /** Returns the node with the given id, or null if not found / on failure. */
    public Object findTargetParentNode(int parentNodeId) {
        try {
            return findTargetParentNodeImpl(tree.getElements(), parentNodeId);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    // Searches the current level first, then recurses into each node's
    // children; returns null when the id is absent from the subtree.
    private Object findTargetParentNodeImpl(List<?> nodes, int parentNodeId) throws Exception {
        if (nodes == null) {
            return null;
        }

        for (Object node : nodes) {
            Integer nodeId = predicate.apply(tree, node);
            if (nodeId != null && nodeId == parentNodeId) {
                return node;
            }
        }

        for (Object node : nodes) {
            List<?> children = getChildrenCallback.apply(node);
            if (children != null) {
                Object targetParentNode = findTargetParentNodeImpl(children, parentNodeId);
                if (targetParentNode != null) {
                    return targetParentNode;
                }
            }
        }
        return null;
    }

    /** Two-argument function that may throw; R apply(A, B). */
    public interface BinFunction<A, B, R> {
        R apply(A a, B b) throws Exception;
    }
}
2,958
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/Helper.java
/********************************************************************************
 * Copyright (c) 2020, 2021 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.mat.SnapshotException;
import org.eclipse.mat.query.IContextObject;
import org.eclipse.mat.query.IResultTree;
import org.eclipse.mat.snapshot.ISnapshot;
import org.eclipse.mat.snapshot.model.GCRootInfo;
import org.eclipse.mat.snapshot.model.IObject;
import org.eclipse.mat.snapshot.model.NamedReference;
import org.eclipse.mat.snapshot.query.IHeapObjectArgument;
import org.eclipse.mat.util.IProgressListener;
import org.eclipse.mat.util.VoidProgressListener;

import java.util.Iterator;
import java.util.List;

import static org.eclipse.jifa.common.Constant.EMPTY_STRING;
import static org.eclipse.jifa.common.util.Assertion.ASSERT;

/**
 * Static utilities shared by the heap-dump analyzer: object-id extraction,
 * GC-root suffix/reference-name prefix formatting, and small adapters for
 * MAT query arguments and result-tree navigation.
 */
public class Helper {

    // Sentinel returned when a context carries no object id.
    public static final int ILLEGAL_OBJECT_ID = -1;

    // Shared no-op listener for MAT calls that require a progress listener.
    // NOTE(review): not declared final — looks like it should be; confirm no
    // code elsewhere reassigns it before tightening.
    public static IProgressListener VOID_LISTENER = new VoidProgressListener();

    /** Object id of the context, or {@link #ILLEGAL_OBJECT_ID} when null. */
    public static int fetchObjectId(IContextObject context) {
        return context == null ? ILLEGAL_OBJECT_ID : context.getObjectId();
    }

    /** GC-root type description for an object id, or "" when not a GC root. */
    public static String suffix(ISnapshot snapshot, int objectId) throws SnapshotException {
        GCRootInfo[] gc = snapshot.getGCRootInfo(objectId);
        return gc != null ? GCRootInfo.getTypeSetAsString(gc) : EMPTY_STRING;
    }

    /** GC-root type description for pre-fetched root info, or "" when null. */
    public static String suffix(GCRootInfo[] gcRootInfo) {
        return gcRootInfo != null ? GCRootInfo.getTypeSetAsString(gcRootInfo) : EMPTY_STRING;
    }

    /**
     * Comma-joined names of the outbound references from {@code objectId}
     * that point at the object with id {@code outbound} (the field names
     * through which objectId references outbound).
     */
    public static String prefix(ISnapshot snapshot, int objectId, int outbound) throws SnapshotException {
        IObject object = snapshot.getObject(objectId);
        long address = snapshot.mapIdToAddress(outbound);
        StringBuilder s = new StringBuilder(64);
        List<NamedReference> refs = object.getOutboundReferences();
        for (NamedReference reference : refs) {
            if (reference.getObjectAddress() == address) {
                if (s.length() > 0) {
                    s.append(", ");
                }
                s.append(reference.getName());
            }
        }
        return s.toString();
    }

    /**
     * Wraps a fixed id array as a MAT {@link IHeapObjectArgument}. The
     * iterator yields the whole array exactly once, as MAT expects.
     */
    public static IHeapObjectArgument buildHeapObjectArgument(int[] ids) {
        return new IHeapObjectArgument() {
            @Override
            public int[] getIds(IProgressListener iProgressListener) {
                return ids;
            }

            @Override
            public String getLabel() {
                return "";
            }

            @Override
            public Iterator<int[]> iterator() {
                return new Iterator<int[]>() {
                    // Single-shot: the whole array is one batch.
                    boolean hasNext = true;

                    @Override
                    public boolean hasNext() {
                        return hasNext;
                    }

                    @Override
                    public int[] next() {
                        ASSERT.isTrue(hasNext);
                        hasNext = false;
                        return ids;
                    }
                };
            }
        };
    }

    // Linear scan of one tree level for the element whose context carries
    // targetId; null when absent.
    private static Object findObjectInTree(IResultTree tree, List<?> levelElements, int targetId) {
        if (levelElements != null) {
            for (Object o : levelElements) {
                if (tree.getContext(o).getObjectId() == targetId) {
                    return o;
                }
            }
        }
        return null;
    }

    /**
     * Walks {@code tree} level by level following the object-id path
     * {@code idPathInResultTree} (root id first) and returns the element at
     * the end of the path, or null if any step of the path is missing.
     */
    public static Object fetchObjectInResultTree(IResultTree tree, int[] idPathInResultTree) {
        if (idPathInResultTree == null || idPathInResultTree.length == 0) {
            return null;
        }

        // find the object in root tree
        Object objectInTree = findObjectInTree(tree, tree.getElements(), idPathInResultTree[0]);

        // find the object in children tree
        for (int i = 1; i < idPathInResultTree.length; i++) {
            if (objectInTree == null) {
                return null;
            }
            objectInTree = findObjectInTree(tree, tree.getChildren(objectInTree), idPathInResultTree[i]);
        }
        return objectInTree;
    }
}
2,959
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/VirtualThreadItem.java
/******************************************************************************** * Copyright (c) 2021 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.impl; import org.eclipse.mat.SnapshotException; import org.eclipse.mat.query.Bytes; import org.eclipse.mat.query.IResultTree; import org.eclipse.mat.snapshot.ISnapshot; import org.eclipse.mat.snapshot.model.IObject; import org.eclipse.jifa.common.util.UseAccessor; import org.eclipse.jifa.hda.api.AnalysisException; import static org.eclipse.jifa.hda.api.Model.Thread; @UseAccessor public class VirtualThreadItem extends Thread.Item { static final int COLUMN_OBJECT = 0; static final int COLUMN_NAME = 1; static final int COLUMN_SHALLOW = 2; static final int COLUMN_RETAINED = 3; static final int COLUMN_CONTEXT_CLASS_LOADER = 4; // changes depending on MAT report results final int COLUMN_DAEMON; transient final IResultTree result; transient final Object row; public VirtualThreadItem(final IResultTree result, final Object row) { this.row = row; this.result = result; this.objectId = result.getContext(row).getObjectId(); // the report changed a little in MAT: // Bug 572596 Add maximum retained heap size to thread overview stack // a row was injected at column position 5, so the daemon column may have been // pushed out to column 6 boolean includesMaxLocalRetained = (result.getColumns().length == 10); this.COLUMN_DAEMON = includesMaxLocalRetained ? 
6 : 5; } @Override public int getObjectId() { return objectId; } @Override public String getObject() { return (String) result.getColumnValue(row, COLUMN_OBJECT); } @Override public String getName() { return (String) result.getColumnValue(row, COLUMN_NAME); } @Override public long getShallowSize() { return ((Bytes) result.getColumnValue(row, COLUMN_SHALLOW)).getValue(); } @Override public long getRetainedSize() { return ((Bytes) result.getColumnValue(row, COLUMN_RETAINED)).getValue(); } @Override public String getContextClassLoader() { return (String) result.getColumnValue(row, COLUMN_CONTEXT_CLASS_LOADER); } @Override public boolean isHasStack() { return (Boolean) result.hasChildren(row); } @Override public boolean isDaemon() { return (Boolean) result.getColumnValue(row, COLUMN_DAEMON); } }
2,960
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/Activator.java
/********************************************************************************
 * Copyright (c) 2021 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.impl;

import org.eclipse.jifa.hda.api.HeapDumpAnalyzer;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;

import java.util.Hashtable;

/**
 * OSGi bundle activator: on start it publishes the heap-dump analyzer's
 * {@link HeapDumpAnalyzer.Provider} implementation as an OSGi service so
 * other bundles can look it up; nothing needs to be torn down on stop
 * (the framework unregisters services automatically when a bundle stops).
 */
public class Activator implements BundleActivator {

    @Override
    public void start(BundleContext bundleContext) {
        // Empty Hashtable: the service is registered with no properties.
        bundleContext
            .registerService(HeapDumpAnalyzer.Provider.class, HeapDumpAnalyzerImpl.PROVIDER,
                             new Hashtable<>());
    }

    @Override
    public void stop(BundleContext bundleContext) {
        // Intentionally empty: no resources to release.
    }
}
2,961
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/HeapDumpAnalyzerImpl.java
/******************************************************************************** * Copyright (c) 2021, 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.impl; import org.eclipse.jifa.common.Constant; import org.eclipse.jifa.common.JifaException; import org.eclipse.jifa.common.cache.Cacheable; import org.eclipse.jifa.common.listener.ProgressListener; import org.eclipse.jifa.common.request.PagingRequest; import org.eclipse.jifa.common.util.EscapeUtil; import org.eclipse.jifa.common.util.PageViewBuilder; import org.eclipse.jifa.common.util.ReflectionUtil; import org.eclipse.jifa.common.vo.PageView; import org.eclipse.jifa.common.vo.support.SearchPredicate; import org.eclipse.jifa.common.vo.support.SearchType; import org.eclipse.jifa.common.vo.support.SortTableGenerator; import org.eclipse.jifa.hda.api.AnalysisException; import org.eclipse.jifa.hda.api.HeapDumpAnalyzer; import org.eclipse.jifa.hda.api.Model; import org.eclipse.mat.SnapshotException; import org.eclipse.mat.hprof.extension.HprofPreferencesAccess; import org.eclipse.mat.hprof.ui.HprofPreferences; import org.eclipse.mat.internal.snapshot.SnapshotQueryContext; import org.eclipse.mat.parser.model.ClassImpl; import org.eclipse.mat.parser.model.XClassHistogramRecord; import org.eclipse.mat.parser.model.XClassLoaderHistogramRecord; import org.eclipse.mat.query.Bytes; import org.eclipse.mat.query.Column; import org.eclipse.mat.query.IContextObject; import org.eclipse.mat.query.IContextObjectSet; import org.eclipse.mat.query.IDecorator; import org.eclipse.mat.query.IIconProvider; 
import org.eclipse.mat.query.IResult; import org.eclipse.mat.query.IResultPie; import org.eclipse.mat.query.IResultTable; import org.eclipse.mat.query.IResultTree; import org.eclipse.mat.query.refined.RefinedResultBuilder; import org.eclipse.mat.query.refined.RefinedTable; import org.eclipse.mat.query.refined.RefinedTree; import org.eclipse.mat.query.results.CompositeResult; import org.eclipse.mat.query.results.TextResult; import org.eclipse.mat.report.QuerySpec; import org.eclipse.mat.report.SectionSpec; import org.eclipse.mat.report.Spec; import org.eclipse.mat.snapshot.ClassHistogramRecord; import org.eclipse.mat.snapshot.Histogram; import org.eclipse.mat.snapshot.HistogramRecord; import org.eclipse.mat.snapshot.IPathsFromGCRootsComputer; import org.eclipse.mat.snapshot.ISnapshot; import org.eclipse.mat.snapshot.SnapshotFactory; import org.eclipse.mat.snapshot.SnapshotInfo; import org.eclipse.mat.snapshot.UnreachableObjectsHistogram; import org.eclipse.mat.snapshot.model.Field; import org.eclipse.mat.snapshot.model.GCRootInfo; import org.eclipse.mat.snapshot.model.IClass; import org.eclipse.mat.snapshot.model.IClassLoader; import org.eclipse.mat.snapshot.model.IInstance; import org.eclipse.mat.snapshot.model.IObject; import org.eclipse.mat.snapshot.model.IObjectArray; import org.eclipse.mat.snapshot.model.IPrimitiveArray; import org.eclipse.mat.snapshot.model.ObjectReference; import org.eclipse.mat.snapshot.query.Icons; import org.eclipse.mat.snapshot.query.SnapshotQuery; import java.lang.ref.SoftReference; import java.net.URL; import java.nio.file.Path; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.stream.Collectors; import static org.eclipse.jifa.common.listener.ProgressListener.NoOpProgressListener; import static org.eclipse.jifa.common.vo.support.SearchPredicate.createPredicate; import static org.eclipse.jifa.hda.api.Model.*; import static 
org.eclipse.jifa.hda.impl.AnalysisContext.ClassLoaderExplorerData;
import static org.eclipse.jifa.hda.impl.AnalysisContext.DirectByteBufferData;

/**
 * Eclipse MAT backed implementation of {@link HeapDumpAnalyzer}.
 * All public methods delegate to the MAT snapshot held in {@link AnalysisContext}
 * and funnel checked exceptions through the {@code $} helpers, which rethrow
 * everything as {@link AnalysisException}.
 */
public class HeapDumpAnalyzerImpl implements HeapDumpAnalyzer {

    // Factory instance exposed to the framework for creating analyzers.
    static final Provider PROVIDER = new ProviderImpl();

    // Per-heap-dump state: the MAT snapshot plus soft-referenced cached query results.
    private final AnalysisContext context;

    public HeapDumpAnalyzerImpl(AnalysisContext context) {
        this.context = context;
    }

    /**
     * Classifies a MAT object as one of the {@code JavaObject} type constants:
     * class, class loader, array, or plain instance (checked in that order,
     * since IClassLoader objects are also classes of a sort in MAT's model).
     */
    public static int typeOf(IObject object) {
        if (object instanceof IClass) {
            return JavaObject.CLASS_TYPE;
        }
        if (object instanceof IClassLoader) {
            return JavaObject.CLASS_LOADER_TYPE;
        }
        if (object.getClazz().isArrayType()) {
            return JavaObject.ARRAY_TYPE;
        }
        return JavaObject.NORMAL_TYPE;
    }

    /**
     * Maps the icon MAT attaches to a class-reference row to a
     * {@code ClassReferrer.Type} constant (new / mixed / old generation).
     *
     * @throws AnalysisException if the icon is not one of the known class icons
     */
    private static int getClassReferrerType(URL icon) {
        if (icon == Icons.CLASS_IN || icon == Icons.CLASS_OUT) {
            return ClassReferrer.Type.NEW;
        } else if (icon == Icons.CLASS_IN_MIXED || icon == Icons.CLASS_OUT_MIXED) {
            return ClassReferrer.Type.MIXED;
        } else if (icon == Icons.CLASS_IN_OLD || icon == Icons.CLASS_OUT_OLD) {
            return ClassReferrer.Type.OLD_FAD;
        }
        throw new AnalysisException("Should not reach here");
    }

    /**
     * Parses exclude entries of the form {@code "<class pattern>[:field1,field2,...]"}
     * into a map from every class matching the pattern to the set of excluded
     * field names (a {@code null} value means no specific fields were listed).
     * Returns {@code null} when {@code excludes} is null or empty.
     */
    private static Map<IClass, Set<String>> convert(AnalysisContext context,
                                                    List<String> excludes) throws SnapshotException {
        Map<IClass, Set<String>> excludeMap = null;
        if (excludes != null && !excludes.isEmpty()) {
            excludeMap = new HashMap<>();
            for (String entry : excludes) {
                String pattern = entry;
                Set<String> fields = null;
                int colon = entry.indexOf(':');
                if (colon >= 0) {
                    // Everything after ':' is a comma-separated field list.
                    fields = new HashSet<>();
                    StringTokenizer tokens = new StringTokenizer(entry.substring(colon + 1), ",");
                    while (tokens.hasMoreTokens())
                        fields.add(tokens.nextToken());
                    pattern = pattern.substring(0, colon);
                }
                for (IClass clazz : context.snapshot.getClassesByName(Pattern.compile(pattern), true))
                    excludeMap.put(clazz, fields);
            }
        }
        return excludeMap;
    }

    // Runs the given supplier, converting any Throwable into AnalysisException.
    private static <V> V $(RV<V> rv) {
        try {
            return rv.run();
        } catch (Throwable t) {
            throw new AnalysisException(t);
        }
    }

    // Void variant of the helper above.
    private void $(R e) {
        $(() -> {
            e.run();
            return null;
        });
    }
    /** Releases the underlying MAT snapshot. */
    @Override
    public void dispose() {
        $(() -> SnapshotFactory.dispose(context.snapshot));
    }

    /** Returns overview details (JVM info, counts, heap size) from the snapshot metadata. */
    @Override
    public Overview.Details getDetails() {
        return $(() -> {
                     SnapshotInfo snapshotInfo = context.snapshot.getSnapshotInfo();
                     return new Overview.Details(snapshotInfo.getJvmInfo(),
                                                 snapshotInfo.getIdentifierSize(),
                                                 snapshotInfo.getCreationDate().getTime(),
                                                 snapshotInfo.getNumberOfObjects(),
                                                 snapshotInfo.getNumberOfGCRoots(),
                                                 snapshotInfo.getNumberOfClasses(),
                                                 snapshotInfo.getNumberOfClassLoaders(),
                                                 snapshotInfo.getUsedHeapSize(), false);
                 }
        );
    }

    // Convenience overload: no extra arguments, no progress reporting.
    private <Res extends IResult> Res queryByCommand(AnalysisContext context,
                                                     String command) throws SnapshotException {
        return queryByCommand(context, command, null, NoOpProgressListener);
    }

    // Cached overload: results are memoized per (context, command, args) by @Cacheable.
    @Cacheable
    protected <Res extends IResult> Res queryByCommand(AnalysisContext context, String command,
                                                       Map<String, Object> args) throws SnapshotException {
        return queryByCommand(context, command, args, NoOpProgressListener);
    }

    // Convenience overload: progress reporting but no extra arguments.
    private <Res extends IResult> Res queryByCommand(AnalysisContext context, String command,
                                                     ProgressListener listener) throws SnapshotException {
        return queryByCommand(context, command, null, listener);
    }

    /**
     * Executes a MAT query command (see MAT's SnapshotQuery syntax) against the
     * snapshot, applying the optional named arguments before execution.
     * The unchecked cast is safe only if the caller requests the result type
     * the command actually produces.
     */
    @SuppressWarnings("unchecked")
    private <Res extends IResult> Res queryByCommand(AnalysisContext context, String command,
                                                     Map<String, Object> args,
                                                     ProgressListener listener) throws SnapshotException {
        SnapshotQuery query = SnapshotQuery.parse(command, context.snapshot);
        if (args != null) {
            args.forEach((k, v) -> $(() -> query.setArgument(k, v)));
        }
        return (Res) query.execute(new ProgressListenerImpl(listener));
    }

    /** Returns the dumped JVM's system properties (columns 1/2 of MAT's system_properties table). */
    @Override
    public Map<String, String> getSystemProperties() {
        return $(() -> {
            IResultTable result = queryByCommand(context, "system_properties");
            Map<String, String> map = new HashMap<>();
            int count = result.getRowCount();
            for (int i = 0; i < count; i++) {
                Object row = result.getRow(i);
                map.put((String) result.getColumnValue(row, 1),
                        (String) result.getColumnValue(row, 2));
            }
            return map;
        });
    }

    /** Builds the basic JavaObject view (label, sizes, type, GC-root flag) for one object id. */
    @Override
    public JavaObject getObjectInfo(int objectId) {
        return $(() -> {
            JavaObject ho = new JavaObject();
            IObject object = context.snapshot.getObject(objectId);
            ho.setObjectId(objectId);
            ho.setLabel(EscapeUtil.unescapeLabel(object.getDisplayName()));
            ho.setShallowSize(object.getUsedHeapSize());
            ho.setRetainedSize(object.getRetainedHeapSize());
            ho.setObjectType(typeOf(object));
            ho.setGCRoot(context.snapshot.isGCRoot(objectId));
            ho.setHasOutbound(true);
            ho.setSuffix(Helper.suffix(context.snapshot, objectId));
            return ho;
        });
    }

    /** Builds the inspector panel data: address, class, class loader, sizes, GC-root info. */
    @Override
    public InspectorView getInspectorView(int objectId) {
        return $(() -> {
            InspectorView view = new InspectorView();
            ISnapshot snapshot = context.snapshot;
            IObject object = snapshot.getObject(objectId);
            view.setObjectAddress(object.getObjectAddress());
            // For a class object use the class itself, otherwise its declaring class.
            IClass iClass = object instanceof IClass ? (IClass) object : object.getClazz();
            view.setName(iClass.getName());
            view.setObjectType(typeOf(object));
            view.setGCRoot(snapshot.isGCRoot(objectId));
            // class name and address of the object
            IClass clazz = object.getClazz();
            view.setClassLabel(clazz.getTechnicalName());
            view.setClassGCRoot(clazz.getGCRootInfo() != null);
            // super class name
            if (iClass.getSuperClass() != null) {
                view.setSuperClassName(iClass.getSuperClass().getName());
            }
            // class loader name and address
            IObject classLoader = snapshot.getObject(iClass.getClassLoaderId());
            view.setClassLoaderLabel(classLoader.getTechnicalName());
            view.setClassLoaderGCRoot(classLoader.getGCRootInfo() != null);
            view.setShallowSize(object.getUsedHeapSize());
            view.setRetainedSize(object.getRetainedHeapSize());
            // gc root
            GCRootInfo[] gcRootInfo = object.getGCRootInfo();
            view.setGcRootInfo(
                gcRootInfo != null ? "GC root: " + GCRootInfo.getTypeSetAsString(object.getGCRootInfo())
                                   : "no GC root");
            return view;
        });
    }

    // Renders an object's value: its class-specific name (e.g. a String's content)
    // when available, otherwise its technical name.
    private String getObjectValue(IObject o) {
        String text = o.getClassSpecificName();
        return text != null ?
               EscapeUtil.unescapeJava(text) : o.getTechnicalName();
    }

    // Converts a list of MAT fields into a page of FieldView items; object-valued
    // fields are resolved to their id and rendered value.
    private PageView<Model.FieldView> buildPageViewOfFields(List<Field> fields, int page, int pageSize) {
        return PageViewBuilder.build(fields, new PagingRequest(page, pageSize), field -> {
            Model.FieldView fv = new Model.FieldView();
            fv.fieldType = field.getType();
            fv.name = field.getName();
            Object value = field.getValue();
            if (value instanceof ObjectReference) {
                try {
                    fv.objectId = ((ObjectReference) value).getObjectId();
                    fv.value = getObjectValue(((ObjectReference) value).getObject());
                } catch (SnapshotException e) {
                    throw new AnalysisException(e);
                }
            } else if (value != null) {
                fv.value = value.toString();
            }
            return fv;
        });
    }

    /**
     * Returns a page of an object's fields. Primitive and object arrays are
     * paged over their elements; plain instances return their instance fields;
     * class objects return synthetic static fields (names starting with "&lt;")
     * collected up the superclass chain.
     */
    @Override
    public PageView<Model.FieldView> getFields(int objectId, int page, int pageSize) {
        return $(() -> {
            ISnapshot snapshot = context.snapshot;
            IObject object = snapshot.getObject(objectId);
            PagingRequest pagingRequest = new PagingRequest(page, pageSize);
            if (object instanceof IPrimitiveArray) {
                List<Model.FieldView> fvs = new ArrayList<>();
                IPrimitiveArray pa = (IPrimitiveArray) object;
                int firstIndex = (pagingRequest.getPage() - 1) * pagingRequest.getPageSize();
                int lastIndex = Math.min(firstIndex + pagingRequest.getPageSize(), pa.getLength());
                for (int i = firstIndex; i < lastIndex; i++) {
                    fvs.add(new Model.FieldView(pa.getType(), "[" + i + "]", pa.getValueAt(i).toString()));
                }
                return new PageView<>(pagingRequest, pa.getLength(), fvs);
            } else if (object instanceof IObjectArray) {
                List<Model.FieldView> fvs = new ArrayList<>();
                IObjectArray oa = (IObjectArray) object;
                int firstIndex = (pagingRequest.getPage() - 1) * pagingRequest.getPageSize();
                int lastIndex = Math.min(firstIndex + pagingRequest.getPageSize(), oa.getLength());
                for (int i = firstIndex; i < lastIndex; i++) {
                    // Resolve one reference at a time; address 0 means a null slot.
                    long[] refs = oa.getReferenceArray(i, 1);
                    int refObjectId = 0;
                    if (refs[0] != 0) {
                        refObjectId = snapshot.mapAddressToId(refs[0]);
                    }
                    String value = null;
                    if (refObjectId != 0) {
                        value = getObjectValue(snapshot.getObject(refObjectId));
                    }
                    fvs.add(new Model.FieldView(IObject.Type.OBJECT, "[" + i + "]", value, refObjectId));
                }
                return new PageView<>(pagingRequest, oa.getLength(), fvs);
            }
            List<Field> fields = new ArrayList<>();
            boolean isClass = object instanceof IClass;
            IClass clazz = isClass ? (IClass) object : object.getClazz();
            if (object instanceof IInstance) {
                fields.addAll(((IInstance) object).getFields());
            } else if (object instanceof IClass) {
                do {
                    List<Field> staticFields = clazz.getStaticFields();
                    for (Field staticField : staticFields) {
                        // "<...>" names are MAT pseudo-statics shown in the field view.
                        if (staticField.getName().startsWith("<")) {
                            fields.add(staticField);
                        }
                    }
                } while ((clazz = clazz.getSuperClass()) != null);
            }
            return buildPageViewOfFields(fields, page, pageSize);
        });
    }

    /**
     * Returns a page of real static fields (names NOT starting with "&lt;").
     * For an instance, statics are collected up the superclass chain; for a
     * class object only its own statics are returned (loop runs once).
     */
    @Override
    public PageView<Model.FieldView> getStaticFields(int objectId, int page, int pageSize) {
        return $(() -> {
            ISnapshot snapshot = context.snapshot;
            IObject object = snapshot.getObject(objectId);
            boolean isClass = object instanceof IClass;
            IClass clazz = isClass ? (IClass) object : object.getClazz();
            List<Field> fields = new ArrayList<>();
            do {
                List<Field> staticFields = clazz.getStaticFields();
                for (Field staticField : staticFields) {
                    if (!staticField.getName().startsWith("<")) {
                        fields.add(staticField);
                    }
                }
            } while (!isClass && (clazz = clazz.getSuperClass()) != null);
            return buildPageViewOfFields(fields, page, pageSize);
        });
    }

    /** Maps a heap address to the snapshot's internal object id. */
    @Override
    public int mapAddressToId(long address) {
        return $(() -> context.snapshot.mapAddressToId(address));
    }

    /** Returns the class-specific value of an object, or the empty string when none exists. */
    @Override
    public String getObjectValue(int objectId) {
        return $(() -> {
            IObject object = context.snapshot.getObject(objectId);
            String text = object.getClassSpecificName();
            return text != null ? EscapeUtil.unescapeJava(text) : Constant.EMPTY_STRING;
        });
    }

    /** Returns the "biggest objects" pie slices from MAT as overview entries. */
    @Override
    public List<Overview.BigObject> getBigObjects() {
        return $(() -> {
            IResultPie result = queryByCommand(context, "pie_biggest_objects");
            List<?
                extends IResultPie.Slice> slices = result.getSlices();
            return slices
                .stream()
                .map(slice -> new Overview.BigObject(slice.getLabel(),
                                                     // "Remainder" slices have no context object.
                                                     slice.getContext() != null
                                                         ? slice.getContext().getObjectId()
                                                         : Helper.ILLEGAL_OBJECT_ID,
                                                     slice.getValue(), slice.getDescription()))
                .collect(Collectors.toList());
        });
    }

    /**
     * Runs MAT's ClassLoaderExplorerQuery once and caches the result (items sorted
     * by defined-class count, an id->row map, and aggregate totals) in a
     * SoftReference on the context. Double-checked locking on the context guards
     * against concurrent recomputation.
     */
    private ClassLoaderExplorerData queryClassLoader(AnalysisContext context) throws Exception {
        ClassLoaderExplorerData classLoaderExplorerData = context.classLoaderExplorerData.get();
        if (classLoaderExplorerData != null) {
            return classLoaderExplorerData;
        }
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        synchronized (context) {
            classLoaderExplorerData = context.classLoaderExplorerData.get();
            if (classLoaderExplorerData != null) {
                return classLoaderExplorerData;
            }
            IResultTree result = queryByCommand(context, "ClassLoaderExplorerQuery");
            classLoaderExplorerData = new ClassLoaderExplorerData();
            classLoaderExplorerData.result = result;
            Map<Integer, Object> classLoaderIdMap = new HashMap<>();
            for (Object r : result.getElements()) {
                classLoaderIdMap.put(result.getContext(r).getObjectId(), r);
            }
            classLoaderExplorerData.classLoaderIdMap = classLoaderIdMap;
            classLoaderExplorerData.items = result.getElements();
            // Sort descending by defined-class count (column 1).
            classLoaderExplorerData.items.sort((Comparator<Object>) (o1, o2) -> Integer
                .compare((int) result.getColumnValue(o2, 1), (int) result.getColumnValue(o1, 1)));
            for (Object item : classLoaderExplorerData.items) {
                classLoaderExplorerData.definedClasses += (int) result.getColumnValue(item, 1);
                classLoaderExplorerData.numberOfInstances += (int) result.getColumnValue(item, 2);
            }
            context.classLoaderExplorerData = new SoftReference<>(classLoaderExplorerData);
            return classLoaderExplorerData;
        }
    }

    /** Aggregated class-loader stats: loader count, defined classes, instances. */
    @Override
    public Model.ClassLoader.Summary getSummaryOfClassLoaders() {
        return $(() -> {
            ClassLoaderExplorerData data = queryClassLoader(context);
            Model.ClassLoader.Summary summary = new Model.ClassLoader.Summary();
            summary.setTotalSize(data.items.size());
            summary.setDefinedClasses(data.definedClasses);
            summary.setNumberOfInstances(data.numberOfInstances);
            return summary;
        });
    }

    /** Returns one page of top-level class loaders from the cached explorer data. */
    @Override
    public PageView<Model.ClassLoader.Item> getClassLoaders(int page, int pageSize) {
        return $(() -> {
            ClassLoaderExplorerData data = queryClassLoader(context);
            IResultTree result = data.result;
            return PageViewBuilder.build(data.items, new PagingRequest(page, pageSize), e -> {
                Model.ClassLoader.Item r = new Model.ClassLoader.Item();
                r.setObjectId(result.getContext(e).getObjectId());
                r.setPrefix(((IDecorator) result).prefix(e));
                r.setLabel((String) result.getColumnValue(e, 0));
                r.setDefinedClasses((Integer) result.getColumnValue(e, 1));
                r.setNumberOfInstances((Integer) result.getColumnValue(e, 2));
                r.setClassLoader(true);
                // FIXME
                r.setHasParent(false);
                return r;
            });
        });
    }

    /** Returns one page of a class loader's children (child loaders and defined classes). */
    @Override
    public PageView<Model.ClassLoader.Item> getChildrenOfClassLoader(int classLoaderId,
                                                                     int page, int pageSize) {
        return $(() -> {
            ClassLoaderExplorerData data = queryClassLoader(context);
            IResultTree result = data.result;
            Object o = data.classLoaderIdMap.get(classLoaderId);
            List<?> children = result.getChildren(o);
            return PageViewBuilder.build(children, new PagingRequest(page, pageSize), e -> {
                Model.ClassLoader.Item r = new Model.ClassLoader.Item();
                r.setObjectId(result.getContext(e).getObjectId());
                r.setPrefix(((IDecorator) result).prefix(e));
                r.setLabel((String) result.getColumnValue(e, 0));
                r.setNumberOfInstances((Integer) result.getColumnValue(e, 2));
                // Non-class children are themselves class loaders.
                if (!(e instanceof IClass)) {
                    r.setClassLoader(true);
                    r.setDefinedClasses((Integer) result.getColumnValue(e, 1));
                    // FIXME
                    r.setHasParent(false);
                }
                return r;
            });
        });
    }

    /** Summarizes MAT's unreachable-objects histogram (absent when the dump kept no such data). */
    @Override
    public UnreachableObject.Summary getSummaryOfUnreachableObjects() {
        return $(() -> {
            UnreachableObjectsHistogram histogram =
                (UnreachableObjectsHistogram) context.snapshot.getSnapshotInfo().getProperty(
                    UnreachableObjectsHistogram.class.getName());
            UnreachableObject.Summary summary = new UnreachableObject.Summary();
            if (histogram !=
                null) {
                summary.setTotalSize(histogram.getRowCount());
                int objects = 0;
                long shallowSize = 0;
                // Column 1 = object count, column 2 = shallow size per class record.
                for (Object record : histogram.getRecords()) {
                    objects += (Integer) histogram.getColumnValue(record, 1);
                    shallowSize += ((Bytes) histogram.getColumnValue(record, 2)).getValue();
                }
                summary.setObjects(objects);
                summary.setShallowSize(shallowSize);
            }
            return summary;
        });
    }

    /** Returns one page of unreachable-object records, sorted by shallow size descending. */
    @Override
    public PageView<UnreachableObject.Item> getUnreachableObjects(int page, int pageSize) {
        return $(() -> {
            UnreachableObjectsHistogram histogram =
                (UnreachableObjectsHistogram) context.snapshot.getSnapshotInfo().getProperty(
                    UnreachableObjectsHistogram.class.getName());
            List<?> total = new ArrayList<>(histogram.getRecords());
            total.sort((Comparator<Object>) (o1, o2) -> {
                long v2 = ((Bytes) histogram.getColumnValue(o2, 2)).getValue();
                long v1 = ((Bytes) histogram.getColumnValue(o1, 2)).getValue();
                return Long.compare(v2, v1);
            });
            return PageViewBuilder.build(total, new PagingRequest(page, pageSize), record -> {
                UnreachableObject.Item r = new UnreachableObject.Item();
                r.setClassName((String) histogram.getColumnValue(record, 0));
                r.setObjectId(Helper.fetchObjectId(histogram.getContext(record)));
                r.setObjects((Integer) histogram.getColumnValue(record, 1));
                r.setShallowSize(((Bytes) histogram.getColumnValue(record, 2)).getValue());
                return r;
            });
        });
    }

    /**
     * Runs the DirectByteBuffer OQL query once, sorts the refined table by
     * capacity (column 3, descending), accumulates position/limit/capacity
     * totals, and caches everything in a SoftReference on the context
     * (double-checked locking, same scheme as queryClassLoader).
     */
    private DirectByteBufferData queryDirectByteBufferData(
        AnalysisContext context) throws SnapshotException {
        DirectByteBufferData data = context.directByteBufferData.get();
        if (data != null) {
            return data;
        }
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        synchronized (context) {
            data = context.directByteBufferData.get();
            if (data != null) {
                return data;
            }
            data = new DirectByteBufferData();
            IResult result = queryByCommand(context, "oql", DirectByteBufferData.ARGS);
            IResultTable table;
            if (result instanceof IResultTable) {
                table = (IResultTable) result;
                RefinedResultBuilder builder =
                    new RefinedResultBuilder(new SnapshotQueryContext(context.snapshot), table);
                builder.setSortOrder(3, Column.SortDirection.DESC);
                data.resultContext = (RefinedTable) builder.build();
                DirectByteBuffer.Summary summary = new DirectByteBuffer.Summary();
                summary.totalSize = data.resultContext.getRowCount();
                for (int i = 0; i < summary.totalSize; i++) {
                    Object row = data.resultContext.getRow(i);
                    summary.position += data.position(row);
                    summary.limit += data.limit(row);
                    summary.capacity += data.capacity(row);
                }
                data.summary = summary;
            } else {
                // No direct byte buffers in this dump: empty summary, no table.
                data.summary = new DirectByteBuffer.Summary();
            }
            context.directByteBufferData = new SoftReference<>(data);
            return data;
        }
    }

    /** Totals (count, position, limit, capacity) over all direct byte buffers. */
    @Override
    public DirectByteBuffer.Summary getSummaryOfDirectByteBuffers() {
        return $(() -> queryDirectByteBufferData(context).summary);
    }

    /** Returns one page of direct byte buffers from the cached refined table. */
    @Override
    public PageView<DirectByteBuffer.Item> getDirectByteBuffers(int page, int pageSize) {
        return $(() -> {
            DirectByteBufferData data = queryDirectByteBufferData(context);
            RefinedTable resultContext = data.resultContext;
            return PageViewBuilder.build(new PageViewBuilder.Callback<Object>() {
                @Override
                public int totalSize() {
                    return data.summary.totalSize;
                }

                @Override
                public Object get(int index) {
                    return resultContext.getRow(index);
                }
            }, new PagingRequest(page, pageSize), row -> {
                DirectByteBuffer.Item item = new DirectByteBuffer.Item();
                item.objectId = resultContext.getContext(row).getObjectId();
                item.label = data.label(row);
                item.position = data.position(row);
                item.limit = data.limit(row);
                item.capacity = data.capacity(row);
                return item;
            });
        });
    }

    // Shared implementation for paging an object's outbound referents or
    // inbound referrers (selected by the `outbound` flag).
    private PageView<JavaObject> queryIOBoundsOfObject(AnalysisContext context, int objectId,
                                                       int page, int pageSize,
                                                       boolean outbound) throws SnapshotException {
        ISnapshot snapshot = context.snapshot;
        int[] ids = outbound ?
                    snapshot.getOutboundReferentIds(objectId) : snapshot.getInboundRefererIds(objectId);
        return PageViewBuilder.build(ids, new PagingRequest(page, pageSize), id -> {
            try {
                JavaObject o = new JavaObject();
                IObject object = context.snapshot.getObject(id);
                o.setObjectId(id);
                o.setLabel(object.getDisplayName());
                o.setShallowSize(object.getUsedHeapSize());
                o.setRetainedSize(object.getRetainedHeapSize());
                o.setObjectType(typeOf(object));
                o.setGCRoot(snapshot.isGCRoot(id));
                o.setHasOutbound(true);
                o.setHasInbound(true);
                // Prefix describes the referencing field; argument order depends on direction.
                o.setPrefix(Helper.prefix(snapshot,
                                          outbound ? objectId : id,
                                          outbound ? id : objectId));
                o.setSuffix(Helper.suffix(snapshot, id));
                return o;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }

    /** One page of objects this object references. */
    @Override
    public PageView<JavaObject> getOutboundOfObject(int objectId, int page, int pageSize) {
        return $(() -> queryIOBoundsOfObject(context, objectId, page, pageSize, true));
    }

    /** One page of objects referencing this object. */
    @Override
    public PageView<JavaObject> getInboundOfObject(int objectId, int page, int pageSize) {
        return $(() -> queryIOBoundsOfObject(context, objectId, page, pageSize, false));
    }

    /** Top-level GC-root categories with their object counts. */
    @Override
    public List<GCRoot.Item> getGCRoots() {
        return $(() -> {
            IResultTree tree = queryByCommand(context, "gc_roots");
            return tree.getElements().stream().map(e -> {
                GCRoot.Item item = new GCRoot.Item();
                item.setClassName((String) tree.getColumnValue(e, 0));
                item.setObjects((Integer) tree.getColumnValue(e, 1));
                return item;
            }).collect(Collectors.toList());
        });
    }

    /**
     * Searches java.lang.String instances whose value matches the pattern
     * (substring match; a null/empty pattern matches everything) and returns
     * one page of results with sizes.
     */
    @Override
    public PageView<TheString.Item> getStrings(String pattern, int page, int pageSize) {
        return $(() -> {
            IResultTree tree =
                queryByCommand(context, "find_strings java.lang.String -pattern " +
                                        (pattern == null || pattern.equals("") ?
                                         ".*" : ".*" + pattern + ".*"));
            List<?> strings = tree.getElements();
            return PageViewBuilder.build(strings, new PagingRequest(page, pageSize), node -> {
                TheString.Item item = new TheString.Item();
                int id = tree.getContext(node).getObjectId();
                item.setObjectId(id);
                item.setLabel((String)tree.getColumnValue(node,0));
                item.setShallowSize(((Bytes) tree.getColumnValue(node, 1)).getValue());
                item.setRetainedSize(((Bytes) tree.getColumnValue(node, 2)).getValue());
                return item;
            });
        });
    }

    /** One page of classes under a GC-root category (indexed by position in getGCRoots). */
    @Override
    public PageView<GCRoot.Item> getClassesOfGCRoot(int rootTypeIndex, int page, int pageSize) {
        return $(() -> {
            IResultTree tree = queryByCommand(context, "gc_roots");
            Object root = tree.getElements().get(rootTypeIndex);
            List<?> classes = tree.getChildren(root);
            return PageViewBuilder.build(classes, new PagingRequest(page, pageSize), clazz -> {
                GCRoot.Item item = new GCRoot.Item();
                item.setClassName((String) tree.getColumnValue(clazz, 0));
                item.setObjects((Integer) tree.getColumnValue(clazz, 1));
                item.setObjectId(tree.getContext(clazz).getObjectId());
                return item;
            });
        });
    }

    /** One page of root objects for a (root category, class) pair. */
    @Override
    public PageView<JavaObject> getObjectsOfGCRoot(int rootTypeIndex, int classIndex,
                                                   int page, int pageSize) {
        return $(() -> {
            IResultTree tree = queryByCommand(context, "gc_roots");
            Object root = tree.getElements().get(rootTypeIndex);
            List<?> classes = tree.getChildren(root);
            Object clazz = classes.get(classIndex);
            List<?> objects = tree.getChildren(clazz);
            return PageViewBuilder.build(objects, new PagingRequest(page, pageSize), o -> $(() -> {
                    JavaObject ho = new JavaObject();
                    int objectId = tree.getContext(o).getObjectId();
                    IObject object = context.snapshot.getObject(objectId);
                    ho.setLabel(object.getDisplayName());
                    ho.setObjectId(objectId);
                    ho.setShallowSize(object.getUsedHeapSize());
                    ho.setRetainedSize(object.getRetainedHeapSize());
                    ho.setObjectType(typeOf(object));
                    ho.setGCRoot(context.snapshot.isGCRoot(objectId));
                    ho.setSuffix(Helper.suffix(object.getGCRootInfo()));
                    ho.setHasOutbound(true);
                    ho.setHasInbound(true);
                    return ho;
                }
            ));
        });
    }

    // Runs MAT's class_references query for one object id or an id array,
    // in the outbound or inbound direction.
    private IResultTree queryIOBoundClassOfClassReference(AnalysisContext context, Object idOrIds,
                                                          boolean outbound) throws SnapshotException {
        Map<String, Object> args = new HashMap<>();
        if (idOrIds instanceof int[]) {
            args.put("objects", idOrIds);
        } else {
            args.put("objects", new int[]{(Integer) idOrIds});
        }
        args.put("inbound", !outbound);
        return queryByCommand(context, "class_references", args);
    }

    // Converts one class-reference tree row into a ClassReferrer.Item,
    // deriving the generation type from the row's icon.
    private ClassReferrer.Item buildClassReferenceItem(IResultTree result, Object row) {
        ClassReferrer.Item item = new ClassReferrer.Item();
        item.label = (String) result.getColumnValue(row, 0);
        item.objects = (Integer) result.getColumnValue(row, 1);
        item.shallowSize = ((Bytes) result.getColumnValue(row, 2)).getValue();
        // NOTE: local `context` shadows the analyzer's field here.
        IContextObjectSet context = (IContextObjectSet) result.getContext(row);
        item.objectId = context.getObjectId();
        item.objectIds = context.getObjectIds();
        item.setType(getClassReferrerType(((IIconProvider) result).getIcon(row)));
        return item;
    }

    /** Root row of the outbound class-reference tree for one object. */
    @Override
    public ClassReferrer.Item getOutboundClassOfClassReference(int objectId) {
        return $(() -> {
            IResultTree result = queryIOBoundClassOfClassReference(context, objectId, true);
            return buildClassReferenceItem(result, result.getElements().get(0));
        });
    }

    /** Root row of the inbound class-reference tree for one object. */
    @Override
    public ClassReferrer.Item getInboundClassOfClassReference(int objectId) {
        return $(() -> {
            IResultTree result = queryIOBoundClassOfClassReference(context, objectId, false);
            return buildClassReferenceItem(result, result.getElements().get(0));
        });
    }

    /** One page of outbound class references for a set of objects. */
    @Override
    public PageView<ClassReferrer.Item> getOutboundsOfClassReference(int[] objectId,
                                                                     int page, int pageSize) {
        return $(() -> {
            IResultTree result = queryIOBoundClassOfClassReference(context, objectId, true);
            return PageViewBuilder
                .build(result.getChildren(result.getElements().get(0)),
                       new PagingRequest(page, pageSize), e -> buildClassReferenceItem(result, e));
        });
    }

    /** One page of inbound class references for a set of objects. */
    @Override
    public PageView<ClassReferrer.Item> getInboundsOfClassReference(int[] objectId, int page,
                                                                    int pageSize) {
        return $(() -> {
            IResultTree result = queryIOBoundClassOfClassReference(context, objectId, false);
            return PageViewBuilder
                .build(result.getChildren(result.getElements().get(0)),
                       new PagingRequest(page, pageSize), e -> buildClassReferenceItem(result, e));
        });
    }

    /**
     * Diffs this dump's histogram against a baseline dump at {@code other}
     * (opened via PROVIDER) and totals the per-class object and shallow-size
     * deltas. NOTE(review): the baseline analyzer opened here does not appear
     * to be disposed — confirm ownership with the Provider contract.
     */
    @Override
    public Comparison.Summary getSummaryOfComparison(Path other) {
        return $(() -> {
            ISnapshot baselineSnapshot =
                ((HeapDumpAnalyzerImpl) PROVIDER.provide(other, Collections.emptyMap(),
                                                         NoOpProgressListener)).context.snapshot;
            ISnapshot targetSnapshot = context.snapshot;
            Histogram targetHistogram =
                targetSnapshot.getHistogram(new ProgressListenerImpl(NoOpProgressListener));
            Histogram baselineHistogram =
                baselineSnapshot.getHistogram(new ProgressListenerImpl(NoOpProgressListener));
            final Histogram delta = targetHistogram.diffWithBaseline(baselineHistogram);
            long totalObjects = 0;
            long totalShallowHeap = 0;
            for (Object r : delta.getClassHistogramRecords()) {
                totalObjects += (long) delta.getColumnValue(r, 1);
                totalShallowHeap += ((Bytes) delta.getColumnValue(r, 2)).getValue();
            }
            Comparison.Summary summary = new Comparison.Summary();
            summary.setTotalSize(delta.getClassHistogramRecords().size());
            summary.setObjects(totalObjects);
            summary.setShallowSize(totalShallowHeap);
            return summary;
        });
    }

    /** One page of per-class histogram deltas vs. the baseline, sorted by shallow-size delta desc. */
    @Override
    public PageView<Comparison.Item> getItemsOfComparison(Path other, int page, int pageSize) {
        return $(() -> {
            ISnapshot baselineSnapshot =
                ((HeapDumpAnalyzerImpl) PROVIDER.provide(other, Collections.emptyMap(),
                                                         NoOpProgressListener)).context.snapshot;
            ISnapshot targetSnapshot = context.snapshot;
            Histogram targetHistogram =
                targetSnapshot.getHistogram(new ProgressListenerImpl(NoOpProgressListener));
            Histogram baselineHistogram =
                baselineSnapshot.getHistogram(new ProgressListenerImpl(NoOpProgressListener));
            final Histogram delta = targetHistogram.diffWithBaseline(baselineHistogram);
            //noinspection
            ((List<ClassHistogramRecord>) delta.getClassHistogramRecords()).sort((o1, o2) -> Long
.compare(((Bytes) delta.getColumnValue(o2, 2)).getValue(), ((Bytes) delta.getColumnValue(o1, 2)).getValue())); return PageViewBuilder.build(delta.getClassHistogramRecords(), new PagingRequest(page, pageSize), r -> { Comparison.Item record = new Comparison.Item(); record.setClassName((String) delta.getColumnValue(r, 0)); record.setObjects((Long) delta.getColumnValue(r, 1)); record.setShallowSize(((Bytes) delta.getColumnValue(r, 2)).getValue()); return record; }); }); } private IResultTree queryMultiplePath2GCRootsTreeByClassId(AnalysisContext context, int classId, GCRootPath.Grouping grouping) throws Exception { ClassImpl clazz = (ClassImpl) context.snapshot.getObject(classId); return queryMultiplePath2GCRootsTreeByObjectIds(context, clazz.getObjectIds(), grouping); } private IResultTree queryMultiplePath2GCRootsTreeByObjectIds(AnalysisContext context, int[] objectIds, GCRootPath.Grouping grouping) throws Exception { if (grouping != GCRootPath.Grouping.FROM_GC_ROOTS) { throw new JifaException("Unsupported grouping now"); } Map<String, Object> args = new HashMap<>(); args.put("objects", Helper.buildHeapObjectArgument(objectIds)); return queryByCommand(context, "merge_shortest_paths", args); } private PageView<GCRootPath.MergePathToGCRootsTreeNode> buildMergePathRootsNode(AnalysisContext context, IResultTree tree, List<?> elements, int page, int pageSize) { return PageViewBuilder.build(elements, new PagingRequest(page, pageSize), element -> $(() -> { ISnapshot snapshot = context.snapshot; GCRootPath.MergePathToGCRootsTreeNode record = new GCRootPath.MergePathToGCRootsTreeNode(); int objectId = tree.getContext(element).getObjectId(); IObject object = snapshot.getObject(objectId); record.setObjectId(tree.getContext(element).getObjectId()); record.setObjectType(typeOf(object)); record.setGCRoot(snapshot.isGCRoot(objectId)); record.setClassName(tree.getColumnValue(element, 0).toString()); record.setSuffix(Helper.suffix(object.getGCRootInfo())); record.setRefObjects((int) 
tree.getColumnValue(element, 1));
        record.setShallowHeap(((Bytes) tree.getColumnValue(element, 2)).getValue());
        record.setRefShallowHeap(((Bytes) tree.getColumnValue(element, 3)).getValue());
        record.setRetainedHeap(((Bytes) tree.getColumnValue(element, 4)).getValue());
        return record;
    }));
}

// Paged roots of the merged shortest-paths-to-GC-roots tree for all instances of a class.
@Override
public PageView<GCRootPath.MergePathToGCRootsTreeNode> getRootsOfMergePathToGCRootsByClassId(
    int classId, GCRootPath.Grouping grouping, int page, int pageSize) {
    return $(() -> {
        IResultTree tree = queryMultiplePath2GCRootsTreeByClassId(context, classId, grouping);
        return buildMergePathRootsNode(context, tree, tree.getElements(), page, pageSize);
    });
}

// Paged roots of the merged shortest-paths tree for an explicit object-id set.
@Override
public PageView<GCRootPath.MergePathToGCRootsTreeNode> getRootsOfMergePathToGCRootsByObjectIds(
    int[] objectIds, GCRootPath.Grouping grouping, int page, int pageSize) {
    return $(() -> {
        IResultTree tree = queryMultiplePath2GCRootsTreeByObjectIds(context, objectIds, grouping);
        return buildMergePathRootsNode(context, tree, tree.getElements(), page, pageSize);
    });
}

// Children of a node (addressed by an object-id path) in the class-based merge-path tree.
@Override
public PageView<GCRootPath.MergePathToGCRootsTreeNode> getChildrenOfMergePathToGCRootsByClassId(
    int classId, int[] objectIdPathInGCPathTree, GCRootPath.Grouping grouping, int page,
    int pageSize) {
    return $(() -> {
        IResultTree tree = queryMultiplePath2GCRootsTreeByClassId(context, classId, grouping);
        Object object = Helper.fetchObjectInResultTree(tree, objectIdPathInGCPathTree);
        List<?> elements = object == null ? Collections.emptyList() : tree.getChildren(object);
        return buildMergePathRootsNode(context, tree, elements, page, pageSize);
    });
}

// Children of a node (addressed by an object-id path) in the object-id-based merge-path tree.
@Override
public PageView<GCRootPath.MergePathToGCRootsTreeNode> getChildrenOfMergePathToGCRootsByObjectIds(
    int[] objectIds, int[] objectIdPathInGCPathTree, GCRootPath.Grouping grouping, int page,
    int pageSize) {
    return $(() -> {
        IResultTree tree = queryMultiplePath2GCRootsTreeByObjectIds(context, objectIds, grouping);
        Object object = Helper.fetchObjectInResultTree(tree, objectIdPathInGCPathTree);
        List<?> elements = object == null ? Collections.emptyList() : tree.getChildren(object);
        return buildMergePathRootsNode(context, tree, elements, page, pageSize);
    });
}

/**
 * Computes up to {@code count} shortest paths from GC roots to {@code originId}
 * (skipping the first {@code skip}), merged into a single tree rooted at the origin.
 */
@Override
public GCRootPath.Item getPathToGCRoots(int originId, int skip, int count) {
    return $(() -> {
        ISnapshot snapshot = context.snapshot;
        Map<IClass, Set<String>> excludeMap = convert(context, GCRootPath.EXCLUDES);
        IPathsFromGCRootsComputer computer = snapshot.getPathsFromGCRoots(originId, excludeMap);
        List<int[]> paths = new ArrayList<>();
        int index = 0;
        int[] current;
        int get = 0;
        while (get < count && (current = computer.getNextShortestPath()) != null) {
            if (index < skip) {
                index++;
                continue;
            }
            paths.add(current);
            get++;
        }
        // one extra probe so the client can be told whether more paths exist
        boolean hasMore = computer.getNextShortestPath() != null;
        GCRootPath.Item item = new GCRootPath.Item();
        item.setCount(paths.size());
        item.setHasMore(hasMore);
        GCRootPath.Node origin = new GCRootPath.Node();
        IObject object = snapshot.getObject(originId);
        origin.setOrigin(true);
        origin.setObjectId(originId);
        origin.setLabel(object.getDisplayName());
        origin.setSuffix(Helper.suffix(snapshot, originId));
        origin.setGCRoot(snapshot.isGCRoot(originId));
        origin.setObjectType(typeOf(object));
        origin.setShallowSize(object.getUsedHeapSize());
        origin.setRetainedSize(object.getRetainedHeapSize());
        item.setTree(origin);
        if (paths.size() == 0) {
            return item;
        }
        // merge every path into the tree; path[0] is the origin itself, so start at 1
        for (int[] path : paths) {
            GCRootPath.Node parentNode = origin;
            for (index = 1; index < path.length; index++) {
                int childId = path[index];
                GCRootPath.Node childNode = parentNode.getChild(childId);
                if (childNode == null) {
                    IObject childObj = snapshot.getObject(childId);
                    childNode = new GCRootPath.Node();
                    childNode.setObjectId(childId);
                    childNode.setPrefix(Helper.prefix(snapshot, childId, parentNode.getObjectId()));
                    childNode.setLabel(childObj.getDisplayName());
                    childNode.setSuffix(Helper.suffix(snapshot, childId));
                    childNode.setGCRoot(snapshot.isGCRoot(childId));
                    childNode.setObjectType(typeOf(childObj));
                    childNode.setShallowSize(childObj.getUsedHeapSize());
                    childNode.setRetainedSize(childObj.getRetainedHeapSize());
                    parentNode.addChild(childNode);
                }
                parentNode = childNode;
            }
        }
        return item;
    });
}

// Recursively converts one row of a refined "shortest paths" tree into a ShortestPath node.
private LeakReport.ShortestPath buildPath(ISnapshot snapshot, RefinedTree rst, Object row)
    throws SnapshotException {
    LeakReport.ShortestPath shortestPath = new LeakReport.ShortestPath();
    shortestPath.setLabel((String) rst.getColumnValue(row, 0));
    shortestPath.setShallowSize(((Bytes) rst.getColumnValue(row, 1)).getValue());
    shortestPath.setRetainedSize(((Bytes) rst.getColumnValue(row, 2)).getValue());
    int objectId = rst.getContext(row).getObjectId();
    shortestPath.setObjectId(objectId);
    IObject object = snapshot.getObject(objectId);
    shortestPath.setGCRoot(snapshot.isGCRoot(objectId));
    shortestPath.setObjectType(typeOf(object));
    if (rst.hasChildren(row)) {
        List<LeakReport.ShortestPath> children = new ArrayList<>();
        shortestPath.setChildren(children);
        for (Object c : rst.getChildren(row)) {
            children.add(buildPath(snapshot, rst, c));
        }
    }
    return shortestPath;
}

/**
 * Runs the MAT "leakhunter" query — caching the raw result in a SoftReference on the
 * context with double-checked locking — and converts it into a LeakReport.
 */
@Override
public LeakReport getLeakReport() {
    return $(() -> {
        AnalysisContext.LeakReportData data = context.leakReportData.get();
        if (data == null) {
            synchronized (context) {
                data = context.leakReportData.get();
                if (data == null) {
                    IResult result = queryByCommand(context, "leakhunter");
                    data = new AnalysisContext.LeakReportData();
                    data.result = result;
                    context.leakReportData = new SoftReference<>(data);
                }
            }
        }
        IResult result =
data.result;
        LeakReport report = new LeakReport();
        if (result instanceof TextResult) {
            // no suspects: the query returned plain text only
            report.setInfo(((TextResult) result).getText());
        } else if (result instanceof SectionSpec) {
            report.setUseful(true);
            SectionSpec sectionSpec = (SectionSpec) result;
            report.setName(sectionSpec.getName());
            List<Spec> specs = sectionSpec.getChildren();
            for (int i = 0; i < specs.size(); i++) {
                QuerySpec spec = (QuerySpec) specs.get(i);
                String name = spec.getName();
                if (name == null || name.isEmpty()) {
                    continue;
                }
                // LeakHunterQuery_Overview
                if (name.startsWith("Overview")) {
                    IResultPie irtPie = (IResultPie) spec.getResult();
                    List<? extends IResultPie.Slice> pieSlices = irtPie.getSlices();
                    List<LeakReport.Slice> slices = new ArrayList<>();
                    for (IResultPie.Slice slice : pieSlices) {
                        slices.add(
                            new LeakReport.Slice(slice.getLabel(),
                                                 Helper.fetchObjectId(slice.getContext()),
                                                 slice.getValue(), slice.getDescription()));
                    }
                    report.setSlices(slices);
                }
                // LeakHunterQuery_ProblemSuspect
                // LeakHunterQuery_Hint
                else if (name.startsWith("Problem Suspect") || name.startsWith("Hint")) {
                    LeakReport.Record suspect = new LeakReport.Record();
                    suspect.setIndex(i);
                    suspect.setName(name);
                    CompositeResult cr = (CompositeResult) spec.getResult();
                    List<CompositeResult.Entry> entries = cr.getResultEntries();
                    for (CompositeResult.Entry entry : entries) {
                        String entryName = entry.getName();
                        if (entryName == null || entryName.isEmpty()) {
                            IResult r = entry.getResult();
                            if (r instanceof QuerySpec &&
                                // LeakHunterQuery_ShortestPaths
                                ((QuerySpec) r).getName().equals("Shortest Paths To the Accumulation Point")) {
                                IResultTree tree = (IResultTree) ((QuerySpec) r).getResult();
                                RefinedResultBuilder builder = new RefinedResultBuilder(
                                    new SnapshotQueryContext(context.snapshot), tree);
                                RefinedTree rst = (RefinedTree) builder.build();
                                List<?> elements = rst.getElements();
                                List<LeakReport.ShortestPath> paths = new ArrayList<>();
                                suspect.setPaths(paths);
                                for (Object row : elements) {
                                    paths.add(buildPath(context.snapshot,
                                                        rst, row));
                                }
                            }
                        }
                        // LeakHunterQuery_Description
                        // LeakHunterQuery_Overview
                        else if ((entryName.startsWith("Description")
                                  || entryName.startsWith("Overview"))) {
                            TextResult desText = (TextResult) entry.getResult();
                            suspect.setDesc(desText.getText());
                        }
                    }
                    List<LeakReport.Record> records = report.getRecords();
                    if (records == null) {
                        report.setRecords(records = new ArrayList<>());
                    }
                    records.add(suspect);
                }
            }
        }
        return report;
    });
}

// Runs the MAT "oql" query for the given OQL string; results are cached via @Cacheable.
@Cacheable
protected IResult getOQLResult(AnalysisContext context, String oql) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        args.put("queryString", oql);
        return queryByCommand(context, "oql", args);
    });
}

/**
 * Executes a Calcite SQL query against the snapshot and adapts the result
 * (tree, table or text) to the corresponding CalciteSQLResult shape.
 * Query failures are reported as a text result rather than rethrown.
 */
@Override
public CalciteSQLResult getCalciteSQLResult(String sql, String sortBy, boolean ascendingOrder,
                                            int page, int pageSize) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        args.put("sql", sql);
        IResult result;
        try {
            result = queryByCommand(context, "calcite", args);
        } catch (Throwable t) {
            return new CalciteSQLResult.TextResult(t.getMessage());
        }
        if (result instanceof IResultTree) {
            return new CalciteSQLResult.TreeResult(
                PageViewBuilder.build(
                    ((IResultTree) result).getElements(),
                    new PagingRequest(page, pageSize),
                    e -> $(() -> context.snapshot.getObject(((IResultTree) result).getContext(e).getObjectId())),
                    o -> $(() -> {
                        JavaObject jo = new JavaObject();
                        jo.setObjectId(o.getObjectId());
                        jo.setLabel(o.getDisplayName());
                        jo.setSuffix(Helper.suffix(o.getGCRootInfo()));
                        jo.setShallowSize(o.getUsedHeapSize());
                        jo.setRetainedSize(o.getRetainedHeapSize());
                        jo.setGCRoot(context.snapshot.isGCRoot(o.getObjectId()));
                        jo.setObjectType(typeOf(o));
                        jo.setHasOutbound(true);
                        return jo;
                    }),
                    IObjectSortHelper.sortBy(sortBy, ascendingOrder)));
        } else if (result instanceof IResultTable) {
            IResultTable table = (IResultTable) result;
            Column[] columns = table.getColumns();
            List<String> cs = Arrays.stream(columns).map(Column::getLabel).collect(Collectors.toList());
            PageView<CalciteSQLResult.TableResult.Entry> pv =
PageViewBuilder.build(new PageViewBuilder.Callback<Object>() {
                    @Override
                    public int totalSize() {
                        return table.getRowCount();
                    }

                    @Override
                    public Object get(int index) {
                        return table.getRow(index);
                    }
                }, new PagingRequest(page, pageSize), o -> {
                    List<Object> l = new ArrayList<>();
                    for (int i = 0; i < columns.length; i++) {
                        Object columnValue = table.getColumnValue(o, i);
                        // unescape so string cells render as typed, not as Java escapes
                        l.add(columnValue != null ? EscapeUtil.unescapeJava(columnValue.toString()) : null);
                    }
                    IContextObject co = table.getContext(o);
                    return new CalciteSQLResult.TableResult.Entry(co != null ? co.getObjectId()
                                                                             : Helper.ILLEGAL_OBJECT_ID, l);
                });
            return new CalciteSQLResult.TableResult(cs, pv);
        } else if (result instanceof TextResult) {
            return new CalciteSQLResult.TextResult(((TextResult) result).getText());
        }
        return new CalciteSQLResult.TextResult("Unsupported Calcite SQL result type");
    });
}

// Field-name -> IObject comparator lookup used to sort tree-style query results.
static class IObjectSortHelper {
    static Map<String, Comparator<IObject>> sortTable = new SortTableGenerator<IObject>()
        .add("id", IObject::getObjectId)
        .add("shallowHeap", IObject::getUsedHeapSize)
        .add("retainedHeap", IObject::getRetainedHeapSize)
        .add("label", IObject::getDisplayName)
        .build();

    // NOTE(review): an unknown field name yields null (or NPE when descending) —
    // presumably callers only pass the four registered keys; confirm.
    public static Comparator<IObject> sortBy(String field, boolean ascendingOrder) {
        return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed();
    }
}

/**
 * Executes an OQL query and adapts the result (tree, table or text) to OQLResult.
 * Unlike the Calcite variant, an unsupported result type raises AnalysisException.
 */
public OQLResult getOQLResult(String oql, String sortBy, boolean ascendingOrder, int page,
                              int pageSize) {
    IResult result = getOQLResult(context, oql);
    return $(() -> {
        if (result instanceof IResultTree) {
            return new OQLResult.TreeResult(
                PageViewBuilder.build(
                    ((IResultTree) result).getElements(),
                    new PagingRequest(page, pageSize),
                    e -> $(() -> context.snapshot.getObject(((IResultTree) result).getContext(e).getObjectId())),
                    o -> $(() -> {
                        JavaObject jo = new JavaObject();
                        jo.setObjectId(o.getObjectId());
                        jo.setLabel(o.getDisplayName());
                        jo.setSuffix(Helper.suffix(o.getGCRootInfo()));
                        jo.setShallowSize(o.getUsedHeapSize());
                        jo.setRetainedSize(o.getRetainedHeapSize());
                        jo.setGCRoot(context.snapshot.isGCRoot(o.getObjectId()));
                        jo.setObjectType(typeOf(o));
                        jo.setHasOutbound(true);
                        return jo;
                    }),
                    IObjectSortHelper.sortBy(sortBy, ascendingOrder)));
        } else if (result instanceof IResultTable) {
            IResultTable table = (IResultTable) result;
            Column[] columns = table.getColumns();
            List<String> cs = Arrays.stream(columns).map(Column::getLabel).collect(Collectors.toList());
            PageView<OQLResult.TableResult.Entry> pv =
                PageViewBuilder.build(new PageViewBuilder.Callback<Object>() {
                    @Override
                    public int totalSize() {
                        return table.getRowCount();
                    }

                    @Override
                    public Object get(int index) {
                        return table.getRow(index);
                    }
                }, new PagingRequest(page, pageSize), o -> {
                    List<Object> l = new ArrayList<>();
                    for (int i = 0; i < columns.length; i++) {
                        Object columnValue = table.getColumnValue(o, i);
                        l.add(columnValue != null ? columnValue.toString() : null);
                    }
                    IContextObject co = table.getContext(o);
                    return new OQLResult.TableResult.Entry(co != null ? co.getObjectId()
                                                                      : Helper.ILLEGAL_OBJECT_ID, l);
                });
            return new OQLResult.TableResult(cs, pv);
        } else if (result instanceof TextResult) {
            return new OQLResult.TextResult(((TextResult) result).getText());
        } else {
            throw new AnalysisException("Unsupported OQL result type");
        }
    });
}

// Thread count plus total shallow/retained heap over threads matching the search filter.
@Override
public Model.Thread.Summary getSummaryOfThreads(String searchText, SearchType searchType) {
    return $(() -> {
        IResultTree result = queryByCommand(context, "thread_overview");
        List<Model.Thread.Item> items = result.getElements().stream()
            .map(row -> new VirtualThreadItem(result, row))
            .filter(SearchPredicate.createPredicate(searchText, searchType))
            .collect(Collectors.toList());
        Model.Thread.Summary summary = new Model.Thread.Summary();
        summary.totalSize = items.size();
        summary.shallowHeap = items.stream().mapToLong(Model.Thread.Item::getShallowSize).sum();
        // NOTE(review): stray empty statement (double semicolon) below — harmless
        summary.retainedHeap = items.stream().mapToLong(Model.Thread.Item::getRetainedSize).sum();;
        return summary;
    });
}

// Paged, filtered and sorted thread-overview rows.
@Override
public PageView<Model.Thread.Item> getThreads(String sortBy, boolean ascendingOrder,
                                              String searchText, SearchType searchType,
                                              int page, int pageSize) {
    PagingRequest pagingRequest = new PagingRequest(page, pageSize);
    return $(() -> {
        IResultTree result = queryByCommand(context, "thread_overview");
        final AtomicInteger afterFilterCount = new AtomicInteger(0);
        List<Model.Thread.Item> items = result.getElements().stream()
            .map(row -> new VirtualThreadItem(result, row))
            .filter(SearchPredicate.createPredicate(searchText, searchType))
            // count survivors so the page view can report the filtered total
            .peek(filtered -> afterFilterCount.incrementAndGet())
            .sorted(Model.Thread.Item.sortBy(sortBy, ascendingOrder))
            .skip(pagingRequest.from())
            .limit(pagingRequest.getPageSize())
            .collect(Collectors.toList());
        return new PageView(pagingRequest, afterFilterCount.get(), items);
    });
}

// Stack frames of one thread; additionally flags the first non-native frame.
@Override
public List<Model.Thread.StackFrame> getStackTrace(int objectId) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        args.put("objects", Helper.buildHeapObjectArgument(new
int[]{objectId}));
        IResultTree result = queryByCommand(context, "thread_overview", args);
        List<?> elements = result.getElements();
        // 10 columns means the result includes the "max local retained" column at index 4
        boolean includesMaxLocalRetained = (result.getColumns().length == 10);
        if (result.hasChildren(elements.get(0))) {
            List<?> frames = result.getChildren(elements.get(0));
            List<Model.Thread.StackFrame> res = frames.stream().map(
                frame -> new Model.Thread.StackFrame(((String) result.getColumnValue(frame, 0)),
                                                     result.hasChildren(frame),
                                                     (includesMaxLocalRetained && result.getColumnValue(frame, 4) != null)
                                                     ? ((Bytes) result.getColumnValue(frame, 4)).getValue()
                                                     : 0L
                )).collect(Collectors.toList());
            res.stream().filter(t -> !t.getStack().contains("Native Method")).findFirst()
               .ifPresent(sf -> sf.setFirstNonNativeFrame(true));
            return res;
        }
        return Collections.emptyList();
    });
}

/**
 * Local variables of the frame at 1-based {@code depth} of the given thread.
 * On the first non-native frame, locals the thread is blocked on get a special prefix.
 */
@Override
public List<Model.Thread.LocalVariable> getLocalVariables(int objectId, int depth,
                                                          boolean firstNonNativeFrame) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        args.put("objects", Helper.buildHeapObjectArgument(new int[]{objectId}));
        IResultTree result = queryByCommand(context, "thread_overview", args);
        List<?> elements = result.getElements();
        if (result.hasChildren(elements.get(0))) {
            List<?> frames = result.getChildren(elements.get(0));
            Object frame = frames.get(depth - 1);
            if (result.hasChildren(frame)) {
                List<?> locals = result.getChildren(frame);
                return locals.stream().map(local -> {
                    int id = result.getContext(local).getObjectId();
                    Model.Thread.LocalVariable var = new Model.Thread.LocalVariable();
                    var.setObjectId(id);
                    try {
                        IObject object = context.snapshot.getObject(id);
                        var.setLabel(object.getDisplayName());
                        var.setShallowSize(object.getUsedHeapSize());
                        var.setRetainedSize(object.getRetainedHeapSize());
                        var.setObjectType(typeOf(object));
                        var.setGCRoot(context.snapshot.isGCRoot(id));
                        // NOTE(review): passes the freshly built DTO 'var' instead of the
                        // tree row 'local' — looks like a bug; confirm intended argument
                        var.setHasOutbound(result.hasChildren(var));
                        // ThreadStackQuery_Label_Local
                        var.setPrefix("<local>");
                        if (firstNonNativeFrame) {
                            GCRootInfo[] gcRootInfos =
                                object.getGCRootInfo();
                            if (gcRootInfos != null) {
                                for (GCRootInfo gcRootInfo : gcRootInfos) {
                                    if (gcRootInfo.getContextId() != 0
                                        && (gcRootInfo.getType() & GCRootInfo.Type.BUSY_MONITOR) != 0
                                        && gcRootInfo.getContextId() == objectId) {
                                        // ThreadStackQuery_Label_Local_Blocked_On
                                        var.setPrefix("<local, blocked on>");
                                    }
                                }
                            }
                        }
                        var.setSuffix(Helper.suffix(context.snapshot, id));
                        return var;
                    } catch (SnapshotException e) {
                        throw new JifaException(e);
                    }
                }).collect(Collectors.toList());
            }
        }
        return Collections.emptyList();
    });
}

// Paged duplicated-class groups, largest group (most class loaders) first.
@Override
public PageView<DuplicatedClass.ClassItem> getDuplicatedClasses(String searchText,
                                                                SearchType searchType,
                                                                int page, int pageSize) {
    return $(() -> {
        IResultTree result = queryByCommand(context, "duplicate_classes");
        List<?> classes = result.getElements();
        classes.sort((o1, o2) -> ((List<?>) o2).size() - ((List<?>) o1).size());
        PageViewBuilder<?, DuplicatedClass.ClassItem> builder = PageViewBuilder.fromList(classes);
        return builder.paging(new PagingRequest(page, pageSize))
                      .map(r -> {
                          DuplicatedClass.ClassItem item = new DuplicatedClass.ClassItem();
                          item.setLabel((String) result.getColumnValue(r, 0));
                          item.setCount((Integer) result.getColumnValue(r, 1));
                          return item;
                      })
                      .filter(SearchPredicate.createPredicate(searchText, searchType))
                      .done();
    });
}

// Class loaders defining the duplicated class at {@code index}; ordering must stay
// in sync with getDuplicatedClasses (same query, same sort) for the index to match.
@Override
public PageView<DuplicatedClass.ClassLoaderItem> getClassloadersOfDuplicatedClass(int index,
                                                                                  int page,
                                                                                  int pageSize) {
    return $(() -> {
        IResultTree result = queryByCommand(context, "duplicate_classes");
        List<?> classes = result.getElements();
        classes.sort((o1, o2) -> ((List<?>) o2).size() - ((List<?>) o1).size());
        List<?> classLoaders = (List<?>) classes.get(index);
        return PageViewBuilder.build(classLoaders, new PagingRequest(page, pageSize), r -> {
            DuplicatedClass.ClassLoaderItem item = new DuplicatedClass.ClassLoaderItem();
            item.setLabel((String) result.getColumnValue(r, 0));
            item.setDefinedClassesCount((Integer) result.getColumnValue(r, 2));
            item.setInstantiatedObjectsCount((Integer)
                                                 result.getColumnValue(r, 3));
            GCRootInfo[] roots;
            try {
                roots = ((IClass) r).getGCRootInfo();
            } catch (SnapshotException e) {
                throw new JifaException(e);
            }
            int id = ((IClass) r).getClassLoaderId();
            item.setObjectId(id);
            item.setGCRoot(context.snapshot.isGCRoot(id));
            item.setSuffix(roots != null ? GCRootInfo.getTypeSetAsString(roots) : null);
            return item;
        });
    });
}

/**
 * Histogram grouped by class / class loader / superclass / package, optionally
 * restricted to the given object ids, with paging, sorting and search filtering.
 */
@Override
public PageView<Model.Histogram.Item> getHistogram(Model.Histogram.Grouping groupingBy,
                                                   int[] ids, String sortBy,
                                                   boolean ascendingOrder, String searchText,
                                                   SearchType searchType, int page,
                                                   int pageSize) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        if (ids != null) {
            args.put("objects", Helper.buildHeapObjectArgument(ids));
        }
        IResult result = queryByCommand(context, "histogram -groupBy " + groupingBy.name(), args);
        switch (groupingBy) {
            case BY_CLASS:
                Histogram h = (Histogram) result;
                List<ClassHistogramRecord> records =
                    (List<ClassHistogramRecord>) h.getClassHistogramRecords();
                return PageViewBuilder.<ClassHistogramRecord, Model.Histogram.Item>fromList(records)
                    // retained sizes are computed lazily per record, before mapping
                    .beforeMap(record -> $(() -> record
                        .calculateRetainedSize(context.snapshot, true, true, Helper.VOID_LISTENER)))
                    .paging(new PagingRequest(page, pageSize))
                    .map(record -> new Model.Histogram.Item(record.getClassId(),
                                                            record.getLabel(),
                                                            Model.Histogram.ItemType.CLASS,
                                                            record.getNumberOfObjects(),
                                                            record.getUsedHeapSize(),
                                                            record.getRetainedHeapSize()))
                    .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                    .filter(createPredicate(searchText, searchType))
                    .done();
            case BY_CLASSLOADER:
                Histogram.ClassLoaderTree ct = (Histogram.ClassLoaderTree) result;
                @SuppressWarnings("unchecked")
                PageViewBuilder<? extends XClassLoaderHistogramRecord, Model.Histogram.Item> builder =
                    PageViewBuilder.fromList((List<?
extends XClassLoaderHistogramRecord>) ct.getElements());
                return builder
                    .beforeMap(record -> $(() -> record.calculateRetainedSize(context.snapshot,
                                                                              true, true,
                                                                              Helper.VOID_LISTENER)))
                    .paging(new PagingRequest(page, pageSize))
                    .map(record -> new Model.Histogram.Item(record.getClassLoaderId(),
                                                            record.getLabel(),
                                                            Model.Histogram.ItemType.CLASS_LOADER,
                                                            record.getNumberOfObjects(),
                                                            record.getUsedHeapSize(),
                                                            record.getRetainedHeapSize())
                    )
                    .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                    .filter(createPredicate(searchText, searchType))
                    .done();
            case BY_SUPERCLASS:
                Histogram.SuperclassTree st = (Histogram.SuperclassTree) result;
                //noinspection unchecked
                return PageViewBuilder.<HistogramRecord, Model.Histogram.Item>fromList(
                    (List<HistogramRecord>) st.getElements())
                    .paging(new PagingRequest(page, pageSize))
                    .map(e -> {
                        Model.Histogram.Item item = new Model.Histogram.Item();
                        int objectId = st.getContext(e).getObjectId();
                        item.setType(Model.Histogram.ItemType.SUPER_CLASS);
                        item.setObjectId(objectId);
                        item.setLabel((String) st.getColumnValue(e, 0));
                        item.setNumberOfObjects((Long) st.getColumnValue(e, 1));
                        item.setShallowSize(((Bytes) st.getColumnValue(e, 2)).getValue());
                        return item;
                    })
                    .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                    .filter(createPredicate(searchText, searchType))
                    .done();
            case BY_PACKAGE:
                Histogram.PackageTree pt = (Histogram.PackageTree) result;
                //noinspection unchecked
                return PageViewBuilder.<HistogramRecord, Model.Histogram.Item>fromList(
                    (List<HistogramRecord>) pt.getElements())
                    .paging(new PagingRequest(page, pageSize))
                    .map(e -> {
                        Model.Histogram.Item item = new Model.Histogram.Item();
                        String label = (String) pt.getColumnValue(e, 0);
                        item.setLabel(label);
                        if (e instanceof XClassHistogramRecord) {
                            int objectId = pt.getContext(e).getObjectId();
                            item.setObjectId(objectId);
                            item.setType(Model.Histogram.ItemType.CLASS);
                        } else {
                            // synthetic package node: no heap object behind it,
                            // so the label hash serves as a surrogate id
                            item.setObjectId(label.hashCode());
                            item.setType(Model.Histogram.ItemType.PACKAGE);
                        }
                        // primitive (array) type labels also count as classes
                        if (label.matches("^int(\\[\\])*")
                            || label.matches("^char(\\[\\])*")
                            || label.matches("^byte(\\[\\])*")
                            || label.matches("^short(\\[\\])*")
                            || label.matches("^boolean(\\[\\])*")
                            || label.matches("^double(\\[\\])*")
                            || label.matches("^float(\\[\\])*")
                            || label.matches("^long(\\[\\])*")
                            || label.matches("^void(\\[\\])*")) {
                            item.setType(Model.Histogram.ItemType.CLASS);
                        }
                        item.setNumberOfObjects((Long) pt.getColumnValue(e, 1));
                        item.setShallowSize(((Bytes) pt.getColumnValue(e, 2)).getValue());
                        return item;
                    })
                    .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                    .filter(createPredicate(searchText, searchType))
                    .done();
            default:
                throw new AnalysisException("Should not reach here");
        }
    });
}

// Paged instances of the class with the given id (via the BY_CLASS histogram context).
@Override
public PageView<JavaObject> getHistogramObjects(int classId, int page, int pageSize) {
    return $(() -> {
        IResult result = queryByCommand(context, "histogram -groupBy BY_CLASS",
                                        Collections.emptyMap());
        Histogram h = (Histogram) result;
        List<ClassHistogramRecord> records =
            (List<ClassHistogramRecord>) h.getClassHistogramRecords();
        Optional<ClassHistogramRecord> ro =
            records.stream().filter(r -> r.getClassId() == classId).findFirst();
        if (ro.isPresent()) {
            IContextObject c = ((Histogram) result).getContext(ro.get());
            if (c instanceof IContextObjectSet) {
                int[] objectIds = ((IContextObjectSet) c).getObjectIds();
                return PageViewBuilder.build(objectIds, new PagingRequest(page, pageSize),
                                             this::getObjectInfo);
            }
        }
        return PageView.empty();
    });
}

/**
 * Children of a histogram node for the grouped views (class loader / superclass /
 * package); BY_CLASS has no children and must not be requested here.
 */
@Override
public PageView<Model.Histogram.Item> getChildrenOfHistogram(Model.Histogram.Grouping groupBy,
                                                             int[] ids, String sortBy,
                                                             boolean ascendingOrder,
                                                             int parentObjectId, int page,
                                                             int pageSize) {
    return $(() -> {
        Map<String, Object> args = new HashMap<>();
        if (ids != null) {
            args.put("objects", Helper.buildHeapObjectArgument(ids));
        }
        IResult result = queryByCommand(context, "histogram -groupBy " + groupBy.name(), args);
        switch (groupBy) {
            case BY_CLASS: {
                throw new AnalysisException("Should not reach here");
            }
            case BY_CLASSLOADER: {
                Histogram.ClassLoaderTree tree = (Histogram.ClassLoaderTree) result;
                List<?> elems = tree.getElements();
                List<? extends ClassHistogramRecord> children = null;
                // locate the class-loader node whose id matches the requested parent
                for (Object elem : elems) {
                    if (elem instanceof XClassLoaderHistogramRecord) {
                        if (((XClassLoaderHistogramRecord) elem).getClassLoaderId() == parentObjectId) {
                            children = (List<? extends ClassHistogramRecord>) ((XClassLoaderHistogramRecord) elem)
                                .getClassHistogramRecords();
                            break;
                        }
                    }
                }
                if (children != null) {
                    //noinspection unchecked
                    return PageViewBuilder.<ClassHistogramRecord, Model.Histogram.Item>fromList(
                        (List<ClassHistogramRecord>) children)
                        .beforeMap(record -> $(() -> record
                            .calculateRetainedSize(context.snapshot, true, true,
                                                   Helper.VOID_LISTENER)))
                        .paging(new PagingRequest(page, pageSize))
                        .map(record -> new Model.Histogram.Item(record.getClassId(),
                                                                record.getLabel(),
                                                                Model.Histogram.ItemType.CLASS,
                                                                record.getNumberOfObjects(),
                                                                record.getUsedHeapSize(),
                                                                record.getRetainedHeapSize()))
                        .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                        .done();
                } else {
                    return PageView.empty();
                }
            }
            case BY_SUPERCLASS: {
                Histogram.SuperclassTree st = (Histogram.SuperclassTree) result;
                List<?> children = new ExoticTreeFinder(st)
                    // children live in the private "subClasses" map of each tree node
                    .setGetChildrenCallback(node -> {
                        Map<String, ?> subClasses = ReflectionUtil.getFieldValueOrNull(node, "subClasses");
                        if (subClasses != null) {
                            return new ArrayList<>(subClasses.values());
                        }
                        return null;
                    })
                    .setPredicate((theTree, theNode) -> theTree.getContext(theNode).getObjectId())
                    .findChildrenOf(parentObjectId);
                if (children != null) {
                    //noinspection unchecked
                    return PageViewBuilder.<HistogramRecord, Model.Histogram.Item>fromList(
                        (List<HistogramRecord>) children)
                        .paging(new PagingRequest(page, pageSize))
                        .map(e -> {
                            Model.Histogram.Item item = new Model.Histogram.Item();
                            int objectId = st.getContext(e).getObjectId();
                            item.setType(Model.Histogram.ItemType.SUPER_CLASS);
                            item.setObjectId(objectId);
                            item.setLabel((String) st.getColumnValue(e, 0));
                            item.setNumberOfObjects((Long)
st.getColumnValue(e, 1));
                            item.setShallowSize(((Bytes) st.getColumnValue(e, 2)).getValue());
                            return item;
                        })
                        .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                        .done();
                } else {
                    return PageView.empty();
                }
            }
            case BY_PACKAGE: {
                Histogram.PackageTree pt = (Histogram.PackageTree) result;
                Object targetParentNode = new ExoticTreeFinder(pt)
                    // children live in the private "subPackages" map of each tree node
                    .setGetChildrenCallback(node -> {
                        Map<String, ?> subPackages = ReflectionUtil.getFieldValueOrNull(node, "subPackages");
                        if (subPackages != null) {
                            return new ArrayList<>(subPackages.values());
                        } else {
                            return null;
                        }
                    })
                    // package nodes carry no object id; the label hash is the surrogate key
                    .setPredicate((theTree, theNode) -> {
                        if (!(theNode instanceof XClassHistogramRecord)) {
                            try {
                                java.lang.reflect.Field field =
                                    theNode.getClass().getSuperclass().getDeclaredField("label");
                                field.setAccessible(true);
                                String labelName = (String) field.get(theNode);
                                return labelName.hashCode();
                            } catch (Throwable e) {
                                // NOTE(review): reflection failure only printed, then null —
                                // consider proper logging
                                e.printStackTrace();
                            }
                        }
                        return null;
                    })
                    .findTargetParentNode(parentObjectId);
                if (targetParentNode != null) {
                    Map<String, ?> packageMap =
                        ReflectionUtil.getFieldValueOrNull(targetParentNode, "subPackages");
                    List<?> elems = new ArrayList<>();
                    if (packageMap != null) {
                        if (packageMap.size() == 0) {
                            // leaf package: its children are classes, not sub-packages
                            elems = ReflectionUtil.getFieldValueOrNull(targetParentNode, "classes");
                        } else {
                            elems = new ArrayList<>(packageMap.values());
                        }
                    }
                    //noinspection unchecked
                    return PageViewBuilder.<HistogramRecord, Model.Histogram.Item>fromList(
                        (List<HistogramRecord>) elems)
                        .paging(new PagingRequest(page, pageSize))
                        .map(e -> {
                            Model.Histogram.Item item = new Model.Histogram.Item();
                            String label = (String) pt.getColumnValue(e, 0);
                            item.setLabel(label);
                            if (e instanceof XClassHistogramRecord) {
                                int objectId = pt.getContext(e).getObjectId();
                                item.setObjectId(objectId);
                                item.setType(Model.Histogram.ItemType.CLASS);
                            } else {
                                item.setObjectId(label.hashCode());
                                item.setType(Model.Histogram.ItemType.PACKAGE);
                            }
                            // primitive (array) type labels also count as classes
                            if (label.matches("^int(\\[\\])*")
                                || label.matches("^char(\\[\\])*")
                                || label.matches("^byte(\\[\\])*")
                                || label.matches("^short(\\[\\])*")
                                || label.matches("^boolean(\\[\\])*")
                                || label.matches("^double(\\[\\])*")
                                || label.matches("^float(\\[\\])*")
                                || label.matches("^long(\\[\\])*")
                                || label.matches("^void(\\[\\])*")) {
                                item.setType(Model.Histogram.ItemType.CLASS);
                            }
                            item.setNumberOfObjects((Long) pt.getColumnValue(e, 1));
                            item.setShallowSize(((Bytes) pt.getColumnValue(e, 2)).getValue());
                            return item;
                        })
                        .sort(Model.Histogram.Item.sortBy(sortBy, ascendingOrder))
                        .done();
                } else {
                    return PageView.empty();
                }
            }
            default: {
                throw new AnalysisException("Should not reach here");
            }
        }
    });
}

// Filters, sorts and pages dominator-tree rows into DefaultItem views.
private PageView<DominatorTree.DefaultItem> buildDefaultItems(ISnapshot snapshot, IResultTree tree,
                                                              List<?> elements,
                                                              boolean ascendingOrder, String sortBy,
                                                              String searchText,
                                                              SearchType searchType,
                                                              PagingRequest pagingRequest) {
    final AtomicInteger afterFilterCount = new AtomicInteger(0);
    List<DominatorTree.DefaultItem> items = elements.stream()
        .map(e -> $(() -> new VirtualDefaultItem(snapshot, tree, e)))
        .filter(SearchPredicate.createPredicate(searchText, searchType))
        // count survivors so the page view can report the filtered total
        .peek(filtered -> afterFilterCount.incrementAndGet())
        .sorted(DominatorTree.DefaultItem.sortBy(sortBy, ascendingOrder))
        .skip(pagingRequest.from())
        .limit(pagingRequest.getPageSize())
        .collect(Collectors.toList());
    return new PageView(pagingRequest, afterFilterCount.get(), items);
}

// Filters, sorts and pages dominator-tree rows into ClassItem views.
private PageView<DominatorTree.ClassItem> buildClassItems(ISnapshot snapshot, IResultTree tree,
                                                          List<?> elements, boolean ascendingOrder,
                                                          String sortBy, String searchText,
                                                          SearchType searchType,
                                                          PagingRequest pagingRequest) {
    final AtomicInteger afterFilterCount = new AtomicInteger(0);
    List<DominatorTree.ClassItem> items = elements.stream()
        .map(e -> $(() -> new VirtualClassItem(snapshot, tree, e)))
        .filter(SearchPredicate.createPredicate(searchText, searchType))
        .peek(filtered -> afterFilterCount.incrementAndGet())
        .sorted(DominatorTree.ClassItem.sortBy(sortBy, ascendingOrder))
        .skip(pagingRequest.from())
        .limit(pagingRequest.getPageSize())
        .collect(Collectors.toList());
    return new PageView(pagingRequest, afterFilterCount.get(), items);
}

// Filters, sorts and pages dominator-tree rows into ClassLoaderItem views.
private PageView<DominatorTree.ClassLoaderItem> buildClassLoaderItems(ISnapshot snapshot,
                                                                      IResultTree tree,
                                                                      List<?> elements,
                                                                      boolean ascendingOrder,
                                                                      String sortBy,
                                                                      String searchText,
                                                                      SearchType searchType,
                                                                      PagingRequest pagingRequest) {
    final AtomicInteger afterFilterCount = new AtomicInteger(0);
    List<DominatorTree.ClassLoaderItem> items = elements.stream()
        .map(e -> $(() -> new VirtualClassLoaderItem(snapshot, tree, e)))
        .filter(SearchPredicate.createPredicate(searchText, searchType))
        .peek(filtered -> afterFilterCount.incrementAndGet())
        .sorted(DominatorTree.ClassLoaderItem.sortBy(sortBy, ascendingOrder))
        .skip(pagingRequest.from())
        .limit(pagingRequest.getPageSize())
        .collect(Collectors.toList());
    return new PageView(pagingRequest, afterFilterCount.get(), items);
}

// Filters, sorts and pages dominator-tree rows into PackageItem views.
private PageView<DominatorTree.PackageItem> buildPackageItems(ISnapshot snapshot, IResultTree tree,
                                                              List<?> elements,
                                                              boolean ascendingOrder, String sortBy,
                                                              String searchText,
                                                              SearchType searchType,
                                                              PagingRequest pagingRequest) {
    final AtomicInteger afterFilterCount = new AtomicInteger(0);
    List<DominatorTree.PackageItem> items = elements.stream()
        .map(e -> $(() -> new VirtualPackageItem(snapshot, tree, e)))
        .filter(SearchPredicate.createPredicate(searchText, searchType))
        .peek(filtered -> afterFilterCount.incrementAndGet())
        .sorted(DominatorTree.PackageItem.sortBy(sortBy, ascendingOrder))
        .skip(pagingRequest.from())
        .limit(pagingRequest.getPageSize())
        .collect(Collectors.toList());
    return new PageView(pagingRequest, afterFilterCount.get(), items);
}

// --- head of a method whose body continues past this chunk (kept verbatim) ---
@Override
public PageView<?
extends DominatorTree.Item> getRootsOfDominatorTree(DominatorTree.Grouping groupBy, String sortBy, boolean ascendingOrder, String searchText, SearchType searchType, int page, int pageSize) { return $(() -> { Map<String, Object> args = new HashMap<>(); IResultTree tree = queryByCommand(context, "dominator_tree -groupBy " + groupBy.name(), args); switch (groupBy) { case NONE: return buildDefaultItems(context.snapshot, tree, tree.getElements(), ascendingOrder, sortBy, searchText, searchType, new PagingRequest(page, pageSize)); case BY_CLASS: return buildClassItems(context.snapshot, tree, tree.getElements(), ascendingOrder, sortBy, searchText, searchType, new PagingRequest(page, pageSize)); case BY_CLASSLOADER: return buildClassLoaderItems(context.snapshot, tree, tree.getElements(), ascendingOrder, sortBy, searchText, searchType, new PagingRequest(page, pageSize)); case BY_PACKAGE: return buildPackageItems(context.snapshot, tree, tree.getElements(), ascendingOrder, sortBy, searchText, searchType, new PagingRequest(page, pageSize)); default: throw new AnalysisException("Should not reach here"); } }); } @Override public PageView<? extends DominatorTree.Item> getChildrenOfDominatorTree(DominatorTree.Grouping groupBy, String sortBy, boolean ascendingOrder, int parentObjectId, int[] idPathInResultTree, int page, int pageSize) { return $(() -> { Map<String, Object> args = new HashMap<>(); IResultTree tree = queryByCommand(context, "dominator_tree -groupBy " + groupBy.name(), args); switch (groupBy) { case NONE: Object parent = Helper.fetchObjectInResultTree(tree, idPathInResultTree); return buildDefaultItems(context.snapshot, tree, tree.getChildren(parent), ascendingOrder, sortBy, null, null, new PagingRequest(page, pageSize)); case BY_CLASS: Object object = Helper.fetchObjectInResultTree(tree, idPathInResultTree); List<?> elements = object == null ? 
Collections.emptyList() : tree.getChildren(object); return buildClassItems(context.snapshot, tree, elements, ascendingOrder, sortBy, null, null, new PagingRequest(page , pageSize)); case BY_CLASSLOADER: List<?> children = new ExoticTreeFinder(tree) .setGetChildrenCallback(tree::getChildren) .setPredicate((theTree, theNode) -> theTree.getContext(theNode).getObjectId()) .findChildrenOf(parentObjectId); if (children != null) { return buildClassLoaderItems(context.snapshot, tree, children, ascendingOrder, sortBy, null, null, new PagingRequest(page, pageSize)); } else { return PageView.empty(); } case BY_PACKAGE: Object targetParentNode = new ExoticTreeFinder(tree) .setGetChildrenCallback(node -> { Map<String, ?> subPackages = ReflectionUtil.getFieldValueOrNull(node, "subPackages"); if (subPackages != null) { return new ArrayList<>(subPackages.values()); } else { return null; } }) .setPredicate((theTree, theNode) -> { try { java.lang.reflect.Field field = theNode.getClass().getSuperclass().getSuperclass().getDeclaredField("label"); field.setAccessible(true); String labelName = (String) field.get(theNode); return labelName.hashCode(); } catch (Throwable e) { e.printStackTrace(); } return null; }) .findTargetParentNode(parentObjectId); if (targetParentNode != null) { Map<String, ?> packageMap = ReflectionUtil.getFieldValueOrNull(targetParentNode, "subPackages"); List<?> elems = new ArrayList<>(); if (packageMap != null) { if (packageMap.size() == 0) { elems = ReflectionUtil.getFieldValueOrNull(targetParentNode, "classes"); } else { elems = new ArrayList<>(packageMap.values()); } } if (elems != null) { return buildPackageItems(context.snapshot, tree, elems, ascendingOrder, sortBy, null, null, new PagingRequest(page, pageSize)); } else { return PageView.empty(); } } else { return PageView.empty(); } default: throw new AnalysisException("Should not reach here"); } }); } interface R { void run() throws Exception; } interface RV<V> { V run() throws Exception; } private static 
class ProviderImpl implements HeapDumpAnalyzer.Provider { @Override public HeapDumpAnalyzer provide(Path path, Map<String, String> arguments, ProgressListener listener) { return new HeapDumpAnalyzerImpl(new AnalysisContext( $(() -> { try { HprofPreferencesAccess.setStrictness(arguments.get("strictness")); return SnapshotFactory.openSnapshot(path.toFile(), arguments, new ProgressListenerImpl(listener)); } finally { HprofPreferencesAccess.setStrictness(null); } }) )); } } }
2,962
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/VirtualClassLoaderItem.java
/******************************************************************************** * Copyright (c) 2020 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.impl; import org.eclipse.jifa.common.util.EscapeUtil; import org.eclipse.mat.SnapshotException; import org.eclipse.mat.query.Bytes; import org.eclipse.mat.query.IContextObjectSet; import org.eclipse.mat.query.IStructuredResult; import org.eclipse.mat.snapshot.ISnapshot; import org.eclipse.jifa.common.util.UseAccessor; import org.eclipse.jifa.hda.api.AnalysisException; import static org.eclipse.jifa.hda.api.Model.DominatorTree; @UseAccessor public class VirtualClassLoaderItem extends DominatorTree.ClassLoaderItem { static final int COLUMN_LABEL = 0; static final int COLUMN_OBJECTS = 1; static final int COLUMN_SHALLOW = 2; static final int COLUMN_RETAINED = 3; static final int COLUMN_PERCENT = 4; transient final ISnapshot snapshot; transient final IStructuredResult results; transient final Object e; public VirtualClassLoaderItem(final ISnapshot snapshot, final IStructuredResult results, final Object e) { this.snapshot = snapshot; this.results = results; this.e = e; this.objectId = results.getContext(e).getObjectId(); } @Override public String getSuffix() { return null; } @Override public int getObjectId() { return objectId; } @Override public int getObjectType() { try { return HeapDumpAnalyzerImpl.typeOf(snapshot.getObject(objectId)); } catch (SnapshotException se) { throw new AnalysisException(se); } } @Override public boolean isGCRoot() { return snapshot.isGCRoot(objectId); } @Override public 
String getLabel() { return EscapeUtil.unescapeLabel((String) results.getColumnValue(e, COLUMN_LABEL)); } @Override public long getObjects() { Object value = results.getColumnValue(e, COLUMN_OBJECTS); if (value != null) { return (Integer) value; } else { return 0; } } @Override public int[] getObjectIds() { return ((IContextObjectSet) results.getContext(e)).getObjectIds(); } @Override public long getShallowSize() { return ((Bytes) results.getColumnValue(e, COLUMN_SHALLOW)).getValue(); } @Override public long getRetainedSize() { return ((Bytes) results.getColumnValue(e, COLUMN_RETAINED)).getValue(); } @Override public double getPercent() { return (Double) results.getColumnValue(e, COLUMN_PERCENT); } }
2,963
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/ProgressListenerImpl.java
/******************************************************************************** * Copyright (c) 2021, 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.impl; import org.eclipse.jifa.hda.api.FilterProgressListener; import org.eclipse.jifa.common.listener.ProgressListener; import org.eclipse.mat.util.IProgressListener; public class ProgressListenerImpl extends FilterProgressListener implements IProgressListener { private boolean cancelled = false; public ProgressListenerImpl(ProgressListener listener) { super(listener); } @Override public void done() { } @Override public boolean isCanceled() { return cancelled; } @Override public void setCanceled(boolean b) { cancelled = b; } @Override public void sendUserMessage(Severity severity, String s, Throwable throwable) { sendUserMessage(Level.valueOf(severity.name()), s, throwable); } }
2,964
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/impl/src/main/java/org/eclipse/jifa/hda/impl/VirtualPackageItem.java
/******************************************************************************** * Copyright (c) 2020 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.impl; import org.eclipse.jifa.common.util.EscapeUtil; import org.eclipse.mat.query.Bytes; import org.eclipse.mat.query.IContextObjectSet; import org.eclipse.mat.query.IStructuredResult; import org.eclipse.mat.snapshot.ISnapshot; import org.eclipse.jifa.common.util.ReflectionUtil; import org.eclipse.jifa.common.util.UseAccessor; import static org.eclipse.jifa.hda.api.Model.DominatorTree; import java.util.Map; @UseAccessor public class VirtualPackageItem extends DominatorTree.PackageItem { static final int COLUMN_LABEL = 0; static final int COLUMN_OBJECTS = 1; static final int COLUMN_SHALLOW = 2; static final int COLUMN_RETAINED = 3; static final int COLUMN_PERCENT = 4; transient final ISnapshot snapshot; transient final IStructuredResult results; transient final Object e; public VirtualPackageItem(final ISnapshot snapshot, final IStructuredResult results, final Object e) { this.snapshot = snapshot; this.results = results; this.e = e; this.objectId = results.getContext(e).getObjectId(); this.isObjType = false; } @Override public String getSuffix() { return null; } @Override public int getObjectId() { return getLabel().hashCode(); } @Override public int getObjectType() { Map<String, ?> subPackages = ReflectionUtil.getFieldValueOrNull(e, "subPackages"); if (subPackages.size() == 0) { return DominatorTree.ItemType.CLASS; } else { return DominatorTree.ItemType.PACKAGE; } } @Override public boolean 
isGCRoot() { return snapshot.isGCRoot(objectId); } @Override public String getLabel() { return EscapeUtil.unescapeLabel((String) results.getColumnValue(e, COLUMN_LABEL)); } @Override public long getObjects() { Object value = results.getColumnValue(e, COLUMN_OBJECTS); if (value != null) { return (Integer) value; } else { return 0; } } @Override public int[] getObjectIds() { return ((IContextObjectSet) results.getContext(e)).getObjectIds(); } @Override public long getShallowSize() { return ((Bytes) results.getColumnValue(e, COLUMN_SHALLOW)).getValue(); } @Override public long getRetainedSize() { return ((Bytes) results.getColumnValue(e, COLUMN_RETAINED)).getValue(); } @Override public double getPercent() { return (Double) results.getColumnValue(e, COLUMN_PERCENT); } }
2,965
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/provider/src/main/java/org/eclipse/jifa/hdp
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/provider/src/main/java/org/eclipse/jifa/hdp/provider/MATProvider.java
/******************************************************************************** * Copyright (c) 2021, 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hdp.provider; import org.eclipse.jifa.common.JifaException; import org.eclipse.jifa.hda.api.HeapDumpAnalyzer; import org.eclipse.jifa.common.listener.ProgressListener; import org.osgi.framework.Bundle; import org.osgi.framework.BundleException; import org.osgi.framework.Constants; import org.osgi.framework.launch.Framework; import org.osgi.framework.launch.FrameworkFactory; import java.io.File; import java.nio.file.Path; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.ServiceLoader; public class MATProvider implements HeapDumpAnalyzer.Provider { private HeapDumpAnalyzer.Provider provider; public MATProvider() { init(); } @Override public HeapDumpAnalyzer provide(Path path, Map<String, String> arguments, ProgressListener listener) { return provider.provide(path, arguments, listener); } private void init() { Map<String, String> config = new HashMap<>(); config.put(Constants.FRAMEWORK_STORAGE_CLEAN, Constants.FRAMEWORK_STORAGE_CLEAN_ONFIRSTINIT); String apiBase = "org.eclipse.jifa.hda.api"; String commonBase = "org.eclipse.jifa.common"; String[] extras = { apiBase, commonBase, commonBase + ".aux", commonBase + ".enums", commonBase + ".request", commonBase + ".util", commonBase + ".cache", commonBase + ".vo", commonBase + ".vo.support", commonBase + ".listener", "net.sf.cglib.beans", 
"net.sf.cglib.core", "net.sf.cglib.core.internal", "net.sf.cglib.proxy", "net.sf.cglib.reflect", "net.sf.cglib.transform", "net.sf.cglib.transform.impl", "net.sf.cglib.util", }; config.put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, String.join(",", extras)); try { Framework framework = ServiceLoader.load(FrameworkFactory.class).iterator().next().newFramework(config); framework.start(); File[] files = Objects.requireNonNull(new File(System.getProperty("mat-deps")).listFiles()); List<Bundle> bundles = new ArrayList<>(); for (File file : files) { String name = file.getName(); // org.eclipse.osgi is the system bundle if (name.endsWith(".jar") && !name.startsWith("org.eclipse.osgi_")) { Bundle b = framework.getBundleContext().installBundle(file.toURI().toString()); bundles.add(b); } } ArrayList validNames = new ArrayList(); validNames.add("org.apache.felix.scr"); validNames.add("org.eclipse.equinox.event"); validNames.add("org.eclipse.jifa.hda.implementation"); for (Bundle bundle : bundles) { if (validNames.contains(bundle.getSymbolicName())) { System.out.println("starting bundle: " + bundle); bundle.start(); } } provider = framework.getBundleContext() .getService(framework.getBundleContext() .getServiceReference(HeapDumpAnalyzer.Provider.class)); } catch (BundleException be) { throw new JifaException(be); } } }
2,966
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda/api/FilterProgressListener.java
/******************************************************************************** * Copyright (c) 2021, 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.api; import org.eclipse.jifa.common.listener.ProgressListener; public class FilterProgressListener implements ProgressListener { ProgressListener listener; public FilterProgressListener(ProgressListener listener) { assert listener != null; this.listener = listener; } @Override public void beginTask(String s, int i) { listener.beginTask(s, i); } @Override public void subTask(String s) { listener.subTask(s); } @Override public void worked(int i) { listener.worked(i); } @Override public void sendUserMessage(Level level, String s, Throwable throwable) { listener.sendUserMessage(level, s, throwable); } @Override public String log() { return listener.log(); } @Override public double percent() { return listener.percent(); } }
2,967
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda/api/AnalysisException.java
/********************************************************************************
 * Copyright (c) 2021 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.api;

/**
 * Unchecked exception thrown when heap-dump analysis fails, typically
 * wrapping an underlying MAT {@code SnapshotException}.
 */
public class AnalysisException extends RuntimeException {

    public AnalysisException(String message) {
        super(message);
    }

    public AnalysisException(Throwable cause) {
        super(cause);
    }

    /**
     * Added for completeness so callers can attach context to a wrapped cause
     * instead of dropping one of the two.
     */
    public AnalysisException(String message, Throwable cause) {
        super(message, cause);
    }
}
2,968
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda/api/HeapDumpAnalyzer.java
/********************************************************************************
 * Copyright (c) 2021, 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.hda.api;

import org.eclipse.jifa.common.listener.ProgressListener;
import org.eclipse.jifa.common.vo.PageView;
import org.eclipse.jifa.common.vo.support.SearchType;

import java.nio.file.Path;
import java.util.List;
import java.util.Map;

import static org.eclipse.jifa.hda.api.Model.ClassLoader;
import static org.eclipse.jifa.hda.api.Model.*;

/**
 * Facade over an opened heap dump. Implementations expose the dump's objects,
 * GC roots, threads, histograms and dominator tree as pageable views (see the
 * model types in {@link Model}). Instances are created via {@link Provider}
 * and must be released with {@link #dispose()}.
 */
public interface HeapDumpAnalyzer {

    /** Releases the underlying snapshot and any resources held for it. */
    void dispose();

    // ---- Overview --------------------------------------------------------

    Overview.Details getDetails();

    Map<String, String> getSystemProperties();

    List<Overview.BigObject> getBigObjects();

    // ---- Single-object inspection ---------------------------------------

    JavaObject getObjectInfo(int objectId);

    InspectorView getInspectorView(int objectId);

    PageView<FieldView> getFields(int objectId, int page, int pageSize);

    PageView<FieldView> getStaticFields(int objectId, int page, int pageSize);

    /** Maps a heap address to the analyzer's internal object id. */
    int mapAddressToId(long address);

    String getObjectValue(int objectId);

    // ---- Class loaders ---------------------------------------------------

    ClassLoader.Summary getSummaryOfClassLoaders();

    PageView<ClassLoader.Item> getClassLoaders(int page, int pageSize);

    PageView<ClassLoader.Item> getChildrenOfClassLoader(int classLoaderId, int page, int pageSize);

    // ---- Unreachable objects / direct byte buffers -----------------------

    UnreachableObject.Summary getSummaryOfUnreachableObjects();

    PageView<UnreachableObject.Item> getUnreachableObjects(int page, int pageSize);

    DirectByteBuffer.Summary getSummaryOfDirectByteBuffers();

    PageView<DirectByteBuffer.Item> getDirectByteBuffers(int page, int pageSize);

    // ---- References ------------------------------------------------------

    PageView<JavaObject> getOutboundOfObject(int objectId, int page, int pageSize);

    PageView<JavaObject> getInboundOfObject(int objectId, int page, int pageSize);

    // ---- GC roots & strings ----------------------------------------------

    List<GCRoot.Item> getGCRoots();

    PageView<TheString.Item> getStrings(String pattern, int page, int pageSize);

    PageView<GCRoot.Item> getClassesOfGCRoot(int rootTypeIndex, int page, int pageSize);

    PageView<JavaObject> getObjectsOfGCRoot(int rootTypeIndex, int classIndex, int page, int pageSize);

    // ---- Class references ------------------------------------------------

    ClassReferrer.Item getOutboundClassOfClassReference(int objectId);

    ClassReferrer.Item getInboundClassOfClassReference(int objectId);

    PageView<ClassReferrer.Item> getOutboundsOfClassReference(int[] objectIds, int page, int pageSize);

    PageView<ClassReferrer.Item> getInboundsOfClassReference(int[] objectIds, int page, int pageSize);

    // ---- Comparison against another dump ---------------------------------

    Comparison.Summary getSummaryOfComparison(Path other);

    PageView<Comparison.Item> getItemsOfComparison(Path other, int page, int pageSize);

    // ---- Merged paths to GC roots ----------------------------------------

    PageView<GCRootPath.MergePathToGCRootsTreeNode> getRootsOfMergePathToGCRootsByClassId(int classId,
        GCRootPath.Grouping grouping, int page, int pageSize);

    PageView<GCRootPath.MergePathToGCRootsTreeNode> getRootsOfMergePathToGCRootsByObjectIds(int[] objectIds,
        GCRootPath.Grouping grouping, int page, int pageSize);

    PageView<GCRootPath.MergePathToGCRootsTreeNode> getChildrenOfMergePathToGCRootsByClassId(int classId,
        int[] objectIdPathInGCPathTree, GCRootPath.Grouping grouping, int page, int pageSize);

    PageView<GCRootPath.MergePathToGCRootsTreeNode> getChildrenOfMergePathToGCRootsByObjectIds(int[] objectIds,
        int[] objectIdPathInGCPathTree, GCRootPath.Grouping grouping, int page, int pageSize);

    GCRootPath.Item getPathToGCRoots(int originId, int skip, int count);

    // ---- Reports & queries -----------------------------------------------

    LeakReport getLeakReport();

    OQLResult getOQLResult(String oql, String sortBy, boolean ascendingOrder, int page, int pageSize);

    CalciteSQLResult getCalciteSQLResult(String sql, String sortBy, boolean ascendingOrder, int page, int pageSize);

    // ---- Threads ---------------------------------------------------------

    Model.Thread.Summary getSummaryOfThreads(String searchText, SearchType searchType);

    PageView<Model.Thread.Item> getThreads(String sortBy, boolean ascendingOrder, String searchText,
        SearchType searchType, int page, int pageSize);

    List<Model.Thread.StackFrame> getStackTrace(int objectId);

    List<Model.Thread.LocalVariable> getLocalVariables(int objectId, int depth, boolean firstNonNativeFrame);

    // ---- Duplicated classes ----------------------------------------------

    PageView<DuplicatedClass.ClassItem> getDuplicatedClasses(String searchText, SearchType searchType, int page,
        int pageSize);

    PageView<DuplicatedClass.ClassLoaderItem> getClassloadersOfDuplicatedClass(int index, int page, int pageSize);

    // ---- Histogram -------------------------------------------------------

    PageView<Histogram.Item> getHistogram(Histogram.Grouping groupingBy, int[] ids, String sortBy,
        boolean ascendingOrder, String searchText, SearchType searchType, int page, int pageSize);

    PageView<JavaObject> getHistogramObjects(int classId, int page, int pageSize);

    PageView<Histogram.Item> getChildrenOfHistogram(Histogram.Grouping groupBy, int[] ids, String sortBy,
        boolean ascendingOrder, int parentObjectId, int page, int pageSize);

    // ---- Dominator tree --------------------------------------------------

    PageView<? extends DominatorTree.Item> getRootsOfDominatorTree(DominatorTree.Grouping groupBy, String sortBy,
        boolean ascendingOrder, String searchText, SearchType searchType, int page, int pageSize);

    PageView<? extends DominatorTree.Item> getChildrenOfDominatorTree(DominatorTree.Grouping groupBy, String sortBy,
        boolean ascendingOrder, int parentObjectId, int[] idPathInResultTree, int page, int pageSize);

    /** Factory for opening a dump file and producing an analyzer over it. */
    interface Provider {
        HeapDumpAnalyzer provide(Path path, Map<String, String> arguments, ProgressListener listener);
    }
}
2,969
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/api/src/main/java/org/eclipse/jifa/hda/api/Model.java
/******************************************************************************** * Copyright (c) 2021 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.hda.api; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; import org.eclipse.jifa.common.util.ErrorUtil; import org.eclipse.jifa.common.vo.PageView; import org.eclipse.jifa.common.vo.support.SearchType; import org.eclipse.jifa.common.vo.support.Searchable; import org.eclipse.jifa.common.vo.support.SortTableGenerator; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Map; public interface Model { interface DominatorTree { interface ItemType { int CLASS = 1; int CLASS_LOADER = 2; int SUPER_CLASS = 5; int PACKAGE = 6; } enum Grouping { NONE, BY_CLASS, BY_CLASSLOADER, BY_PACKAGE; } @Data class Item { public String label; public String suffix; public int objectId; public int objectType; public boolean gCRoot; public long shallowSize; public long retainedSize; public double percent; public boolean isObjType = true; } @Data @EqualsAndHashCode(callSuper = true) class ClassLoaderItem extends Item implements Searchable { private static Map<String, Comparator<ClassLoaderItem>> sortTable = new SortTableGenerator<ClassLoaderItem>() .add("id", ClassLoaderItem::getObjectId) .add("shallowHeap", ClassLoaderItem::getShallowSize) .add("retainedHeap", ClassLoaderItem::getRetainedSize) .add("percent", ClassLoaderItem::getPercent) .add("Objects", ClassLoaderItem::getObjects) .build(); public long objects; 
private int[] objectIds; public static Comparator<ClassLoaderItem> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_PERCENT: return getPercent(); case BY_OBJ_NUM: return getObjects(); case BY_RETAINED_SIZE: return getRetainedSize(); case BY_SHALLOW_SIZE: return getShallowSize(); default: ErrorUtil.shouldNotReachHere(); } return null; } } @Data @EqualsAndHashCode(callSuper = true) class ClassItem extends Item implements Searchable { private static Map<String, Comparator<ClassItem>> sortTable = new SortTableGenerator<ClassItem>() .add("id", ClassItem::getObjectId) .add("shallowHeap", ClassItem::getShallowSize) .add("retainedHeap", ClassItem::getRetainedSize) .add("percent", ClassItem::getPercent) .add("Objects", ClassItem::getObjects) .build(); private int objects; private int[] objectIds; public static Comparator<ClassItem> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_PERCENT: return getPercent(); case BY_OBJ_NUM: return getObjects(); case BY_RETAINED_SIZE: return getRetainedSize(); case BY_SHALLOW_SIZE: return getShallowSize(); default: ErrorUtil.shouldNotReachHere(); } return null; } } @Data @EqualsAndHashCode(callSuper = true) class DefaultItem extends Item implements Searchable { private static Map<String, Comparator<DefaultItem>> sortTable = new SortTableGenerator<DefaultItem>() .add("id", DefaultItem::getObjectId) .add("shallowHeap", DefaultItem::getShallowSize) .add("retainedHeap", DefaultItem::getRetainedSize) .add("percent", DefaultItem::getPercent) .build(); public static Comparator<DefaultItem> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? 
sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_PERCENT: return getPercent(); case BY_OBJ_NUM: return null; case BY_RETAINED_SIZE: return getRetainedSize(); case BY_SHALLOW_SIZE: return getShallowSize(); default: ErrorUtil.shouldNotReachHere(); } return null; } } @Data @EqualsAndHashCode(callSuper = true) class PackageItem extends Item implements Searchable { private static Map<String, Comparator<PackageItem>> sortTable = new SortTableGenerator<PackageItem>() .add("id", PackageItem::getObjectId) .add("shallowHeap", PackageItem::getShallowSize) .add("retainedHeap", PackageItem::getRetainedSize) .add("percent", PackageItem::getPercent) .add("Objects", PackageItem::getObjects) .build(); private long objects; private int[] objectIds; public static Comparator<PackageItem> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_PERCENT: return getPercent(); case BY_OBJ_NUM: return getObjects(); case BY_RETAINED_SIZE: return getRetainedSize(); case BY_SHALLOW_SIZE: return getShallowSize(); default: ErrorUtil.shouldNotReachHere(); } return null; } } } interface Histogram { enum Grouping { BY_CLASS, BY_SUPERCLASS, BY_CLASSLOADER, BY_PACKAGE; } interface ItemType { int CLASS = 1; int CLASS_LOADER = 2; int SUPER_CLASS = 5; int PACKAGE = 6; } @Data @NoArgsConstructor class Item implements Searchable { private static Map<String, Comparator<Item>> sortTable = new SortTableGenerator<Item>() .add("id", Item::getObjectId) .add("numberOfObjects", Item::getNumberOfObjects) .add("shallowSize", Item::getShallowSize) .add("retainedSize", Item::getRetainedSize) .build(); public long numberOfObjects; public long shallowSize; public long retainedSize; public String label; public 
int objectId; public int type; public Item(int objectId, String label, int type, long numberOfObjects, long shallowSize, long retainedSize) { this.objectId = objectId; this.label = label; this.type = type; this.numberOfObjects = numberOfObjects; this.shallowSize = shallowSize; this.retainedSize = retainedSize; } public static Comparator<Item> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_OBJ_NUM: return getNumberOfObjects(); case BY_RETAINED_SIZE: return getRetainedSize(); case BY_SHALLOW_SIZE: return getShallowSize(); default: ErrorUtil.shouldNotReachHere(); } return null; } } } interface DuplicatedClass { @Data class ClassItem implements Searchable { public String label; public int count; @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getLabel(); case BY_CLASSLOADER_COUNT: return (long) getCount(); default: ErrorUtil.shouldNotReachHere(); } return null; } } @Data class ClassLoaderItem { public String label; public String suffix; public int definedClassesCount; public int instantiatedObjectsCount; public int objectId; public boolean gCRoot; } } interface Thread { @Data class Summary { public long totalSize; public long shallowHeap; public long retainedHeap; } @Data class Item implements Searchable { public static Map<String, Comparator<Item>> sortTable = new SortTableGenerator<Item>() .add("id", Item::getObjectId) .add("shallowHeap", Item::getShallowSize) .add("retainedHeap", Item::getRetainedSize) .add("daemon", Item::isDaemon) .add("contextClassLoader", Item::getContextClassLoader) .add("name", Item::getName) .build(); public int objectId; public String object; public String name; public long shallowSize; public long retainedSize; public String contextClassLoader; public boolean hasStack; public boolean daemon; public 
Item(int objectId, String object, String name, long shallowSize, long retainedSize, String contextClassLoader, boolean hasStack, boolean daemon) { this.objectId = objectId; this.object = object; this.name = name; this.shallowSize = shallowSize; this.retainedSize = retainedSize; this.contextClassLoader = contextClassLoader; this.hasStack = hasStack; this.daemon = daemon; } public Item() {} public static Comparator<Item> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } @Override public Object getBySearchType(SearchType type) { switch (type) { case BY_NAME: return getName(); case BY_SHALLOW_SIZE: return getShallowSize(); case BY_RETAINED_SIZE: return getRetainedSize(); case BY_CONTEXT_CLASSLOADER_NAME: return getContextClassLoader(); default: ErrorUtil.shouldNotReachHere(); } return null; } } @Data @EqualsAndHashCode(callSuper = true) class LocalVariable extends JavaObject { } @Data class StackFrame { public String stack; public boolean hasLocal; public boolean firstNonNativeFrame; public long maxLocalsRetainedSize; public StackFrame(String stack, boolean hasLocal, long maxLocalsRetainedSize) { this.stack = stack; this.hasLocal = hasLocal; this.maxLocalsRetainedSize = maxLocalsRetainedSize; } } } interface CalciteSQLResult { int TREE = 1; int TABLE = 2; int TEXT = 3; int getType(); @Data class TableResult implements CalciteSQLResult { public int type = TABLE; public List<String> columns; public PageView<Entry> pv; public TableResult(List<String> columns, PageView<Entry> pv) { this.columns = columns; this.pv = pv; } @Data public static class Entry { public int objectId; public List<Object> values; public Entry(int objectId, List<Object> values) { this.objectId = objectId; this.values = values; } } } @Data class TextResult implements CalciteSQLResult { public int type = CalciteSQLResult.TEXT; public String text; public TextResult(String text) { this.text = text; } } @Data class TreeResult 
implements CalciteSQLResult { public PageView<JavaObject> pv; public int type = TREE; public TreeResult(PageView<JavaObject> pv) { this.pv = pv; } } } interface OQLResult { int TREE = 1; int TABLE = 2; int TEXT = 3; int getType(); @Data class TableResult implements OQLResult { public int type = TABLE; public List<String> columns; public PageView<Entry> pv; public TableResult(List<String> columns, PageView<Entry> pv) { this.columns = columns; this.pv = pv; } @Data public static class Entry { public int objectId; public List<Object> values; public Entry(int objectId, List<Object> values) { this.objectId = objectId; this.values = values; } } } @Data class TextResult implements OQLResult { public int type = OQLResult.TEXT; public String text; public TextResult(String text) { this.text = text; } } @Data class TreeResult implements OQLResult { public PageView<JavaObject> pv; public int type = TREE; public TreeResult(PageView<JavaObject> pv) { this.pv = pv; } } } interface GCRootPath { List<String> EXCLUDES = Arrays.asList("java.lang.ref.WeakReference:referent", "java.lang.ref.SoftReference:referent"); enum Grouping { FROM_GC_ROOTS, FROM_GC_ROOTS_BY_CLASS, FROM_OBJECTS_BY_CLASS } @Data class MergePathToGCRootsTreeNode { public int objectId; public String className; public int refObjects; public long shallowHeap; public long refShallowHeap; public long retainedHeap; public String suffix; public int objectType; public boolean gCRoot; } @Data class Item { public Node tree; public int count; public boolean hasMore; } @Data @EqualsAndHashCode(callSuper = true) class Node extends JavaObject { public boolean origin; public List<Node> children = new ArrayList<>(); public void addChild(Node child) { children.add(child); } public Node getChild(int objectId) { for (Node child : children) { if (child.getObjectId() == objectId) { return child; } } return null; } } } interface ClassReferrer { interface Type { int NEW = 0; int MIXED = 1; int OLD_FAD = 2; } @Data class Item { public 
String label; public int objects; public long shallowSize; public int objectId; public int[] objectIds; public int type; } } interface Comparison { @Data class Summary { public int totalSize; public long objects; public long shallowSize; } @Data class Item { public String className; public long objects; public long shallowSize; } } interface TheString { @Data class Item { public int objectId; public String label; public long shallowSize; public long retainedSize; } } interface GCRoot { @Data class Item { public String className; public int objects; public int objectId; public long shallowSize; public long retainedSize; } } interface DirectByteBuffer { @Data class Item { public int objectId; public String label; public int position; public int limit; public int capacity; } @Data class Summary { public int totalSize; public long position; public long limit; public long capacity; } } interface UnreachableObject { @Data class Item { public int objectId; public String className; public int objects; public long shallowSize; } @Data class Summary { public int totalSize; public int objects; public long shallowSize; } } interface Overview { @Data class BigObject { public String label; public int objectId; public double value; public String description; public BigObject(String label, int objectId, double value, String description) { this.label = label; this.objectId = objectId; this.value = value; this.description = description; } } @Data class Details { public String jvmInfo; public int identifierSize; public long creationDate; public int numberOfObjects; public int numberOfGCRoots; public int numberOfClasses; public int numberOfClassLoaders; public long usedHeapSize; public boolean generationInfoAvailable; public Details(String jvmInfo, int identifierSize, long creationDate, int numberOfObjects, int numberOfGCRoots, int numberOfClasses, int numberOfClassLoaders, long usedHeapSize, boolean generationInfoAvailable) { this.jvmInfo = jvmInfo; this.identifierSize = 
identifierSize; this.creationDate = creationDate; this.numberOfObjects = numberOfObjects; this.numberOfGCRoots = numberOfGCRoots; this.numberOfClasses = numberOfClasses; this.numberOfClassLoaders = numberOfClassLoaders; this.usedHeapSize = usedHeapSize; this.generationInfoAvailable = generationInfoAvailable; } } } interface ClassLoader { @Data class Item { public int objectId; public String prefix; public String label; public boolean classLoader; public boolean hasParent; public int definedClasses; public int numberOfInstances; } @Data class Summary { public int totalSize; public int definedClasses; public int numberOfInstances; } } @Data class LeakReport { public boolean useful; public String info; public String name; public List<Slice> slices; public List<Record> records; @Data public static class Slice { public String label; public int objectId; public double value; public String desc; public Slice(String label, int objectId, double value, String desc) { this.label = label; this.objectId = objectId; this.value = value; this.desc = desc; } } @Data public static class Record { public String name; public String desc; public int index; public List<ShortestPath> paths; } @Data public static class ShortestPath { public String label; public long shallowSize; public long retainedSize; public int objectId; public int objectType; public boolean gCRoot; public List<ShortestPath> children; } } @Data class JavaObject { public static final int CLASS_TYPE = 1; public static final int CLASS_LOADER_TYPE = 2; public static final int ARRAY_TYPE = 3; public static final int NORMAL_TYPE = 4; // FIXME: can we generate these code automatically? 
public static Map<String, Comparator<JavaObject>> sortTable = new SortTableGenerator<JavaObject>() .add("id", JavaObject::getObjectId) .add("shallowHeap", JavaObject::getShallowSize) .add("retainedHeap", JavaObject::getRetainedSize) .add("label", JavaObject::getLabel) .build(); public int objectId; public String prefix; public String label; public String suffix; public long shallowSize; public long retainedSize; public boolean hasInbound; public boolean hasOutbound; public int objectType; public boolean gCRoot; public static Comparator<JavaObject> sortBy(String field, boolean ascendingOrder) { return ascendingOrder ? sortTable.get(field) : sortTable.get(field).reversed(); } } @Data class InspectorView { public long objectAddress; public String name; public boolean gCRoot; public int objectType; public String classLabel; public boolean classGCRoot; public String superClassName; public String classLoaderLabel; public boolean classLoaderGCRoot; public long shallowSize; public long retainedSize; public String gcRootInfo; } class FieldView { public int fieldType; public String name; public String value; public int objectId; public FieldView(int fieldType, String name, String value) { this.fieldType = fieldType; this.name = name; this.value = value; } public FieldView(int fieldType, String name, String value, int objectId) { this(fieldType, name, value); this.objectId = objectId; } public FieldView() { } } }
2,970
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/hook/src/main/java/org/eclipse/mat/hprof
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/hook/src/main/java/org/eclipse/mat/hprof/ui/HprofPreferences.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.mat.hprof.ui; import org.eclipse.core.runtime.Platform; import org.eclipse.mat.hprof.HprofPlugin; public class HprofPreferences { public static final String STRICTNESS_PREF = "hprofStrictness"; //$NON-NLS-1$ public static final HprofStrictness DEFAULT_STRICTNESS = HprofStrictness.STRICTNESS_STOP; public static final String ADDITIONAL_CLASS_REFERENCES = "hprofAddClassRefs"; //$NON-NLS-1$ public static ThreadLocal<HprofStrictness> TL = new ThreadLocal<>(); public static void setStrictness(HprofStrictness strictness) { TL.set(strictness); } public static HprofStrictness getCurrentStrictness() { HprofStrictness strictness = TL.get(); return strictness != null ? 
strictness : DEFAULT_STRICTNESS; } public static boolean useAdditionalClassReferences() { return Platform.getPreferencesService().getBoolean(HprofPlugin.getDefault().getBundle().getSymbolicName(), HprofPreferences.ADDITIONAL_CLASS_REFERENCES, false, null); } public enum HprofStrictness { STRICTNESS_STOP("hprofStrictnessStop"), //$NON-NLS-1$ STRICTNESS_WARNING("hprofStrictnessWarning"), //$NON-NLS-1$ STRICTNESS_PERMISSIVE("hprofStrictnessPermissive"); //$NON-NLS-1$ private final String name; HprofStrictness(String name) { this.name = name; } public static HprofStrictness parse(String value) { if (value != null && value.length() > 0) { for (HprofStrictness strictness : values()) { if (strictness.toString().equals(value)) { return strictness; } } } return DEFAULT_STRICTNESS; } @Override public String toString() { return name; } } }
2,971
0
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/hook/src/main/java/org/eclipse/mat/hprof
Create_ds/eclipse-jifa/backend/heap-dump-analyzer/hook/src/main/java/org/eclipse/mat/hprof/extension/HprofPreferencesAccess.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.mat.hprof.extension;

import org.eclipse.mat.hprof.ui.HprofPreferences;

/**
 * Bridge that lets external code configure the HPROF parsing strictness
 * using plain strings instead of the {@link HprofPreferences.HprofStrictness} enum.
 */
public final class HprofPreferencesAccess {

    /**
     * Translates a textual strictness name into the enum constant.
     * "warn" and "permissive" are recognized; anything else — including
     * null — falls back to {@link HprofPreferences#DEFAULT_STRICTNESS}.
     */
    private static HprofPreferences.HprofStrictness parseStrictness(String strictness) {
        if ("warn".equals(strictness)) {
            return HprofPreferences.HprofStrictness.STRICTNESS_WARNING;
        }
        if ("permissive".equals(strictness)) {
            return HprofPreferences.HprofStrictness.STRICTNESS_PERMISSIVE;
        }
        return HprofPreferences.DEFAULT_STRICTNESS;
    }

    /**
     * Applies the given strictness (by name) to the calling thread.
     *
     * @param strictness "warn", "permissive", or anything else for the default
     */
    public static void setStrictness(String strictness) {
        HprofPreferences.setStrictness(parseStrictness(strictness));
    }
}
2,972
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa/tda/TestSerDesParser.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda; import org.eclipse.jifa.tda.model.Snapshot; import org.eclipse.jifa.tda.parser.ParserException; import org.eclipse.jifa.tda.parser.SerDesParser; import org.junit.Assert; import org.junit.Test; import java.net.URISyntaxException; import static org.eclipse.jifa.common.listener.ProgressListener.NoOpProgressListener; public class TestSerDesParser extends TestBase { @Test public void test() throws ParserException, URISyntaxException { SerDesParser serDesAnalyzer = new SerDesParser(analyzer); Snapshot first = serDesAnalyzer.parse(pathOfResource("jstack_8.log"), NoOpProgressListener); Snapshot second = serDesAnalyzer.parse(pathOfResource("jstack_8.log"), NoOpProgressListener); Assert.assertEquals(first, second); } }
2,973
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa/tda/TestPool.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda;

import org.eclipse.jifa.tda.model.Pool;
import org.junit.Assert;
import org.junit.Test;

public class TestPool extends TestBase {

    /**
     * The pool must deduplicate equal values: three equal "abc" strings
     * collapse into one entry, so only two distinct values remain.
     */
    @Test
    public void test() {
        Pool<String> pool = new Pool<>();
        pool.add("abc");
        pool.add("ab" + "c");
        pool.add("a" + "bc");
        pool.add("cba");
        Assert.assertEquals(2, pool.size());
        pool.freeze();
    }
}
2,974
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa/tda/TestBase.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda; import org.apache.commons.io.IOUtils; import org.eclipse.jifa.tda.model.Snapshot; import org.eclipse.jifa.tda.parser.JStackParser; import org.eclipse.jifa.tda.parser.ParserException; import java.io.FileOutputStream; import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import static org.eclipse.jifa.common.listener.ProgressListener.NoOpProgressListener; public class TestBase { protected final JStackParser analyzer = new JStackParser(); protected Path pathOfResource(String name) throws URISyntaxException { return Paths.get(this.getClass().getClassLoader().getResource(name).toURI()); } protected Path createTempFile(String content) throws IOException { Path path = Files.createTempFile("test", ".tmp"); path.toFile().deleteOnExit(); IOUtils.write(content, new FileOutputStream(path.toFile()), Charset.defaultCharset()); return path; } protected Snapshot parseString(String content) throws ParserException, IOException { return analyzer.parse(createTempFile(content), NoOpProgressListener); } protected Snapshot parseFile(String name) throws ParserException, URISyntaxException { return analyzer.parse(pathOfResource(name), NoOpProgressListener); } }
2,975
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa/tda/TestJStackParser.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda; import org.eclipse.jifa.tda.model.Snapshot; import org.eclipse.jifa.tda.parser.ParserException; import org.junit.Assert; import org.junit.Test; import java.io.IOException; import java.net.URISyntaxException; import java.text.ParseException; import java.text.SimpleDateFormat; public class TestJStackParser extends TestBase { @Test public void testTime() throws ParserException, ParseException, IOException { String time = "2021-06-12 23:07:17"; Snapshot snapshot = parseString(time); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); Assert.assertEquals(sdf.parse(time).getTime(), snapshot.getTimestamp()); time = "2021-06-12 23:07:18\n"; snapshot = parseString(time); Assert.assertEquals(sdf.parse(time).getTime(), snapshot.getTimestamp()); } @Test public void testVersion() throws ParserException, IOException { String version = "Full thread dump OpenJDK 64-Bit Server VM (15.0.1+9-18 mixed mode, sharing):"; Snapshot snapshot = parseString(version); Assert.assertEquals(-1, snapshot.getTimestamp()); Assert.assertEquals("OpenJDK 64-Bit Server VM (15.0.1+9-18 mixed mode, sharing)", snapshot.getVmInfo()); } @Test public void testJDK8Log() throws ParserException, URISyntaxException { Snapshot snapshot = parseFile("jstack_8.log"); Assert.assertTrue(snapshot.getErrors().isEmpty()); } @Test public void testJDK11Log() throws ParserException, URISyntaxException { Snapshot snapshot = 
parseFile("jstack_11_with_deadlocks.log"); Assert.assertTrue(snapshot.getErrors().isEmpty()); } }
2,976
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/test/java/org/eclipse/jifa/tda/TestAnalyzer.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda; import org.eclipse.jifa.common.listener.DefaultProgressListener; import org.eclipse.jifa.common.request.PagingRequest; import org.eclipse.jifa.common.vo.PageView; import org.eclipse.jifa.tda.enums.ThreadType; import org.eclipse.jifa.tda.vo.Content; import org.eclipse.jifa.tda.vo.Overview; import org.eclipse.jifa.tda.vo.VFrame; import org.eclipse.jifa.tda.vo.VMonitor; import org.eclipse.jifa.tda.vo.VThread; import org.junit.Assert; import org.junit.Test; public class TestAnalyzer extends TestBase { @Test public void test() throws Exception { ThreadDumpAnalyzer tda = new ThreadDumpAnalyzer(pathOfResource("jstack_8.log"), new DefaultProgressListener()); Overview o1 = tda.overview(); Overview o2 = tda.overview(); Assert.assertEquals(o1, o2); Assert.assertEquals(o1.hashCode(), o2.hashCode()); PageView<VThread> threads = tda.threads("main", ThreadType.JAVA, new PagingRequest(1, 1)); Assert.assertEquals(1, threads.getTotalSize()); PageView<VFrame> frames = tda.callSiteTree(0, new PagingRequest(1, 16)); Assert.assertTrue(frames.getTotalSize() > 0); Assert.assertNotEquals(frames.getData().get(0), frames.getData().get(1)); PageView<VMonitor> monitors = tda.monitors(new PagingRequest(1, 8)); Assert.assertTrue(monitors.getTotalSize() > 0); Content line2 = tda.content(2, 1); Assert.assertEquals("Full thread dump OpenJDK 64-Bit Server VM (18-internal+0-adhoc.denghuiddh.my-jdk mixed " + "mode, sharing):", 
line2.getContent().get(0)); } }
2,977
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/ThreadDumpAnalyzer.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda; import org.apache.commons.lang.StringUtils; import org.eclipse.jifa.common.cache.Cacheable; import org.eclipse.jifa.common.cache.ProxyBuilder; import org.eclipse.jifa.common.listener.ProgressListener; import org.eclipse.jifa.common.request.PagingRequest; import org.eclipse.jifa.common.util.CollectionUtil; import org.eclipse.jifa.common.util.PageViewBuilder; import org.eclipse.jifa.common.vo.PageView; import org.eclipse.jifa.tda.enums.MonitorState; import org.eclipse.jifa.tda.enums.ThreadType; import org.eclipse.jifa.tda.model.CallSiteTree; import org.eclipse.jifa.tda.model.Frame; import org.eclipse.jifa.tda.model.IdentityPool; import org.eclipse.jifa.tda.model.JavaThread; import org.eclipse.jifa.tda.model.Monitor; import org.eclipse.jifa.tda.model.RawMonitor; import org.eclipse.jifa.tda.model.Snapshot; import org.eclipse.jifa.tda.model.Thread; import org.eclipse.jifa.tda.parser.ParserFactory; import org.eclipse.jifa.tda.vo.Content; import org.eclipse.jifa.tda.vo.Overview; import org.eclipse.jifa.tda.vo.VFrame; import org.eclipse.jifa.tda.vo.VMonitor; import org.eclipse.jifa.tda.vo.VThread; import java.io.FileReader; import java.io.IOException; import java.io.LineNumberReader; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; /** * Thread dump analyzer */ public class ThreadDumpAnalyzer { 
private final Snapshot snapshot; ThreadDumpAnalyzer(Path path, ProgressListener listener) { snapshot = ParserFactory.buildParser(path).parse(path, listener); } /** * build a parser for a thread dump * * @param path the path of thread dump * @param listener progress listener * @return analyzer */ public static ThreadDumpAnalyzer build(Path path, ProgressListener listener) { return ProxyBuilder.build(ThreadDumpAnalyzer.class, new Class[]{Path.class, ProgressListener.class}, new Object[]{path, listener}); } private void computeThreadState(Overview o, Thread thread) { ThreadType type = thread.getType(); switch (type) { case JAVA: JavaThread jt = ((JavaThread) thread); o.getJavaThreadStat().inc(jt.getJavaThreadState()); o.getJavaThreadStat().inc(jt.getOsThreadState()); if (jt.isDaemon()) { o.getJavaThreadStat().incDaemon(); } break; case JIT: o.getJitThreadStat().inc(thread.getOsThreadState()); break; case GC: o.getGcThreadStat().inc(thread.getOsThreadState()); break; case VM: o.getOtherThreadStat().inc(thread.getOsThreadState()); break; } o.getThreadStat().inc(thread.getOsThreadState()); } /** * @return the overview of the thread dump */ @Cacheable public Overview overview() { Overview o = new Overview(); CollectionUtil.forEach(t -> computeThreadState(o, t), snapshot.getJavaThreads(), snapshot.getNonJavaThreads()); snapshot.getThreadGroup().forEach( (p, l) -> { for (Thread t : l) { o.getThreadGroupStat().computeIfAbsent(p, i -> new Overview.ThreadStat()).inc(t.getOsThreadState()); } } ); o.setTimestamp(snapshot.getTimestamp()); o.setVmInfo(snapshot.getVmInfo()); o.setJniRefs(snapshot.getJniRefs()); o.setJniWeakRefs(snapshot.getJniWeakRefs()); if (snapshot.getDeadLockThreads() != null) { o.setDeadLockCount(snapshot.getDeadLockThreads().size()); } o.setErrorCount(snapshot.getErrors().size()); return o; } /** * @return the call site tree by parent id */ public PageView<VFrame> callSiteTree(int parentId, PagingRequest paging) { CallSiteTree tree = 
snapshot.getCallSiteTree(); if (parentId < 0 || parentId >= tree.getId2Node().length) { throw new IllegalArgumentException("Illegal parent id: " + parentId); } CallSiteTree.Node node = tree.getId2Node()[parentId]; List<CallSiteTree.Node> children = node.getChildren() != null ? node.getChildren() : Collections.emptyList(); return PageViewBuilder.build(children, paging, n -> { VFrame vFrame = new VFrame(); vFrame.setId(n.getId()); vFrame.setWeight(n.getWeight()); vFrame.setEnd(n.getChildren() == null); Frame frame = n.getFrame(); vFrame.setClazz(frame.getClazz()); vFrame.setMethod(frame.getMethod()); vFrame.setModule(frame.getModule()); vFrame.setSourceType(frame.getSourceType()); vFrame.setSource(frame.getSource()); vFrame.setLine(frame.getLine()); if (frame.getMonitors() != null) { List<VMonitor> vMonitors = new ArrayList<>(); for (Monitor monitor : frame.getMonitors()) { String clazz = null; RawMonitor rm = monitor.getRawMonitor(); clazz = rm.getClazz(); vMonitors.add(new VMonitor(rm.getId(), rm.getAddress(), rm.isClassInstance(), clazz, monitor.getState())); } vFrame.setMonitors(vMonitors); } return vFrame; }); } private PageView<VThread> buildVThreadPageView(List<Thread> threads, PagingRequest paging) { return PageViewBuilder.build(threads, paging, thread -> { VThread vThread = new VThread(); vThread.setId(thread.getId()); vThread.setName(thread.getName()); return vThread; }); } /** * @param name the thread name * @param type the thread type * @param paging paging request * @return the threads filtered by name and type */ public PageView<VThread> threads(String name, ThreadType type, PagingRequest paging) { List<Thread> threads = new ArrayList<>(); CollectionUtil.forEach(t -> { if (type != null && t.getType() != type) { return; } if (StringUtils.isNotBlank(name) && !t.getName().contains(name)) { return; } threads.add(t); }, snapshot.getJavaThreads(), snapshot.getNonJavaThreads()); return buildVThreadPageView(threads, paging); } /** * @param groupName the thread 
group name * @param paging paging request * @return the threads filtered by group name and type */ public PageView<VThread> threadsOfGroup(String groupName, PagingRequest paging) { List<Thread> threads = snapshot.getThreadGroup().getOrDefault(groupName, Collections.emptyList()); return buildVThreadPageView(threads, paging); } public List<String> rawContentOfThread(int id) throws IOException { Thread thread = snapshot.getThreadMap().get(id); if (thread == null) { throw new IllegalArgumentException("Thread id is illegal: " + id); } String path = snapshot.getPath(); int start = thread.getLineStart(); int end = thread.getLineEnd(); List<String> content = new ArrayList<>(); try (LineNumberReader lnr = new LineNumberReader(new FileReader(path))) { for (int i = 1; i < start; i++) { lnr.readLine(); } for (int i = start; i <= end; i++) { content.add(lnr.readLine()); } } return content; } /** * @param lineNo start line number * @param lineLimit line count * @return the raw content * @throws IOException */ public Content content(int lineNo, int lineLimit) throws IOException { String path = snapshot.getPath(); int end = lineNo + lineLimit - 1; List<String> content = new ArrayList<>(); boolean reachEnd; try (LineNumberReader lnr = new LineNumberReader(new FileReader(path))) { for (int i = 1; i < lineNo; i++) { String line = lnr.readLine(); if (line == null) { break; } } for (int i = lineNo; i <= end; i++) { String line = lnr.readLine(); if (line == null) { break; } content.add(line); } String line = lnr.readLine(); reachEnd = line == null; } return new Content(content, reachEnd); } /** * @param paging paging request * @return the monitors */ public PageView<VMonitor> monitors(PagingRequest paging) { IdentityPool<RawMonitor> monitors = snapshot.getRawMonitors(); return PageViewBuilder.build(monitors.objects(), paging, m -> new VMonitor(m.getId(), m.getAddress(), m.isClassInstance(), m.getClazz())); } /** * @param id monitor id * @param state monitor state * @param paging paging 
request * @return the threads by monitor id and state */ public PageView<VThread> threadsByMonitor(int id, MonitorState state, PagingRequest paging) { Map<MonitorState, List<Thread>> map = snapshot.getMonitorThreads().get(id); if (map == null) { throw new IllegalArgumentException("Illegal monitor id: " + id); } return buildVThreadPageView(map.getOrDefault(state, Collections.emptyList()), paging); } /** * @param id monitor id * @return the <state, count> map by monitor id */ public Map<MonitorState, Integer> threadCountsByMonitor(int id) { Map<MonitorState, List<Thread>> map = snapshot.getMonitorThreads().get(id); if (map == null) { throw new IllegalArgumentException("Illegal monitor id: " + id); } Map<MonitorState, Integer> counts = new HashMap<>(); map.forEach((s, l) -> counts.put(s, l.size())); return counts; } }
2,978
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/vo/VMonitor.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.vo; import com.google.gson.annotations.SerializedName; import lombok.AllArgsConstructor; import lombok.Data; import org.eclipse.jifa.tda.enums.MonitorState; @Data @AllArgsConstructor public class VMonitor { private int id; private long address; private boolean classInstance; @SerializedName("class") private String clazz; private MonitorState state; public VMonitor(int id, long address, boolean classInstance, String clazz) { this.id = id; this.address = address; this.classInstance = classInstance; this.clazz = clazz; } }
2,979
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/vo/VThread.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.vo; import lombok.Data; @Data public class VThread { private int id; private String name; }
2,980
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/vo/VFrame.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.vo; import com.google.gson.annotations.SerializedName; import lombok.Data; import org.eclipse.jifa.tda.enums.SourceType; import java.util.List; @Data public class VFrame { private int id; @SerializedName("class") private String clazz; private String method; private String module; private SourceType sourceType; private String source; private int line; private int weight; private VMonitor wait; private List<VMonitor> monitors; private boolean end; }
2,981
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/vo/Overview.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.vo; import lombok.Data; import lombok.EqualsAndHashCode; import org.eclipse.jifa.tda.enums.JavaThreadState; import org.eclipse.jifa.tda.enums.OSTreadState; import java.util.HashMap; import java.util.Map; @Data public class Overview { private long timestamp; private String vmInfo; private int jniRefs; private int jniWeakRefs; private int deadLockCount; private int errorCount; private ThreadStat threadStat = new ThreadStat(); private JavaThreadStat javaThreadStat = new JavaThreadStat(); private ThreadStat jitThreadStat = new ThreadStat(); private ThreadStat gcThreadStat = new ThreadStat(); private ThreadStat otherThreadStat = new ThreadStat(); private Map<String, ThreadStat> threadGroupStat = new HashMap<>(); private final OSTreadState[] states = OSTreadState.values(); private final JavaThreadState[] javaStates = JavaThreadState.values(); @Data public static class ThreadStat { private final int[] counts = new int[OSTreadState.COUNT]; public void inc(OSTreadState state) { counts[state.ordinal()]++; } } @Data @EqualsAndHashCode(callSuper = true) public static class JavaThreadStat extends ThreadStat { private final int[] javaCounts = new int[JavaThreadState.COUNT]; private int daemonCount; public void inc(JavaThreadState state) { javaCounts[state.ordinal()]++; } public void incDaemon() { daemonCount++; } } }
2,982
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/vo/Content.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.vo; import lombok.AllArgsConstructor; import lombok.Data; import java.util.List; @Data @AllArgsConstructor public class Content { private List<String> content; private boolean end; }
2,983
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/util/Converter.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.util; public class Converter { public static double str2TimeMillis(String str) { if (str == null) { return -1; } int length = str.length(); if (str.endsWith("ms")) { return Double.parseDouble(str.substring(0, length - 2)); } else if (str.endsWith("s")) { return Double.parseDouble(str.substring(0, length - 1)) * 1000; } throw new IllegalArgumentException(str); } }
2,984
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/enums/ThreadType.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.enums; public enum ThreadType { JAVA, // NOTE: Actually a JIT thread is also a JAVA thread in hotspot implementation JIT, GC, VM, }
2,985
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/enums/OSTreadState.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.enums; public enum OSTreadState { ALLOCATED("allocated"), INITIALIZED("initialized"), RUNNABLE("runnable"), MONITOR_WAIT("waiting for monitor entry"), COND_VAR_WAIT("waiting on condition"), OBJECT_WAIT("in Object.wait()"), BREAK_POINTED("at breakpoint"), SLEEPING("sleeping"), ZOMBIE("zombie"), UNKNOWN("unknown state"); public static final int COUNT = OSTreadState.values().length; private final String description; OSTreadState(String description) { this.description = description; } public static OSTreadState getByDescription(String s) { if (s == null) { throw new IllegalArgumentException(); } for (OSTreadState state : OSTreadState.values()) { if (s.startsWith(state.description)) { return state; } } return UNKNOWN; } }
2,986
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/enums/JavaThreadState.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.enums; public enum JavaThreadState { NEW("NEW"), RUNNABLE("RUNNABLE"), SLEEPING("TIMED_WAITING (sleeping)"), IN_OBJECT_WAIT("WAITING (on object monitor)"), IN_OBJECT_WAIT_TIMED("TIMED_WAITING (on object monitor)"), PARKED("WAITING (parking)"), PARKED_TIMED("TIMED_WAITING (parking)"), BLOCKED_ON_MONITOR_ENTER("BLOCKED (on object monitor)"), TERMINATED("TERMINATED"), UNKNOWN("UNKNOWN"); public static final int COUNT = JavaThreadState.values().length; private final String description; JavaThreadState(String description) { this.description = description; } public static JavaThreadState getByDescription(String s) { if (s == null) { throw new IllegalArgumentException(); } for (JavaThreadState state : JavaThreadState.values()) { if (s.startsWith(state.description)) { return state; } } return UNKNOWN; } }
2,987
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/enums/MonitorState.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.enums; public enum MonitorState { WAITING_ON("- waiting on"), WAITING_TO_RE_LOCK("- waiting to re-lock"), WAITING_ON_NO_OBJECT_REFERENCE_AVAILABLE("- waiting on"), PARKING("- parking"), WAITING_ON_CLASS_INITIALIZATION("- waiting on the Class initialization monitor"), LOCKED("- locked"), WAITING_TO_LOCK("- waiting to lock"), ELIMINATED_SCALAR_REPLACED("- eliminated <owner is scalar replaced>"), ELIMINATED("- eliminated"); private final String prefix; MonitorState(String prefix) { this.prefix = prefix; } public String prefix() { return prefix; } }
2,988
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/enums/SourceType.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.enums; public enum SourceType { REDEFINED, NATIVE_METHOD, SOURCE_FILE, SOURCE_FILE_WITH_LINE_NUMBER, UNKNOWN_SOURCE; public static SourceType judge(String source) { if (source.contains(":")) { return SOURCE_FILE_WITH_LINE_NUMBER; } if (source.endsWith(".java")) { return SOURCE_FILE; } if (source.equals("Redefined")) { return REDEFINED; } if (source.equals("Native Method")) { return NATIVE_METHOD; } if (source.equals("Unknown Source")) { return UNKNOWN_SOURCE; } return SOURCE_FILE; } }
2,989
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/JStackParser.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.parser; import org.apache.commons.lang.StringUtils; import org.eclipse.jifa.common.listener.ProgressListener; import org.eclipse.jifa.tda.enums.JavaThreadState; import org.eclipse.jifa.tda.enums.MonitorState; import org.eclipse.jifa.tda.enums.OSTreadState; import org.eclipse.jifa.tda.enums.SourceType; import org.eclipse.jifa.tda.enums.ThreadType; import org.eclipse.jifa.tda.model.ConcurrentLock; import org.eclipse.jifa.tda.model.Frame; import org.eclipse.jifa.tda.model.JavaThread; import org.eclipse.jifa.tda.model.Monitor; import org.eclipse.jifa.tda.model.Pool; import org.eclipse.jifa.tda.model.RawMonitor; import org.eclipse.jifa.tda.model.Snapshot; import org.eclipse.jifa.tda.model.Thread; import org.eclipse.jifa.tda.model.Trace; import org.eclipse.jifa.tda.util.Converter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.lang.reflect.Field; import java.nio.file.Path; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.BlockingDeque; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; public class JStackParser 
implements Parser { private static final Logger LOGGER = LoggerFactory.getLogger(JStackParser.class); private static final BlockingDeque<ParserImpl.RawJavaThread> QUEUE; private static final ExecutorService ES; static { QUEUE = new LinkedBlockingDeque<>(128); int count = Math.max(2, Runtime.getRuntime().availableProcessors()); ES = Executors.newFixedThreadPool(count); assert count >= 2; for (int i = 0; i < count; i++) { ES.submit(() -> { //noinspection InfiniteLoopStatement while (true) { try { QUEUE.take().parse(); } catch (Throwable t) { LOGGER.error("Parse one thread error", t); } } }); } } @Override public Snapshot parse(Path path, ProgressListener listener) { try { Snapshot snapshot = new ParserImpl(path, listener).parse(); snapshot.post(); return snapshot; } catch (Throwable t) { if (t instanceof ParserException) { throw (ParserException) t; } throw new ParserException(t); } } static final class PATTERNS { static String TIME_FORMAT; static Pattern TIME; static Pattern VERSION; static String SMR_HEAD; static Pattern JAVA_THREAD; static Pattern JAVA_STATE; static Pattern JAVA_FRAME; static Pattern NO_JAVA_THREAD; static Pattern JNI_GLOBAL_REFS; static Pattern WAITING_ON; static Pattern WAITING_TO_RE_LOCK; static Pattern PARKING; static Pattern WAITING_ON_CLASS_INITIALIZATION; static Pattern LOCKED; static Pattern WAITING_TO_LOCK; static Pattern ELIMINATED_SCALAR_REPLACED; static Pattern ELIMINATED; static String LOCKED_OWNABLE_SYNCHRONIZERS; static String NONE; static Pattern LOCKED_SYNCHRONIZER; static String DEAD_LOCK_HEAD; static Pattern DEAD_LOCK_THREAD; static Pattern DEAD_LOCK_WAITING_TO_LOCK_MONITOR; static Pattern DEAD_LOCK_WAITING_TO_LOCK_SYNCHRONIZER; static Pattern DEAD_HELD_INFO; static String DEAD_LOCK_STACK_HEAD; static Pattern DEAD_FOUND; static { try { String fn = "jstack_pattern.properties"; Properties ps = new Properties(); ps.load(PATTERNS.class.getClassLoader().getResourceAsStream(fn)); Field[] fields = PATTERNS.class.getDeclaredFields(); 
for (Field field : fields) { String value = (String) ps.get(field.getName()); if (value == null) { throw new ParserException(field.getName() + " not found in " + fn); } Class<?> type = field.getType(); if (type == Pattern.class) { field.set(null, Pattern.compile((String) ps.get(field.getName()))); } else if (type == String.class) { field.set(null, value); } } } catch (Throwable t) { if (t instanceof ParserException) { throw (ParserException) t; } throw new ParserException(t); } } static Pattern patternOf(ParserImpl.Element element) { switch (element) { case TIME: return TIME; case VERSION: return VERSION; case JNI_GLOBAL_REFS: return JNI_GLOBAL_REFS; case NON_JAVA_THREAD: return NO_JAVA_THREAD; default: throw new ParserException("Should not reach here"); } } } private static class ParserImpl { private final Input input; private final AtomicInteger processing; private final List<Throwable> errors; private final Snapshot snapshot; private final ProgressListener listener; ParserImpl(Path path, ProgressListener listener) throws IOException { this.input = new Input(path); this.listener = listener; snapshot = new Snapshot(); snapshot.setPath(path.toAbsolutePath().toString()); processing = new AtomicInteger(0); errors = new ArrayList<>(); step(); } Snapshot parse() throws Exception { listener.beginTask("Parsing thread dump", 100); listener.subTask("Parsing timestamp and version"); parseTimeStamp(); parseVersion(); listener.worked(1); skipSMR(); // concurrent listener.subTask("Parsing threads"); parseThreads(); listener.subTask("Parsing JNI handles"); parseJNIGlobalHandles(); listener.worked(1); listener.subTask("Parsing JNI deadLocks"); parseDeadLocks(); listener.worked(8); // Wait for all Java threads to complete synchronized (this) { while (processing.get() != 0) { this.wait(); } } listener.worked(90); return snapshot; } void step() throws IOException { String line; while ((line = input.readLine()) != null) { if (!StringUtils.isBlank(line)) { return; } } } void 
skipSMR() throws IOException { if (PATTERNS.SMR_HEAD.equals(input.currentLine())) { //noinspection StatementWithEmptyBody while (StringUtils.isNotBlank(input.readLine())) ; } } void parseByElementPattern(Element element, Action action, boolean stepOnFailed) throws Exception { String line = input.currentLine(); if (line == null) { LOGGER.warn("Skip parsing {} caused by EOF", element.description); return; } Matcher matcher = PATTERNS.patternOf(element).matcher(line); if (matcher.matches()) { try { action.onMatched(matcher); } finally { step(); } } else { LOGGER.warn("Parse {} failed: {}", element.description, line); if (stepOnFailed) { step(); } } } void parseTimeStamp() throws Exception { parseByElementPattern(Element.TIME, m -> { long ts = new SimpleDateFormat(PATTERNS.TIME_FORMAT).parse(input.currentLine()).getTime(); snapshot.setTimestamp(ts); }, false); } void parseVersion() throws Exception { parseByElementPattern(Element.VERSION, m -> { snapshot.setVmInfo(m.group("info")); }, false); } void parseJNIGlobalHandles() throws Exception { parseByElementPattern(Element.JNI_GLOBAL_REFS, m -> { String all = m.group("all"); if (all != null) { snapshot.setJniRefs(Integer.parseInt(all)); } else { int strong = Integer.parseInt(m.group("strong")); int weak = Integer.parseInt(m.group("weak")); snapshot.setJniRefs(strong + weak); snapshot.setJniWeakRefs(weak); } }, false); } void parseDeadLocks() throws Exception { String line = input.currentLine(); if (line == null) { return; } int dlCount = 0; while (line.equals(PATTERNS.DEAD_LOCK_HEAD)) { dlCount++; if (snapshot.getDeadLockThreads() == null) { snapshot.setDeadLockThreads(new ArrayList<>()); } List<JavaThread> threads = new ArrayList<>(); // skip ==== input.readLine(); step(); line = input.currentLine(); Matcher matcher; int tCount = 0; do { matcher = PATTERNS.DEAD_LOCK_THREAD.matcher(line); if (!matcher.matches()) { throw new ParserException("Illegal dead lock thread name"); } JavaThread thread = new JavaThread(); String 
name = matcher.group("name"); threads.add(thread); tCount++; thread.setName(snapshot.getSymbols().add(name)); thread.setType(ThreadType.JAVA); // wait and held info input.readLine(); input.readLine(); line = input.readLine(); } while (line.startsWith("\"")); step(); line = input.currentLine(); if (!line.equals(PATTERNS.DEAD_LOCK_STACK_HEAD)) { throw new ParserException("Illegal dead lock stack head"); } // skip ==== input.readLine(); line = input.readLine(); for (int i = 0; i < tCount; i++) { matcher = PATTERNS.DEAD_LOCK_THREAD.matcher(line); if (!matcher.matches()) { throw new ParserException("Illegal dead lock thread name"); } List<String> stackTraces = new ArrayList<>(); while (true) { line = input.readLine(); if (line != null && !line.startsWith("\"") && !line.isBlank() && !line.startsWith("Found")) { stackTraces.add(line); } else { Trace trace = parseStackTrace(threads.get(i), true, stackTraces); threads.get(i).setTrace(snapshot.getTraces().add(trace)); break; } } } snapshot.getDeadLockThreads().add(threads); } if (dlCount > 0) { step(); line = input.currentLine(); Matcher matcher = PATTERNS.DEAD_FOUND.matcher(line); if (!matcher.matches()) { throw new ParserException("Missing Dead lock found line"); } if (Integer.parseInt(matcher.group("count")) != dlCount) { throw new ParserException("Dead lock count mismatched"); } } } void enroll(RawJavaThread tp) { processing.incrementAndGet(); try { QUEUE.put(tp); } catch (Throwable t) { processing.decrementAndGet(); } } ThreadType typeOf(String name, boolean javaThread) { if (javaThread) { if (name.startsWith("C1 CompilerThread") || name.startsWith("C2 CompilerThread")) { return ThreadType.JIT; } return ThreadType.JAVA; } if (name.contains("GC") || name.contains("G1") || name.contains("CMS") || name.contains("Concurrent Mark-Sweep")) { return ThreadType.GC; } return ThreadType.VM; } void fillThread(Thread thread, Matcher m) { Pool<String> symbols = snapshot.getSymbols(); String name = m.group("name"); 
thread.setName(symbols.add(name)); thread.setType(typeOf(name, thread instanceof JavaThread)); thread.setOsPriority(Integer.parseInt(m.group("osPriority"))); thread.setCpu(Converter.str2TimeMillis(m.group("cpu"))); thread.setElapsed(Converter.str2TimeMillis(m.group("elapsed"))); thread.setTid(Long.decode(m.group("tid"))); thread.setNid(Long.decode(m.group("nid"))); thread.setOsThreadState(OSTreadState.getByDescription(m.group("state").trim())); } void parseThreads() throws Exception { String line = input.currentLine(); do { while (StringUtils.isBlank(line)) { line = input.readLine(); if (line == null) { return; } } if (line.startsWith("\"")) { if (!line.endsWith("]")) { // not a java thread break; } RawJavaThread rjt = new RawJavaThread(); rjt.contents.add(line); rjt.lineStart = input.lineNumber(); while ((line = input.readLine()) != null) { if (StringUtils.isBlank(line)) { continue; } if (line.startsWith("\"")) { break; } if (line.startsWith(MonitorState.ELIMINATED_SCALAR_REPLACED.prefix())) { // this problem is fixed by JDK-8268780(JDK 18) int index = line.indexOf(")"); if (index > 0 && line.length() > index + 1) { rjt.contents.add(line.substring(0, index + 1)); rjt.contents.add(line.substring(index + 1).trim()); continue; } } rjt.contents.add(line); rjt.lineEnd = input.lineNumber(); } enroll(rjt); } else { break; } } while (true); // other threads do { while (StringUtils.isBlank(line)) { line = input.readLine(); if (line == null) { return; } } if (line.startsWith("\"")) { parseByElementPattern(Element.NON_JAVA_THREAD, m -> { Thread thread = new Thread(); fillThread(thread, m); thread.setLineStart(input.lineNumber()); thread.setLineEnd(input.lineNumber()); snapshot.getNonJavaThreads().add(thread); }, true); } else { break; } // step in parseByElementPattern } while ((line = input.currentLine()) != null); } void done() { int remain = processing.decrementAndGet(); if (remain == 0) { synchronized (this) { this.notify(); } } } void recordError(Throwable t) { 
synchronized (this) { errors.add(t); t.printStackTrace(); } } void onParseRawThreadError(Throwable t) { recordError(t); int remain = processing.decrementAndGet(); if (remain == 0) { synchronized (this) { this.notify(); } } } void checkLastFrameNotNull(Frame last, String line) { if (last == null) { throw new ParserException("Last frame doesn't exist: " + line); } } Monitor assembleMonitor(Thread thread, boolean needMap, MonitorState state, long address, boolean isClass, String clazz) { RawMonitor rm = new RawMonitor(); rm.setAddress(address); rm.setClassInstance(isClass); rm.setClazz(clazz); rm = snapshot.getRawMonitors().add(rm); Monitor monitor = new Monitor(); monitor.setRawMonitor(rm); monitor.setState(state); monitor = snapshot.getMonitors().add(monitor); if (needMap) { synchronized (this) { boolean shouldMap = true; if (state == MonitorState.LOCKED) { Map<MonitorState, List<Thread>> map = snapshot.getMonitorThreads().get(rm.getId()); if (map != null) { for (Map.Entry<MonitorState, List<Thread>> entry : map.entrySet()) { if (entry.getKey() != MonitorState.LOCKED && entry.getValue().contains(thread)) { shouldMap = false; break; } } } } if (shouldMap) { snapshot.getMonitorThreads() .computeIfAbsent(rm.getId(), i -> new HashMap<>()) .computeIfAbsent(state, s -> new ArrayList<>()) .add(thread); } } } return monitor; } Trace parseStackTrace(Thread thread, boolean deadLockThread, List<String> stackTraces) { Pool<String> symbolPool = snapshot.getSymbols(); Pool<Frame> framePool = snapshot.getFrames(); Pool<Monitor> monitorPool = snapshot.getMonitors(); Trace trace = new Trace(); List<Frame> frames = new ArrayList<>(); List<Monitor> monitors = new ArrayList<>(); Frame last = null; for (int i = 0; i < stackTraces.size(); i++) { Matcher m; String line = stackTraces.get(i); if (line.startsWith("at")) { m = PATTERNS.JAVA_FRAME.matcher(line); if (!m.matches()) { throw new ParserException("Illegal java frame: " + line); } if (!monitors.isEmpty()) { 
last.setMonitors(monitors.toArray(new Monitor[0])); monitors.clear(); } if (last != null) { // add frame here since all related information has been processed frames.add(framePool.add(last)); } last = new Frame(); last.setClazz(symbolPool.add(m.group("class"))); last.setMethod(symbolPool.add(m.group("method"))); String module = m.group("module"); if (module != null) { // strip '/' last.setModule(symbolPool.add(module.substring(0, module.length() - 1))); } String source = m.group("source"); SourceType sourceType = SourceType.judge(source); last.setSourceType(sourceType); if (sourceType == SourceType.SOURCE_FILE_WITH_LINE_NUMBER) { int index = source.indexOf(":"); last.setLine(Integer.parseInt(source.substring(index + 1))); source = source.substring(0, index); last.setSource(symbolPool.add(source)); } else if (sourceType == SourceType.SOURCE_FILE) { last.setSource(symbolPool.add(source)); } } else { if (line.startsWith(MonitorState.PARKING.prefix())) { assert last != null; m = PATTERNS.PARKING.matcher(line); if (!m.matches()) { throw new ParserException("Illegal parking line: " + line); } monitors.add(assembleMonitor(thread, !deadLockThread, MonitorState.PARKING, Long.decode(m.group("address")), false, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.WAITING_ON.prefix())) { assert last != null; if (line.contains("<no object reference available>")) { monitors .add(assembleMonitor(thread, !deadLockThread, MonitorState.WAITING_ON_NO_OBJECT_REFERENCE_AVAILABLE, -1, false, null)); } else { m = PATTERNS.WAITING_ON.matcher(line); if (!m.matches()) { throw new ParserException("Illegal waiting line: " + line); } monitors .add(assembleMonitor(thread, !deadLockThread, MonitorState.WAITING_ON, Long.decode(m.group("address")), m.group("isClass") != null, symbolPool.add(m.group("class")))); } } else if (line.startsWith(MonitorState.WAITING_TO_RE_LOCK.prefix())) { assert last != null; m = PATTERNS.WAITING_TO_RE_LOCK.matcher(line); if (!m.matches()) { 
throw new ParserException("Illegal waiting to re-lock line: " + line); } monitors .add(assembleMonitor(thread, !deadLockThread, MonitorState.WAITING_TO_RE_LOCK, Long.decode(m.group("address")), m.group("isClass") != null, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.WAITING_ON_CLASS_INITIALIZATION.prefix())) { assert last != null; m = PATTERNS.WAITING_ON_CLASS_INITIALIZATION.matcher(line); if (!m.matches()) { throw new ParserException( "Illegal waiting on class initialization line: " + line); } monitors .add(assembleMonitor(thread, !deadLockThread, MonitorState.WAITING_ON_CLASS_INITIALIZATION, -1, true, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.LOCKED.prefix())) { checkLastFrameNotNull(last, line); m = PATTERNS.LOCKED.matcher(line); if (!m.matches()) { throw new ParserException("Illegal locked line: " + line); } monitors.add(assembleMonitor(thread, !deadLockThread, MonitorState.LOCKED, Long.decode(m.group("address")), m.group("isClass") != null, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.WAITING_TO_LOCK.prefix())) { checkLastFrameNotNull(last, line); m = PATTERNS.WAITING_TO_LOCK.matcher(line); if (!m.matches()) { throw new ParserException("Illegal waiting to lock line: " + line); } monitors.add(assembleMonitor(thread, !deadLockThread, MonitorState.WAITING_TO_LOCK, Long.decode(m.group("address")), m.group("isClass") != null, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.ELIMINATED.prefix())) { checkLastFrameNotNull(last, line); m = PATTERNS.ELIMINATED.matcher(line); if (!m.matches()) { throw new ParserException("Illegal eliminated lock line: " + line); } monitors.add(assembleMonitor(thread, !deadLockThread, MonitorState.ELIMINATED, Long.decode(m.group("address")), m.group("isClass") != null, symbolPool.add(m.group("class")))); } else if (line.startsWith(MonitorState.ELIMINATED_SCALAR_REPLACED.prefix())) { checkLastFrameNotNull(last, line); 
m = PATTERNS.ELIMINATED_SCALAR_REPLACED.matcher(line); if (!m.matches()) { throw new ParserException( "Illegal eliminated(scalar replaced) lock line: " + line); } monitors.add(assembleMonitor(thread, !deadLockThread, MonitorState.ELIMINATED_SCALAR_REPLACED, -1, false, symbolPool.add(m.group("class")))); } else if (line.equals(PATTERNS.LOCKED_OWNABLE_SYNCHRONIZERS)) { // concurrent locks int lockIndex = i + 1; line = stackTraces.get(lockIndex); if (PATTERNS.NONE.equals(line)) { if (lockIndex + 1 != stackTraces.size()) { throw new ParserException("Should not have content after: " + line); } } else { Pool<ConcurrentLock> concurrentPool = snapshot.getConcurrentLocks(); List<ConcurrentLock> concurrentLocks = new ArrayList<>(); do { m = PATTERNS.LOCKED_SYNCHRONIZER.matcher(line); if (!m.matches()) { throw new ParserException("Illegal lock synchronizer line: " + line); } ConcurrentLock concurrentLock = new ConcurrentLock(); concurrentLock.setAddress(Long.decode(m.group("address"))); concurrentLock.setClazz(symbolPool.add(m.group("class"))); concurrentLocks.add(concurrentPool.add(concurrentLock)); if (++lockIndex < stackTraces.size()) { line = stackTraces.get(lockIndex); } else { break; } } while (true); trace.setConcurrentLocks( concurrentLocks.toArray(new ConcurrentLock[0])); } break; } else { throw new ParserException("Unrecognized line: " + line); } } } if (last != null) { if (!monitors.isEmpty()) { last.setMonitors(monitors.toArray(new Monitor[0])); } frames.add(framePool.add(last)); } trace.setFrames(frames.toArray(new Frame[0])); return trace; } void parse(RawJavaThread rjt) { try { List<String> contents = rjt.contents; assert contents.size() >= 2; String line = contents.get(0); Matcher m = PATTERNS.JAVA_THREAD.matcher(contents.get(0)); if (!m.matches()) { throw new ParserException("Illegal java thread: " + line); } JavaThread thread = new JavaThread(); fillThread(thread, m); thread.setLineStart(rjt.lineStart); thread.setLineEnd(rjt.lineEnd); 
thread.setJid(Long.parseLong(m.group("id"))); thread.setDaemon(m.group("daemon") != null); thread.setPriority(Integer.parseInt(m.group("priority"))); thread.setLastJavaSP(Long.decode(m.group("lastJavaSP"))); // java thread state line = contents.get(1); m = PATTERNS.JAVA_STATE.matcher(line); if (!m.matches()) { throw new ParserException("Illegal java thread state: " + line); } thread.setJavaThreadState(JavaThreadState.getByDescription(m.group("state"))); if (contents.size() > 2 && thread.getType() == ThreadType.JAVA /* skip jit */) { // trace Trace trace = parseStackTrace(thread, false, contents.subList(2, contents.size())); snapshot.getCallSiteTree().add(trace); thread.setTrace(snapshot.getTraces().add(trace)); } synchronized (this) { snapshot.getJavaThreads().add(thread); } done(); } catch (Throwable t) { onParseRawThreadError(t); } } enum Element { TIME("dump time"), VERSION("vm version"), JNI_GLOBAL_REFS("JNI global references"), NON_JAVA_THREAD("Non Java Thread"); private final String description; Element(String description) { this.description = description; } } interface Action { void onMatched(Matcher matcher) throws Exception; } class RawJavaThread { private final List<String> contents; private int lineStart; private int lineEnd; public RawJavaThread() { contents = new ArrayList<>(); } void parse() { ParserImpl.this.parse(this); } } } }
2,990
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/Parser.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda.parser;

import org.eclipse.jifa.common.listener.ProgressListener;
import org.eclipse.jifa.tda.model.Snapshot;

import java.nio.file.Path;

/**
 * Thread dump parser.
 *
 * <p>Implementations turn a textual thread dump file into an in-memory
 * {@link Snapshot} model. Implementations in this package include a text
 * parser and a serialization-caching decorator; instances are obtained via
 * {@code ParserFactory}.
 */
public interface Parser {

    /**
     * Generate a snapshot for the thread dump identified by path.
     *
     * @param path     the path of the thread dump file
     * @param listener progress listener notified while parsing proceeds
     * @return the snapshot of the thread dump
     * @throws ParserException the exception occurred during parsing
     *                         (unchecked; declared for documentation purposes)
     */
    Snapshot parse(Path path, ProgressListener listener) throws ParserException;
}
2,991
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/ParserFactory.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda.parser;

import java.nio.file.Path;

/**
 * Factory for {@link Parser} instances.
 *
 * <p>Currently every dump is handled by a single shared parser: a
 * {@link JStackParser} wrapped in a {@link SerDesParser} that caches the
 * parsed snapshot on disk.
 */
public class ParserFactory {

    /** Shared default parser: jstack text parsing with snapshot ser/des caching. */
    private static final Parser DEFAULT = new SerDesParser(new JStackParser());

    /** Utility class; not meant to be instantiated. */
    private ParserFactory() {
    }

    /**
     * Build (or reuse) a parser for the given thread dump.
     *
     * @param path the path of the thread dump; currently unused — reserved for
     *             future format detection, today the shared default parser is
     *             always returned
     * @return the parser to use for {@code path}
     */
    public static Parser buildParser(Path path) {
        return DEFAULT;
    }
}
2,992
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/ParserException.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.parser; public class ParserException extends RuntimeException { public ParserException() { } public ParserException(String message) { super(message); } public ParserException(String message, Throwable cause) { super(message, cause); } public ParserException(Throwable cause) { super(cause); } }
2,993
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/SerDesParser.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.parser; import com.esotericsoftware.kryo.Kryo; import com.esotericsoftware.kryo.io.Input; import com.esotericsoftware.kryo.io.Output; import org.eclipse.jifa.common.listener.ProgressListener; import org.eclipse.jifa.tda.model.Snapshot; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; public class SerDesParser implements Parser { private static final Logger LOGGER = LoggerFactory.getLogger(SerDesParser.class); private static final ThreadLocal<Kryo> KRYO; static { KRYO = ThreadLocal.withInitial(() -> { Kryo kryo = new Kryo(); kryo.register(Snapshot.class); return kryo; }); } private final Parser parser; public SerDesParser(Parser parser) { this.parser = parser; } private Path storage(Path from) { return Paths.get(from.toFile().getAbsoluteFile() + ".kryo"); } @Override public Snapshot parse(Path path, ProgressListener listener) { // TODO: multi-threads support Path storage = storage(path); if (storage.toFile().exists()) { try { listener.beginTask("Deserializing thread dump snapshot", 100); Snapshot snapshot = deserialize(storage); listener.worked(100); return snapshot; } catch (Throwable t) { LOGGER.error("Deserialize thread dump snapshot failed", t); 
listener.sendUserMessage(ProgressListener.Level.WARNING, "Deserialize thread dump snapshot failed", t); listener.reset(); } } listener.beginTask(null, 5); Snapshot snapshot = parser.parse(path, listener); try { serialize(snapshot, storage); listener.worked(5); } catch (Throwable t) { LOGGER.error("Serialize snapshot failed"); } return snapshot; } private void serialize(Snapshot snapshot, Path path) throws FileNotFoundException { Kryo kryo = KRYO.get(); try (Output out = new Output(new FileOutputStream(path.toFile()))) { kryo.writeObject(out, snapshot); } } private Snapshot deserialize(Path path) throws IOException { Kryo kryo = KRYO.get(); try (Input input = new Input(new FileInputStream(path.toFile()))) { return kryo.readObject(input, Snapshot.class); } } }
2,994
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/parser/Input.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.parser; import java.io.Closeable; import java.io.FileReader; import java.io.IOException; import java.io.LineNumberReader; import java.nio.file.Path; public class Input implements Closeable { private final LineNumberReader lnr; private String current; public Input(Path dumpPath) throws IOException { lnr = new LineNumberReader(new FileReader(dumpPath.toFile())); } public void mark() throws IOException { lnr.mark(1024); } public void reset() throws IOException { lnr.reset(); } public int lineNumber() { return lnr.getLineNumber(); } public String readLine() throws IOException { current = lnr.readLine(); if (current != null) { current = current.trim(); } return current; } public String currentLine() { return current; } @Override public void close() throws IOException { lnr.close(); } }
2,995
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/model/IdentityPool.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda.model;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * A {@link Pool} whose members carry a pool-assigned id and a reference count.
 *
 * <p>Ids start at 1 and are assigned on first interning. {@link #freeze()}
 * snapshots the members into {@link #objects()} ordered by descending
 * reference count and releases the counting structures.
 */
public class IdentityPool<O extends Identity> extends Pool<O> {

    /** Source of unique ids; first assigned id is 1. */
    private final AtomicInteger id;

    /** How many times each pooled instance was added; dropped on freeze. */
    private ConcurrentHashMap<O, AtomicInteger> refCountMap;

    /** Members ordered by descending ref count; populated by freeze(). */
    private List<O> objects;

    public IdentityPool() {
        id = new AtomicInteger();
        refCountMap = new ConcurrentHashMap<>();
    }

    /**
     * Interns {@code o}, assigning it a fresh id on first insertion, and
     * bumps the pooled instance's reference count.
     */
    @Override
    public O add(O o) {
        O pooled = map.computeIfAbsent(o, k -> {
            k.setId(nextId());
            return k;
        });
        refCountMap.computeIfAbsent(pooled, k -> new AtomicInteger(0)).incrementAndGet();
        return pooled;
    }

    private int nextId() {
        int id = this.id.incrementAndGet();
        assert id > 0;
        return id;
    }

    /** Snapshots members (hottest first), then releases map and ref counts. */
    @Override
    public void freeze() {
        objects = new ArrayList<>(map.values());
        // FIX: Integer.compare instead of subtraction — subtracting ints in a
        // comparator can overflow and invert the ordering.
        objects.sort((k1, k2) -> Integer.compare(refCountMap.get(k2).get(),
                                                 refCountMap.get(k1).get()));
        super.freeze();
        refCountMap = null;
    }

    public List<O> objects() {
        return objects;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        if (!super.equals(o)) return false;
        IdentityPool<?> that = (IdentityPool<?>) o;
        return Objects.equals(id.get(), that.id.get()) &&
            Objects.equals(refCountMap, that.refCountMap) &&
            Objects.equals(objects, that.objects);
    }

    @Override
    public int hashCode() {
        // FIX: hash id.get(), not the AtomicInteger itself — AtomicInteger uses
        // identity hashing, which made hashCode inconsistent with equals
        // (equals compares id.get()).
        return Objects.hash(super.hashCode(), id.get(), refCountMap, objects);
    }
}
2,996
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/model/CallSiteTree.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda.model;

import lombok.Data;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * Aggregated call-site tree over all parsed stack traces.
 *
 * <p>Each {@link Node} holds one {@link Frame}; a node's weight counts how many
 * traces pass through it. {@link #add(Trace)} is synchronized so parser worker
 * threads can merge traces concurrently; {@link #freeze()} then assigns stable
 * ids (heaviest siblings first) and builds the id-to-node index.
 */
@Data
public class CallSiteTree {

    /** Sentinel root; its weight equals the total number of traces added. */
    private final Node root;

    /** Every sibling list created so far; used by freeze() for id assignment, then dropped. */
    private List<List<Node>> allChildren;

    /** Total number of non-root nodes. */
    private int count;

    /** Index from node id to node; id2Node[0] is the root. Built by freeze(). */
    private Node[] id2Node;

    public CallSiteTree() {
        root = new Node();
        allChildren = new ArrayList<>();
    }

    /**
     * Merges one trace into the tree, incrementing weights along its path.
     * Synchronized: multiple parser threads call this concurrently.
     */
    public synchronized void add(Trace trace) {
        Frame[] frames = trace.getFrames();
        root.weight++;
        Node parent = root;
        for (Frame frame : frames) {
            parent = addChildren(parent, frame);
        }
    }

    /**
     * Assigns ids (siblings sorted by descending weight, breadth of creation
     * order across sibling lists), builds id2Node, and drops allChildren.
     * Must be called once, after all add() calls have completed.
     */
    public void freeze() {
        id2Node = new Node[count + 1];
        id2Node[0] = root;
        int index = 1;
        for (List<Node> children : allChildren) {
            // NOTE(review): weight subtraction could overflow for weights near
            // Integer.MAX_VALUE — harmless at realistic trace counts.
            children.sort((o1, o2) -> o2.weight - o1.weight);
            for (Node n : children) {
                n.setId(index);
                id2Node[index++] = n;
            }
        }
        assert index == count + 1;
        allChildren = null;
    }

    /**
     * Finds or creates the child of {@code parent} holding {@code frame},
     * bumping its weight. Siblings are kept ordered by Frame.hashCode so the
     * lookup is a binary search.
     */
    private Node addChildren(Node parent, Frame frame) {
        List<Node> children = parent.children;
        if (children == null) {
            // First child of this parent: create the sibling list lazily and
            // register it for later id assignment in freeze().
            Node node = new Node(frame);
            count++;
            children = new ArrayList<>();
            children.add(node);
            parent.children = children;
            allChildren.add(children);
            return node;
        }
        // Binary search keyed on Frame.hashCode.
        // NOTE(review): if two distinct frames collide on hashCode, the search
        // may stop at the colliding entry without scanning its neighbours and
        // insert a duplicate sibling — presumably rare enough to be accepted;
        // confirm before relying on sibling uniqueness.
        int low = 0;
        int high = children.size() - 1;
        while (low <= high) {
            int mid = low + (high - low) / 2;
            Node node = children.get(mid);
            if (node.frame.equals(frame)) {
                node.weight++;
                return node;
            } else if (node.frame.hashCode() < frame.hashCode()) {
                low = mid + 1;
            } else {
                high = mid - 1;
            }
        }
        // Not found: insert at the search's final low index to keep order.
        Node node = new Node(frame);
        count++;
        children.add(low, node);
        return node;
    }

    @Override
    public String toString() {
        return "CallSiteTree{" + "root=" + root + ", allChildren=" + allChildren + '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        CallSiteTree tree = (CallSiteTree) o;
        return count == tree.count && Objects.equals(root, tree.root) &&
            Objects.equals(allChildren, tree.allChildren) &&
            Arrays.equals(id2Node, tree.id2Node);
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(root, allChildren, count);
        result = 31 * result + Arrays.hashCode(id2Node);
        return result;
    }

    /** One call site: a frame plus how many traces pass through it. */
    @Data
    public static class Node extends Identity {

        // The frame at this call site; null only for the synthetic root.
        Frame frame;

        // Number of traces passing through this node.
        int weight;

        // Child sites, ordered by Frame.hashCode; null while leaf.
        List<Node> children;

        public Node(Frame frame) {
            this.frame = frame;
            weight = 1;
        }

        /** Root-only constructor: no frame, weight counts traces from zero. */
        public Node() {
            frame = null;
            weight = 0;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Node node = (Node) o;
            return weight == node.weight && Objects.equals(frame, node.frame) &&
                Objects.equals(children, node.children);
        }

        @Override
        public int hashCode() {
            return Objects.hash(frame, weight, children);
        }
    }
}
2,997
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/model/Frame.java
/******************************************************************************** * Copyright (c) 2022 Contributors to the Eclipse Foundation * * See the NOTICE file(s) distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 ********************************************************************************/ package org.eclipse.jifa.tda.model; import lombok.Data; import org.eclipse.jifa.tda.enums.SourceType; import java.util.Arrays; import java.util.Objects; @Data public class Frame { private String clazz; private String method; private String module; private SourceType sourceType; // -1 means unknown private String source; // -1 means unknown private int line = -1; private Monitor[] monitors; @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Frame frame = (Frame) o; return line == frame.line && Objects.equals(clazz, frame.clazz) && Objects.equals(method, frame.method) && Objects.equals(module, frame.module) && sourceType == frame.sourceType && Objects.equals(source, frame.source) && Arrays.equals(monitors, frame.monitors); } @Override public int hashCode() { int result = Objects.hash(clazz, method, module, sourceType, source, line); result = 31 * result + Arrays.hashCode(monitors); return result; } }
2,998
0
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda
Create_ds/eclipse-jifa/backend/thread-dump-analyzer/src/main/java/org/eclipse/jifa/tda/model/Pool.java
/********************************************************************************
 * Copyright (c) 2022 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 ********************************************************************************/
package org.eclipse.jifa.tda.model;

import lombok.Data;

import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

/**
 * A thread-safe interning pool: {@link #add(Object)} returns the canonical
 * instance for any object equal to one already pooled, so duplicate values
 * share a single representative.
 */
@Data
public class Pool<O> {

    // Maps each canonical instance to itself; package-private so subclasses
    // (e.g. IdentityPool) can reach it directly.
    Map<O, O> map;

    public Pool() {
        map = new ConcurrentHashMap<>();
    }

    /**
     * Interns {@code o}: if an equal instance is already pooled it is
     * returned, otherwise {@code o} becomes the canonical instance.
     */
    public O add(O o) {
        return map.computeIfAbsent(o, candidate -> candidate);
    }

    /** @return the number of distinct pooled instances. */
    public int size() {
        return map.size();
    }

    /** Releases the backing map; the pool must not be used afterwards. */
    public void freeze() {
        map = null;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        Pool<?> other = (Pool<?>) o;
        return Objects.equals(map, other.map);
    }

    @Override
    public int hashCode() {
        return Objects.hash(map);
    }
}
2,999