code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Serializes the given {@link ResolvedCatalogMaterializedTable} into a map of string
 * properties.
 *
 * @param resolvedMaterializedTable the resolved materialized table to serialize
 * @param sqlFactory factory used when serializing the resolved schema
 * @return a mutable map containing schema, comment, snapshot, partition keys, options,
 *     definition query, freshness, refresh mode/status, and refresh handler information
 * @throws CatalogException if any part of the serialization fails
 */
public static Map<String, String> serializeCatalogMaterializedTable(
        ResolvedCatalogMaterializedTable resolvedMaterializedTable, SqlFactory sqlFactory) {
    try {
        final Map<String, String> properties = new HashMap<>();
        serializeResolvedSchema(
                properties, resolvedMaterializedTable.getResolvedSchema(), sqlFactory);

        final String comment = resolvedMaterializedTable.getComment();
        // Use isEmpty() for consistency with serializeResolvedCatalogModel.
        if (comment != null && !comment.isEmpty()) {
            properties.put(COMMENT, comment);
        }

        final Optional<Long> snapshot = resolvedMaterializedTable.getSnapshot();
        snapshot.ifPresent(snapshotId -> properties.put(SNAPSHOT, Long.toString(snapshotId)));

        serializePartitionKeys(properties, resolvedMaterializedTable.getPartitionKeys());

        // Plain table options are flattened into the same property map.
        properties.putAll(resolvedMaterializedTable.getOptions());

        properties.put(DEFINITION_QUERY, resolvedMaterializedTable.getDefinitionQuery());

        final IntervalFreshness intervalFreshness =
                resolvedMaterializedTable.getDefinitionFreshness();
        properties.put(FRESHNESS_INTERVAL, intervalFreshness.getInterval());
        properties.put(FRESHNESS_UNIT, intervalFreshness.getTimeUnit().name());

        properties.put(
                LOGICAL_REFRESH_MODE,
                resolvedMaterializedTable.getLogicalRefreshMode().name());
        properties.put(REFRESH_MODE, resolvedMaterializedTable.getRefreshMode().name());
        properties.put(REFRESH_STATUS, resolvedMaterializedTable.getRefreshStatus().name());

        resolvedMaterializedTable
                .getRefreshHandlerDescription()
                .ifPresent(
                        refreshHandlerDesc ->
                                properties.put(REFRESH_HANDLER_DESC, refreshHandlerDesc));

        // The serialized handler is binary; encode it so it survives a string property map.
        if (resolvedMaterializedTable.getSerializedRefreshHandler() != null) {
            properties.put(
                    REFRESH_HANDLER_BYTES,
                    encodeBytesToBase64(
                            resolvedMaterializedTable.getSerializedRefreshHandler()));
        }

        return properties;
    } catch (Exception e) {
        throw new CatalogException("Error in serializing catalog materialized table.", e);
    }
}
Serializes the given {@link ResolvedCatalogMaterializedTable} into a map of string properties.
serializeCatalogMaterializedTable
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
Apache-2.0
/**
 * Serializes the given {@link ResolvedCatalogModel} into a map of string properties.
 *
 * @param resolvedModel the resolved model to serialize
 * @param sqlFactory factory used when serializing the input/output schemas
 * @return a mutable map containing schemas, comment, and options
 * @throws CatalogException if serialization fails
 */
public static Map<String, String> serializeResolvedCatalogModel(
        ResolvedCatalogModel resolvedModel, SqlFactory sqlFactory) {
    try {
        final Map<String, String> modelProperties = new HashMap<>();
        serializeResolvedModelSchema(
                modelProperties,
                resolvedModel.getResolvedInputSchema(),
                resolvedModel.getResolvedOutputSchema(),
                sqlFactory);

        final String modelComment = resolvedModel.getComment();
        if (modelComment != null && !modelComment.isEmpty()) {
            modelProperties.put(COMMENT, modelComment);
        }

        modelProperties.putAll(resolvedModel.getOptions());
        return modelProperties;
    } catch (Exception e) {
        throw new CatalogException("Error in serializing catalog model.", e);
    }
}
Serializes the given {@link ResolvedCatalogModel} into a map of string properties.
serializeResolvedCatalogModel
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
Apache-2.0
/**
 * Deserializes the given map of string properties into an unresolved {@link CatalogTable}.
 *
 * @param properties the serialized table properties
 * @return an unresolved catalog table
 */
public static CatalogTable deserializeCatalogTable(Map<String, String> properties) {
    // Delegate to the two-argument overload without a legacy fallback schema key.
    return deserializeCatalogTable(properties, null);
}
Deserializes the given map of string properties into an unresolved {@link CatalogTable}.
deserializeCatalogTable
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
Apache-2.0
/**
 * Deserializes the given map of string properties into an unresolved {@link CatalogTable}.
 *
 * @param properties the properties to deserialize from
 * @param fallbackKey the fallback key for schema properties, supporting deserialization of
 *     old (1.10) tables; may be null
 * @return an unresolved catalog table instance
 * @throws CatalogException if deserialization fails
 */
public static CatalogTable deserializeCatalogTable(
        Map<String, String> properties, @Nullable String fallbackKey) {
    try {
        // Fall back to the legacy schema key only when no modern schema entries exist.
        final int schemaEntryCount = getCount(properties, SCHEMA, NAME);
        final String schemaKey =
                (schemaEntryCount == 0 && fallbackKey != null) ? fallbackKey : SCHEMA;

        final Schema schema = deserializeSchema(properties, schemaKey);
        final @Nullable String comment = properties.get(COMMENT);

        final @Nullable Long snapshot;
        if (properties.containsKey(SNAPSHOT)) {
            snapshot = getValue(properties, SNAPSHOT, Long::parseLong);
        } else {
            snapshot = null;
        }

        final List<String> partitionKeys = deserializePartitionKeys(properties);
        final Map<String, String> options = deserializeOptions(properties, schemaKey);

        return CatalogTable.newBuilder()
                .schema(schema)
                .comment(comment)
                .partitionKeys(partitionKeys)
                .options(options)
                .snapshot(snapshot)
                .build();
    } catch (Exception e) {
        throw new CatalogException("Error in deserializing catalog table.", e);
    }
}
Deserializes the given map of string properties into an unresolved {@link CatalogTable}. @param properties The properties to deserialize from @param fallbackKey The fallback key to get the schema properties. This is meant to support the old table (1.10) deserialization @return a catalog table instance.
deserializeCatalogTable
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
Apache-2.0
/**
 * Adds an indexed sequence of properties (with sub-properties) under a common key, e.g.
 * {@code schema.fields.0.type = INT, schema.fields.0.name = test}.
 *
 * <p>A sub-property value may be null, in which case it is skipped; each row must contain
 * at least one non-null value.
 *
 * @param map the target property map to write into
 * @param key the common key prefix
 * @param subKeys the sub-key names; each row of values must have the same arity
 * @param subKeyValues one row of values per index
 * @throws IllegalArgumentException if a row has the wrong arity or is entirely null
 */
private static void putIndexedProperties(
        Map<String, String> map,
        String key,
        List<String> subKeys,
        List<List<String>> subKeyValues) {
    checkNotNull(key);
    checkNotNull(subKeys);
    checkNotNull(subKeyValues);
    for (int idx = 0; idx < subKeyValues.size(); idx++) {
        final List<String> values = subKeyValues.get(idx);
        if (values == null || values.size() != subKeys.size()) {
            throw new IllegalArgumentException("Values must have same arity as keys.");
        }
        if (values.stream().allMatch(Objects::isNull)) {
            throw new IllegalArgumentException("Values must have at least one non-null value.");
        }
        for (int keyIdx = 0; keyIdx < values.size(); keyIdx++) {
            final String value = values.get(keyIdx);
            if (value != null) {
                // Reuse the already-fetched value instead of a redundant second list lookup.
                map.put(compoundKey(key, idx, subKeys.get(keyIdx)), value);
            }
        }
    }
}
Adds an indexed sequence of properties (with sub-properties) under a common key. It supports the property's value to be null, in which case it would be ignored. The sub-properties should have at least one non-null value. <p>For example: <pre> schema.fields.0.type = INT, schema.fields.0.name = test schema.fields.1.type = LONG, schema.fields.1.name = test2 schema.fields.2.type = LONG, schema.fields.2.name = test3, schema.fields.2.expr = test2 + 1 </pre> <p>The arity of each entry in subKeyValues must match the arity of subKeys.
putIndexedProperties
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogPropertiesUtil.java
Apache-2.0
/** Creates a builder for configuring and constructing a catalog store holder. */
public static Builder newBuilder() {
    return new Builder();
}
A holder for a {@link CatalogStore} instance and the necessary information for creating and initializing {@link Catalog} instances, including a {@link CatalogStoreFactory}, a {@link ReadableConfig} instance, and a {@link ClassLoader} instance. This class provides automatic resource management using the {@link AutoCloseable} interface, ensuring that the catalog-related resources are properly closed and released when they are no longer needed. <p>A {@link CatalogStoreFactory} may create multiple {@link CatalogStore} instances, which can be useful in SQL gateway scenarios where different sessions may use different catalog stores. However, in some scenarios, a single {@link CatalogStore} instance may be sufficient, in which case the {@link CatalogStoreFactory} can be stored in the holder to ensure that it is properly closed when the {@link CatalogStore} is closed.
newBuilder
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogStoreHolder.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogStoreHolder.java
Apache-2.0
/**
 * Returns the snapshot specified for the table, or {@link Optional#empty()} if not
 * specified.
 */
default Optional<Long> getSnapshot() {
    return Optional.empty();
}
Return the snapshot specified for the table. Return Optional.empty() if not specified.
getSnapshot
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogTable.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogTable.java
Apache-2.0
/** Returns the distribution of the table if the {@code DISTRIBUTED} clause is defined. */
default Optional<TableDistribution> getDistribution() {
    return Optional.empty();
}
Returns the distribution of the table if the {@code DISTRIBUTED} clause is defined.
getDistribution
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogTable.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogTable.java
Apache-2.0
/**
 * Creates a computed column that is computed from the given {@link ResolvedExpression}.
 *
 * @param name the column name; must not be null
 * @param expression the resolved expression defining the column; its output data type
 *     becomes the column's data type
 */
public static ComputedColumn computed(String name, ResolvedExpression expression) {
    Preconditions.checkNotNull(name, "Column name can not be null.");
    Preconditions.checkNotNull(expression, "Column expression can not be null.");
    return new ComputedColumn(name, expression.getOutputDataType(), expression);
}
Creates a computed column that is computed from the given {@link ResolvedExpression}.
computed
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
Apache-2.0
/**
 * Creates a metadata column from metadata of the given column name or from metadata of the
 * given key (if not null).
 *
 * @param name the column name; must not be null
 * @param dataType the column data type; must not be null
 * @param metadataKey the metadata key to read from, or null to derive it from the name
 * @param isVirtual whether the column is virtual
 */
public static MetadataColumn metadata(
        String name, DataType dataType, @Nullable String metadataKey, boolean isVirtual) {
    Preconditions.checkNotNull(name, "Column name can not be null.");
    Preconditions.checkNotNull(dataType, "Column data type can not be null.");
    return new MetadataColumn(name, dataType, metadataKey, isVirtual);
}
Creates a metadata column from metadata of the given column name or from metadata of the given key (if not null). <p>Allows to specify whether the column is virtual or not.
metadata
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
Apache-2.0
/** Returns the comment of this column, if one has been set. */
public Optional<String> getComment() {
    return Optional.ofNullable(comment);
}
Returns the comment of this column.
getComment
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/Column.java
Apache-2.0
/**
 * A model change to set a model option, equivalent to
 * {@code ALTER MODEL <model_name> SET (key=value)}.
 *
 * @param key the option name to set
 * @param value the option value to set
 * @return a ModelChange representing the modification
 */
static SetOption set(String key, String value) {
    return new SetOption(key, value);
}
A model change to set the model option. <p>It is equal to the following statement: <pre> ALTER MODEL &lt;model_name&gt; SET (key=value); </pre> @param key the option name to set. @param value the option value to set. @return a ModelChange represents the modification.
set
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ModelChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ModelChange.java
Apache-2.0
/**
 * A model change to reset a model option, equivalent to
 * {@code ALTER MODEL <model_name> RESET (key)}.
 *
 * @param key the option name to reset
 * @return a ModelChange representing the modification
 */
static ResetOption reset(String key) {
    return new ResetOption(key);
}
A model change to reset the model option. <p>It is equal to the following statement: <pre> ALTER MODEL &lt;model_name&gt; RESET (key) </pre> @param key the option name to reset. @return a ModelChange representing the modification.
reset
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ModelChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ModelChange.java
Apache-2.0
/**
 * Creates a fully qualified identifier for a catalog object.
 *
 * @param catalogName the catalog name; must not be null or the reserved unknown name
 * @param databaseName the database name; must not be null or the reserved unknown name
 * @param objectName the object name; must not be null
 * @throws IllegalArgumentException if catalog or database uses the reserved unknown name
 */
public static ObjectIdentifier of(String catalogName, String databaseName, String objectName) {
    // The UNKNOWN name is reserved for anonymous identifiers and must never be user-set.
    final boolean usesReservedName =
            Objects.equals(catalogName, UNKNOWN) || Objects.equals(databaseName, UNKNOWN);
    if (usesReservedName) {
        throw new IllegalArgumentException(
                String.format("Catalog or database cannot be named '%s'", UNKNOWN));
    }
    return new ObjectIdentifier(
            Preconditions.checkNotNull(catalogName, "Catalog name must not be null."),
            Preconditions.checkNotNull(databaseName, "Database name must not be null."),
            Preconditions.checkNotNull(objectName, "Object name must not be null."));
}
Identifies an object in a catalog. It allows to identify objects such as tables, views, function, or types in a catalog. An identifier must be fully qualified. It is the responsibility of the catalog manager to resolve an identifier to an object. <p>While {@link ObjectPath} is used within the same catalog, instances of this class can be used across catalogs. <p>Two objects are considered equal if they share the same object identifier in a stable session context.
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
Apache-2.0
/**
 * Converts this {@link ObjectIdentifier} to an {@link ObjectPath}.
 *
 * @throws TableException if this identifier refers to an anonymous object and therefore
 *     cannot be converted
 */
public ObjectPath toObjectPath() throws TableException {
    if (catalogName != null) {
        return new ObjectPath(databaseName, objectName);
    }
    // Anonymous identifiers carry no catalog and cannot be addressed by path.
    throw new TableException(
            "This ObjectIdentifier instance refers to an anonymous object, "
                    + "hence it cannot be converted to ObjectPath and cannot be serialized.");
}
Convert this {@link ObjectIdentifier} to {@link ObjectPath}. @throws TableException if the identifier cannot be converted
toObjectPath
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
Apache-2.0
/**
 * Returns the component names of this object identifier as a list.
 *
 * <p>Anonymous identifiers (no catalog) yield only the object name.
 */
public List<String> toList() {
    return catalogName == null
            ? Collections.singletonList(getObjectName())
            : Arrays.asList(getCatalogName(), getDatabaseName(), getObjectName());
}
List of the component names of this object identifier.
toList
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ObjectIdentifier.java
Apache-2.0
/**
 * Converts this object to a {@link ResolvedCatalogTable} for planner query optimization.
 */
public ResolvedCatalogTable toResolvedCatalogTable() {
    // Rebuild the unresolved table view from this materialized table's components.
    final CatalogTable unresolvedTable =
            CatalogTable.newBuilder()
                    .schema(getUnresolvedSchema())
                    .comment(getComment())
                    .partitionKeys(getPartitionKeys())
                    .options(getOptions())
                    .snapshot(getSnapshot().orElse(null))
                    .build();
    return new ResolvedCatalogTable(unresolvedTable, getResolvedSchema());
}
Convert this object to a {@link ResolvedCatalogTable} object for planner optimize query.
toResolvedCatalogTable
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogMaterializedTable.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogMaterializedTable.java
Apache-2.0
/**
 * Serializes this instance into a map of string-based properties using the default SQL
 * factory.
 *
 * <p>Compared to the pure options in {@code getOptions()}, the map also includes the input
 * schema, output schema, and comment.
 */
default Map<String, String> toProperties() {
    return toProperties(DefaultSqlFactory.INSTANCE);
}
Serializes this instance into a map of string-based properties. <p>Compared to the pure table options in {@link #getOptions()}, the map includes input schema, output schema, comment and options.
toProperties
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
Apache-2.0
/**
 * Creates an instance of {@link CatalogModel} from a map of string properties that were
 * previously created with {@code ResolvedCatalogModel#toProperties(SqlFactory)}.
 *
 * @param properties serialized version of a resolved catalog model that includes input
 *     schema, output schema, comment, and options
 */
static CatalogModel fromProperties(Map<String, String> properties) {
    return CatalogPropertiesUtil.deserializeCatalogModel(properties);
}
Creates an instance of {@link CatalogModel} from a map of string properties that were previously created with {@link ResolvedCatalogModel#toProperties(SqlFactory)}. @param properties serialized version of a {@link ResolvedCatalogModel} that includes input schema, output schema, comment and options.
fromProperties
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
Apache-2.0
/**
 * Creates a basic implementation of this interface.
 *
 * @param origin the unresolved catalog model
 * @param resolvedInputSchema the resolved input schema
 * @param resolvedOutputSchema the resolved output schema
 */
static ResolvedCatalogModel of(
        CatalogModel origin,
        ResolvedSchema resolvedInputSchema,
        ResolvedSchema resolvedOutputSchema) {
    return new DefaultResolvedCatalogModel(origin, resolvedInputSchema, resolvedOutputSchema);
}
Creates a basic implementation of this interface. @param origin origin unresolved catalog model @param resolvedInputSchema resolved input schema @param resolvedOutputSchema resolved output schema
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedCatalogModel.java
Apache-2.0
/** Returns the number of {@link Column}s of this schema. */
public int getColumnCount() {
    return columns.size();
}
Returns the number of {@link Column}s of this schema.
getColumnCount
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/** Returns all {@link Column}s of this schema. */
public List<Column> getColumns() {
    return columns;
}
Returns all {@link Column}s of this schema.
getColumns
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns all column names, regardless of the kind of column (physical, computed, or
 * metadata).
 */
public List<String> getColumnNames() {
    return columns.stream()
            .map(column -> column.getName())
            .collect(Collectors.toList());
}
Returns all column names. It does not distinguish between different kinds of columns.
getColumnNames
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns all column data types, regardless of the kind of column (physical, computed, or
 * metadata).
 */
public List<DataType> getColumnDataTypes() {
    return columns.stream()
            .map(column -> column.getDataType())
            .collect(Collectors.toList());
}
Returns all column data types. It does not distinguish between different kinds of columns.
getColumnDataTypes
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns the {@link Column} at the given index, or {@link Optional#empty()} if the index
 * is out of bounds.
 *
 * @param columnIndex the index of the column
 */
public Optional<Column> getColumn(int columnIndex) {
    final boolean withinBounds = columnIndex >= 0 && columnIndex < columns.size();
    return withinBounds ? Optional.of(this.columns.get(columnIndex)) : Optional.empty();
}
Returns the {@link Column} instance for the given column index. @param columnIndex the index of the column
getColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns the first {@link Column} with the given name, or {@link Optional#empty()} if no
 * column matches.
 *
 * @param columnName the name of the column
 */
public Optional<Column> getColumn(String columnName) {
    for (Column candidate : this.columns) {
        if (candidate.getName().equals(columnName)) {
            return Optional.of(candidate);
        }
    }
    return Optional.empty();
}
Returns the {@link Column} instance for the given column name. @param columnName the name of the column
getColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns a list of watermark specifications, each consisting of a rowtime attribute and a
 * watermark strategy expression.
 *
 * <p>Note: Currently there is at most one {@link WatermarkSpec} in the list because
 * multiple watermark definitions are not supported yet.
 */
public List<WatermarkSpec> getWatermarkSpecs() {
    return watermarkSpecs;
}
Returns a list of watermark specifications each consisting of a rowtime attribute and watermark strategy expression. <p>Note: Currently, there is at most one {@link WatermarkSpec} in the list, because we don't support multiple watermark definitions yet.
getWatermarkSpecs
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/** Returns the primary key if it has been defined. */
public Optional<UniqueConstraint> getPrimaryKey() {
    return Optional.ofNullable(primaryKey);
}
Returns the primary key if it has been defined.
getPrimaryKey
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Returns the primary key indexes within {@code toPhysicalRowDataType()}, if any; otherwise
 * an empty array.
 */
public int[] getPrimaryKeyIndexes() {
    // Indexes are relative to the physical columns only.
    final List<String> physicalColumnNames =
            getColumns().stream()
                    .filter(Column::isPhysical)
                    .map(Column::getName)
                    .collect(Collectors.toList());
    final Optional<UniqueConstraint> primaryKey = getPrimaryKey();
    if (!primaryKey.isPresent()) {
        return new int[0];
    }
    final List<String> pkColumns = primaryKey.get().getColumns();
    final int[] indexes = new int[pkColumns.size()];
    for (int i = 0; i < pkColumns.size(); i++) {
        indexes[i] = physicalColumnNames.indexOf(pkColumns.get(i));
    }
    return indexes;
}
Returns the primary key indexes in the {@link #toPhysicalRowDataType()}, if any, otherwise returns an empty array.
getPrimaryKeyIndexes
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Converts all columns of this schema into a (possibly nested) row data type — the
 * <b>source-to-query schema</b>.
 *
 * <p>Note: The returned row data type contains physical, computed, and metadata columns. In
 * a table source or sink, {@link #toPhysicalRowDataType()} is often more appropriate.
 */
public DataType toSourceRowDataType() {
    // Include every column regardless of its kind.
    return toRowDataType(c -> true);
}
Converts all columns of this schema into a (possibly nested) row data type. <p>This method returns the <b>source-to-query schema</b>. <p>Note: The returned row data type contains physical, computed, and metadata columns. Be careful when using this method in a table source or table sink. In many cases, {@link #toPhysicalRowDataType()} might be more appropriate. @see DataTypes#ROW(DataTypes.Field...) @see #toPhysicalRowDataType() @see #toSinkRowDataType()
toSourceRowDataType
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * Converts all physical columns of this schema into a (possibly nested) row data type.
 *
 * <p>Computed and metadata columns are excluded.
 */
public DataType toPhysicalRowDataType() {
    return toRowDataType(Column::isPhysical);
}
Converts all physical columns of this schema into a (possibly nested) row data type. <p>Note: The returned row data type contains only physical columns. It does not include computed or metadata columns. @see DataTypes#ROW(DataTypes.Field...) @see #toSourceRowDataType() @see #toSinkRowDataType()
toPhysicalRowDataType
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/ResolvedSchema.java
Apache-2.0
/**
 * A table change to add a column at the end, equivalent to
 * {@code ALTER TABLE <table_name> ADD <column_definition>}.
 *
 * @param column the added column definition
 * @return a TableChange representing the modification
 */
static AddColumn add(Column column) {
    // A null position appends the column at the end.
    return new AddColumn(column, null);
}
A table change to add the column at last. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; ADD &lt;column_definition&gt; </pre> @param column the added column definition. @return a TableChange represents the modification.
add
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to add a column at a specified position, equivalent to
 * {@code ALTER TABLE <table_name> ADD <column_definition> <column_position>}.
 *
 * @param column the added column definition
 * @param position the added column position; null appends at the end
 * @return a TableChange representing the modification
 */
static AddColumn add(Column column, @Nullable ColumnPosition position) {
    return new AddColumn(column, position);
}
A table change to add the column with specified position. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; ADD &lt;column_definition&gt; &lt;column_position&gt; </pre> @param column the added column definition. @param position added column position. @return a TableChange represents the modification.
add
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to add a unique constraint, equivalent to
 * {@code ALTER TABLE <table_name> ADD PRIMARY KEY (<column_name>...) NOT ENFORCED}.
 *
 * @param constraint the added constraint definition
 * @return a TableChange representing the modification
 */
static AddUniqueConstraint add(UniqueConstraint constraint) {
    return new AddUniqueConstraint(constraint);
}
A table change to add a unique constraint. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; ADD PRIMARY KEY (&lt;column_name&gt;...) NOT ENFORCED </pre> @param constraint the added constraint definition. @return a TableChange represents the modification.
add
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to add a distribution, equivalent to
 * {@code ALTER TABLE <table_name> ADD DISTRIBUTION ...}.
 *
 * @param distribution the added distribution
 * @return a TableChange representing the modification
 */
static AddDistribution add(TableDistribution distribution) {
    return new AddDistribution(distribution);
}
A table change to add a distribution. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; ADD DISTRIBUTION ... </pre> @param distribution the added distribution @return a TableChange represents the modification.
add
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to add a watermark, equivalent to
 * {@code ALTER TABLE <table_name> ADD WATERMARK FOR <row_time> AS <row_time_expression>}.
 *
 * @param watermarkSpec the added watermark definition
 * @return a TableChange representing the modification
 */
static AddWatermark add(WatermarkSpec watermarkSpec) {
    return new AddWatermark(watermarkSpec);
}
A table change to add a watermark. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; ADD WATERMARK FOR &lt;row_time&gt; AS &lt;row_time_expression&gt; </pre> @param watermarkSpec the added watermark definition. @return a TableChange represents the modification.
add
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to modify a column (data type, position, comment, name, or expression),
 * equivalent to {@code ALTER TABLE <table_name> MODIFY <column_definition> COMMENT
 * '<column_comment>' <column_position>}.
 *
 * <p>Fine-grained column changes are represented by {@code modifyPhysicalColumnType},
 * {@code modifyColumnName}, {@code modifyColumnComment}, and {@code modifyColumnPosition}.
 *
 * @param oldColumn the definition of the old column
 * @param newColumn the definition of the new column
 * @param columnPosition the new position of the column; null keeps the current position
 * @return a TableChange representing the modification
 */
static ModifyColumn modify(
        Column oldColumn, Column newColumn, @Nullable ColumnPosition columnPosition) {
    return new ModifyColumn(oldColumn, newColumn, columnPosition);
}
A table change to modify a column. The modification includes: <ul> <li>change column data type <li>reorder column position <li>modify column comment <li>rename column name <li>change the computed expression <li>change the metadata column expression </ul> <p>Some fine-grained column changes are represented by the {@link TableChange#modifyPhysicalColumnType}, {@link TableChange#modifyColumnName}, {@link TableChange#modifyColumnComment} and {@link TableChange#modifyColumnPosition}. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY &lt;column_definition&gt; COMMENT '&lt;column_comment&gt;' &lt;column_position&gt; </pre> @param oldColumn the definition of the old column. @param newColumn the definition of the new column. @param columnPosition the new position of the column. @return a TableChange represents the modification.
modify
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change that modifies a physical column's data type, equivalent to
 * {@code ALTER TABLE <table_name> MODIFY <column_name> <new_column_type>}.
 *
 * @param oldColumn the definition of the old column
 * @param newType the type of the new column
 * @return a TableChange representing the modification
 */
static ModifyPhysicalColumnType modifyPhysicalColumnType(Column oldColumn, DataType newType) {
    return new ModifyPhysicalColumnType(oldColumn, newType);
}
A table change that modify the physical column data type. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY &lt;column_name&gt; &lt;new_column_type&gt; </pre> @param oldColumn the definition of the old column. @param newType the type of the new column. @return a TableChange represents the modification.
modifyPhysicalColumnType
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to rename a column, equivalent to
 * {@code ALTER TABLE <table_name> RENAME <old_column_name> TO <new_column_name>}.
 *
 * @param oldColumn the definition of the old column
 * @param newName the name of the new column
 * @return a TableChange representing the modification
 */
static ModifyColumnName modifyColumnName(Column oldColumn, String newName) {
    return new ModifyColumnName(oldColumn, newName);
}
A table change to modify the column name. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; RENAME &lt;old_column_name&gt; TO &lt;new_column_name&gt; </pre> @param oldColumn the definition of the old column. @param newName the name of the new column. @return a TableChange represents the modification.
modifyColumnName
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to modify a column's comment, equivalent to {@code ALTER TABLE
 * <table_name> MODIFY <column_name> <original_column_type> COMMENT '<new_column_comment>'}.
 *
 * @param oldColumn the definition of the old column
 * @param newComment the modified comment
 * @return a TableChange representing the modification
 */
static ModifyColumnComment modifyColumnComment(Column oldColumn, String newComment) {
    return new ModifyColumnComment(oldColumn, newComment);
}
A table change to modify the column comment. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY &lt;column_name&gt; &lt;original_column_type&gt; COMMENT '&lt;new_column_comment&gt;' </pre> @param oldColumn the definition of the old column. @param newComment the modified comment. @return a TableChange represents the modification.
modifyColumnComment
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to modify a column's position, equivalent to {@code ALTER TABLE
 * <table_name> MODIFY <column_name> <original_column_type> <column_position>}.
 *
 * @param oldColumn the definition of the old column
 * @param columnPosition the new position of the column
 * @return a TableChange representing the modification
 */
static ModifyColumnPosition modifyColumnPosition(
        Column oldColumn, ColumnPosition columnPosition) {
    return new ModifyColumnPosition(oldColumn, columnPosition);
}
A table change to modify the column position. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY &lt;column_name&gt; &lt;original_column_type&gt; &lt;column_position&gt; </pre> @param oldColumn the definition of the old column. @param columnPosition the new position of the column. @return a TableChange represents the modification.
modifyColumnPosition
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * A table change to modify a unique constraint, equivalent to
 * {@code ALTER TABLE <table_name> MODIFY PRIMARY KEY (<column_name>...) NOT ENFORCED}.
 *
 * @param newConstraint the modified constraint definition
 * @return a TableChange representing the modification
 */
static ModifyUniqueConstraint modify(UniqueConstraint newConstraint) {
    return new ModifyUniqueConstraint(newConstraint);
}
A table change to modify a unique constraint. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY PRIMARY KEY (&lt;column_name&gt;...) NOT ENFORCED; </pre> @param newConstraint the modified constraint definition. @return a TableChange represents the modification.
modify
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that modifies the table distribution, equivalent to
 * {@code ALTER TABLE <table> MODIFY DISTRIBUTION ...}.
 *
 * @param distribution the modified distribution
 * @return a TableChange representing the modification
 */
static ModifyDistribution modify(TableDistribution distribution) {
    final ModifyDistribution change = new ModifyDistribution(distribution);
    return change;
}
A table change to modify a distribution. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY DISTRIBUTION ...; </pre> @param distribution the modified distribution. @return a TableChange represents the modification.
modify
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that modifies the watermark, equivalent to
 * {@code ALTER TABLE <table> MODIFY WATERMARK FOR <row_time> AS <expression>}.
 *
 * @param newWatermarkSpec the modified watermark definition
 * @return a TableChange representing the modification
 */
static ModifyWatermark modify(WatermarkSpec newWatermarkSpec) {
    final ModifyWatermark change = new ModifyWatermark(newWatermarkSpec);
    return change;
}
A table change to modify a watermark. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; MODIFY WATERMARK FOR &lt;row_time&gt; AS &lt;row_time_expression&gt; </pre> @param newWatermarkSpec the modified watermark definition. @return a TableChange represents the modification.
modify
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that drops a column, equivalent to
 * {@code ALTER TABLE <table> DROP COLUMN <column>}.
 *
 * @param columnName the column to drop
 * @return a TableChange representing the modification
 */
static DropColumn dropColumn(String columnName) {
    final DropColumn change = new DropColumn(columnName);
    return change;
}
A table change to drop column. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP COLUMN &lt;column_name&gt; </pre> @param columnName the column to drop. @return a TableChange represents the modification.
dropColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that drops the watermark, equivalent to
 * {@code ALTER TABLE <table> DROP WATERMARK}.
 *
 * @return a TableChange representing the modification
 */
static DropWatermark dropWatermark() {
    // DropWatermark is stateless, so the shared singleton is returned.
    final DropWatermark change = DropWatermark.INSTANCE;
    return change;
}
A table change to drop watermark. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP WATERMARK </pre> @return a TableChange represents the modification.
dropWatermark
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that drops a constraint, equivalent to
 * {@code ALTER TABLE <table> DROP CONSTRAINT <constraint>}.
 *
 * @param constraintName the constraint to drop
 * @return a TableChange representing the modification
 */
static DropConstraint dropConstraint(String constraintName) {
    final DropConstraint change = new DropConstraint(constraintName);
    return change;
}
A table change to drop constraint. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP CONSTRAINT &lt;constraint_name&gt; </pre> @param constraintName the constraint to drop. @return a TableChange represents the modification.
dropConstraint
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that drops the table's distribution, equivalent to
 * {@code ALTER TABLE <table> DROP DISTRIBUTION}.
 *
 * @return a TableChange representing the modification
 */
static DropDistribution dropDistribution() {
    // DropDistribution is stateless, so the shared singleton is returned.
    final DropDistribution change = DropDistribution.INSTANCE;
    return change;
}
A table change to drop a table's distribution. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP DISTRIBUTION </pre> @return a TableChange represents the modification.
dropDistribution
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that modifies a materialized table's refresh status.
 *
 * @param refreshStatus the modified refresh status
 * @return a TableChange representing the modification
 */
static ModifyRefreshStatus modifyRefreshStatus(
        CatalogMaterializedTable.RefreshStatus refreshStatus) {
    final ModifyRefreshStatus change = new ModifyRefreshStatus(refreshStatus);
    return change;
}
A table change to modify materialized table refresh status. @param refreshStatus the modified refresh status. @return a TableChange represents the modification.
modifyRefreshStatus
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that modifies a materialized table's refresh handler.
 *
 * @param refreshHandlerDesc the modified refresh handler description
 * @param refreshHandlerBytes the modified serialized refresh handler
 * @return a TableChange representing the modification
 */
static ModifyRefreshHandler modifyRefreshHandler(
        String refreshHandlerDesc, byte[] refreshHandlerBytes) {
    final ModifyRefreshHandler change =
            new ModifyRefreshHandler(refreshHandlerDesc, refreshHandlerBytes);
    return change;
}
A table change to modify materialized table refresh handler. @param refreshHandlerDesc the modified refresh handler description. @param refreshHandlerBytes the modified refresh handler bytes. @return a TableChange represents the modification.
modifyRefreshHandler
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a change that modifies a materialized table's definition query.
 *
 * @param definitionQuery the modified definition query
 * @return a TableChange representing the modification
 */
static ModifyDefinitionQuery modifyDefinitionQuery(String definitionQuery) {
    final ModifyDefinitionQuery change = new ModifyDefinitionQuery(definitionQuery);
    return change;
}
A table change to modify materialized table definition query. @param definitionQuery the modified definition query. @return a TableChange represents the modification.
modifyDefinitionQuery
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Returns the original {@link Column} instance before the change. */
public Column getOldColumn() {
    return this.oldColumn;
}
Returns the original {@link Column} instance.
getOldColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Returns the modified {@link Column} instance after the change. */
public Column getNewColumn() {
    return this.newColumn;
}
Returns the modified {@link Column} instance.
getNewColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Builds a copy of {@code oldColumn} under {@code newName}, preserving the column's kind
 * (physical, metadata, or computed), data type, metadata key/virtual flag, expression, and
 * comment. Used when renaming a column.
 *
 * @param oldColumn the column being renamed
 * @param newName the new column name
 * @return an equivalent column definition carrying the new name
 */
private static Column createNewColumn(Column oldColumn, String newName) {
    final Column renamed;
    if (oldColumn instanceof Column.PhysicalColumn) {
        renamed = Column.physical(newName, oldColumn.getDataType());
    } else if (oldColumn instanceof Column.MetadataColumn) {
        final Column.MetadataColumn metadataColumn = (Column.MetadataColumn) oldColumn;
        renamed =
                Column.metadata(
                        newName,
                        oldColumn.getDataType(),
                        metadataColumn.getMetadataKey().orElse(null),
                        metadataColumn.isVirtual());
    } else {
        // Remaining case: a computed column defined by an expression.
        renamed =
                Column.computed(newName, ((Column.ComputedColumn) oldColumn).getExpression());
    }
    // The comment is carried over identically for all three column kinds.
    return renamed.withComment(oldColumn.comment);
}
A table change to modify the column name. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; RENAME &lt;old_column_name&gt; TO &lt;new_column_name&gt; </pre>
createNewColumn
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Human-readable representation of the stateless DropWatermark change. */
@Override
public String toString() {
    final String representation = "DropWatermark";
    return representation;
}
A table change to drop the watermark. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP WATERMARK </pre>
toString
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Human-readable representation of the stateless DropDistribution change. */
@Override
public String toString() {
    final String representation = "DropDistribution";
    return representation;
}
A table change to drop a table's distribution. <p>It is equal to the following statement: <pre> ALTER TABLE &lt;table_name&gt; DROP DISTRIBUTION </pre>
toString
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Returns the option key being set by this change. */
public String getKey() {
    return this.key;
}
Returns the Option key to set.
getKey
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Returns the position that places the column first in the schema. */
static ColumnPosition first() {
    // FIRST carries no state, so the shared singleton is reused.
    final ColumnPosition position = First.INSTANCE;
    return position;
}
Get the position to place the column at the first.
first
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Returns the position that places the column immediately after the given column.
 *
 * @param column the name of the reference column
 */
static ColumnPosition after(String column) {
    final ColumnPosition position = new After(column);
    return position;
}
Get the position to place the column after the specified column.
after
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/** Human-readable representation of the FIRST column position. */
@Override
public String toString() {
    final String representation = "FIRST";
    return representation;
}
Column position FIRST means the specified column should be the first column.
toString
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java
Apache-2.0
/**
 * Creates a distribution of the given kind over the given keys with a declared number of
 * buckets.
 *
 * @param kind the distribution kind
 * @param bucketCount the number of buckets, or {@code null} if unspecified
 * @param bucketKeys the columns to distribute by
 */
public static TableDistribution of(
        Kind kind, @Nullable Integer bucketCount, List<String> bucketKeys) {
    final TableDistribution distribution =
            new TableDistribution(kind, bucketCount, bucketKeys);
    return distribution;
}
Distribution of the given kind over the given keys with a declared number of buckets.
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableDistribution.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableDistribution.java
Apache-2.0
/**
 * Creates a hash distribution over the given keys among the declared number of buckets.
 *
 * @param bucketKeys the columns to hash by
 * @param bucketCount the number of buckets, or {@code null} if unspecified
 */
public static TableDistribution ofHash(List<String> bucketKeys, @Nullable Integer bucketCount) {
    // Delegates to the generic factory with the HASH kind fixed.
    return of(Kind.HASH, bucketCount, bucketKeys);
}
Hash distribution over the given keys among the declared number of buckets.
ofHash
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableDistribution.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableDistribution.java
Apache-2.0
public static UnresolvedIdentifier of(String... path) { if (path == null) { throw new ValidationException("Object identifier can not be null!"); } if (path.length < 1 || path.length > 3) { throw new ValidationException("Object identifier must consist of 1 to 3 parts."); } if (Arrays.stream(path).anyMatch(StringUtils::isNullOrWhitespaceOnly)) { throw new ValidationException( "Parts of the object identifier are null or whitespace-only."); } if (path.length == 3) { return new UnresolvedIdentifier(path[0], path[1], path[2]); } else if (path.length == 2) { return new UnresolvedIdentifier(null, path[0], path[1]); } else { return new UnresolvedIdentifier(null, null, path[0]); } }
Constructs an {@link UnresolvedIdentifier} from an array of identifier segments. The length of the path must be between 1 (only object name) and 3 (fully qualified identifier with catalog, database and object name). @param path array of identifier segments @return an identifier that must be resolved before accessing an object from a catalog manager
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/UnresolvedIdentifier.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/UnresolvedIdentifier.java
Apache-2.0
public static UnresolvedIdentifier of(List<String> path) { return of(path.toArray(new String[0])); }
Constructs an {@link UnresolvedIdentifier} from a list of identifier segments. @see UnresolvedIdentifier#of(String...)
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/UnresolvedIdentifier.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/UnresolvedIdentifier.java
Apache-2.0
/**
 * Creates a watermark specification for a {@link ResolvedSchema}, pairing the rowtime
 * attribute with a {@link ResolvedExpression} used for watermark generation.
 *
 * @param rowtimeAttribute the name of the rowtime attribute
 * @param watermarkExpression the expression generating the watermark
 */
public static WatermarkSpec of(
        String rowtimeAttribute, ResolvedExpression watermarkExpression) {
    final WatermarkSpec spec = new WatermarkSpec(rowtimeAttribute, watermarkExpression);
    return spec;
}
Representation of a watermark specification in a {@link ResolvedSchema}. <p>It defines the rowtime attribute and a {@link ResolvedExpression} for watermark generation.
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/WatermarkSpec.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/WatermarkSpec.java
Apache-2.0
/**
 * Returns the name of the rowtime attribute.
 *
 * <p>The referenced attribute must be present in the {@link ResolvedSchema} and must be of
 * {@link TimestampType}.
 */
public String getRowtimeAttribute() {
    return this.rowtimeAttribute;
}
Returns the name of a rowtime attribute. <p>The referenced attribute must be present in the {@link ResolvedSchema} and must be of {@link TimestampType}.
getRowtimeAttribute
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/WatermarkSpec.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/WatermarkSpec.java
Apache-2.0
/**
 * Builds the exception message for a missing model, including the catalog name only when one
 * is known.
 *
 * @param catalogName the catalog name, or {@code null} if not applicable
 * @param modelPath the database/object path of the model
 * @return the formatted message
 */
private static String formatMsg(String catalogName, ObjectPath modelPath) {
    final String database = modelPath.getDatabaseName();
    final String object = modelPath.getObjectName();
    return catalogName == null
            ? String.format(MSG_WITHOUT_CATALOG, database, object)
            : String.format(MSG, catalogName, database, object);
}
Exception for trying to operate on a model that doesn't exist.
formatMsg
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/exceptions/ModelNotExistException.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/exceptions/ModelNotExistException.java
Apache-2.0
/** Shortcut for a simple {@link RowKind#INSERT}-only changelog. */
public static ChangelogMode insertOnly() {
    // Immutable, so the precomputed constant is shared.
    final ChangelogMode mode = INSERT_ONLY;
    return mode;
}
Shortcut for a simple {@link RowKind#INSERT}-only changelog.
insertOnly
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
Apache-2.0
/**
 * Shortcut for an upsert changelog that describes idempotent updates on a key and thus does
 * not contain {@link RowKind#UPDATE_BEFORE} rows. Deletes are assumed to be key-only.
 */
public static ChangelogMode upsert() {
    final boolean keyOnlyDeletes = true;
    return upsert(keyOnlyDeletes);
}
Shortcut for an upsert changelog that describes idempotent updates on a key and thus does not contain {@link RowKind#UPDATE_BEFORE} rows.
upsert
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
Apache-2.0
/**
 * Shortcut for an upsert changelog that describes idempotent updates on a key and thus does
 * not contain {@link RowKind#UPDATE_BEFORE} rows.
 *
 * @param keyOnlyDeletes tells the system that DELETE events contain just the key
 */
public static ChangelogMode upsert(boolean keyOnlyDeletes) {
    return keyOnlyDeletes ? UPSERT : UPSERT_WITH_FULL_DELETES;
}
Shortcut for an upsert changelog that describes idempotent updates on a key and thus does not contain {@link RowKind#UPDATE_BEFORE} rows. @param keyOnlyDeletes Tells the system the DELETEs contain just the key.
upsert
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
Apache-2.0
/** Shortcut for a changelog that can contain all {@link RowKind}s. */
public static ChangelogMode all() {
    // Immutable, so the precomputed constant is shared.
    final ChangelogMode mode = ALL;
    return mode;
}
Shortcut for a changelog that can contain all {@link RowKind}s.
all
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ChangelogMode.java
Apache-2.0
/**
 * Returns the custom parallelism for this source/sink instance.
 *
 * @return empty if the connector does not provide a custom parallelism; the planner then
 *     decides the number of parallel instances by itself
 */
default Optional<Integer> getParallelism() {
    // No custom parallelism by default; connectors override as needed.
    final Optional<Integer> noCustomParallelism = Optional.empty();
    return noCustomParallelism;
}
Returns the parallelism for this instance. <p>The parallelism denotes how many parallel instances of a source or sink will be spawned during the execution. <p>Enforcing a different parallelism for sources/sinks might mess up the changelog if the output/input is not {@link ChangelogMode#insertOnly()}. Therefore, a primary key is required by which the output/input will be shuffled after/before records leave/enter the {@link ScanRuntimeProvider}/{@link SinkRuntimeProvider} implementation. @return empty if the connector does not provide a custom parallelism, then the planner will decide the number of parallel instances by itself.
getParallelism
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ParallelismProvider.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/ParallelismProvider.java
Apache-2.0
/** Same as {@link #project(DataType)}, but accepting and returning {@link LogicalType}. */
public LogicalType project(LogicalType logicalType) {
    // Bridge through DataType: wrap, project, then unwrap back to a LogicalType.
    final DataType projected = this.project(DataTypes.of(logicalType));
    return projected.getLogicalType();
}
Same as {@link #project(DataType)}, but accepting and returning {@link LogicalType}.
project
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/** Like {@link #complement(int)}, using the field count of the given {@code dataType}. */
public Projection complement(DataType dataType) {
    final int fieldCount = DataType.getFieldCount(dataType);
    return complement(fieldCount);
}
Like {@link #complement(int)}, using the {@code dataType} fields count.
complement
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/**
 * Creates an empty {@link Projection}, i.e. one that projects no fields and returns an empty
 * {@link DataType}.
 */
public static Projection empty() {
    // Stateless, so the shared singleton is reused.
    final Projection projection = EmptyProjection.INSTANCE;
    return projection;
}
Create an empty {@link Projection}, that is a projection that projects no fields, returning an empty {@link DataType}.
empty
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/**
 * Creates a {@link Projection} of the provided top-level {@code indexes}.
 *
 * @see #toTopLevelIndexes()
 */
public static Projection of(int[] indexes) {
    // An empty index array degenerates to the empty projection.
    return indexes.length == 0 ? empty() : new TopLevelProjection(indexes);
}
Create a {@link Projection} of the provided {@code indexes}. @see #toTopLevelIndexes()
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/**
 * Creates a {@link Projection} of the provided nested {@code indexes}.
 *
 * @see #toNestedIndexes()
 */
public static Projection of(int[][] indexes) {
    // An empty index array degenerates to the empty projection.
    return indexes.length == 0 ? empty() : new NestedProjection(indexes);
}
Create a {@link Projection} of the provided {@code indexes}. @see #toNestedIndexes()
of
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/**
 * Creates a {@link Projection} of the given {@code dataType} selecting the fields named in
 * {@code projectedFields}, in order.
 *
 * <p>NOTE(review): a name absent from {@code dataType} yields index -1 (from
 * {@code List#indexOf}) with no validation — presumably callers guarantee existence; confirm.
 */
public static Projection fromFieldNames(DataType dataType, List<String> projectedFields) {
    final List<String> allFieldNames = DataType.getFieldNames(dataType);
    final int[] indexes = new int[projectedFields.size()];
    for (int i = 0; i < indexes.length; i++) {
        indexes[i] = allFieldNames.indexOf(projectedFields.get(i));
    }
    return new TopLevelProjection(indexes);
}
Create a {@link Projection} of the provided {@code dataType} using the provided {@code projectedFields}.
fromFieldNames
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/** Creates a {@link Projection} selecting every field of the provided {@code dataType}. */
public static Projection all(DataType dataType) {
    final int fieldCount = DataType.getFieldCount(dataType);
    // Identity projection: indexes 0..fieldCount-1 in order.
    return new TopLevelProjection(IntStream.range(0, fieldCount).toArray());
}
Create a {@link Projection} of all the fields in the provided {@code dataType}.
all
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/Projection.java
Apache-2.0
/**
 * Returns the metadata keys and their data types that this format can produce for reading.
 *
 * <p>Only used if the outer {@link DynamicTableSource} implements
 * {@link SupportsReadingMetadata} and forwards to this method. A decoding format appends
 * requested metadata columns at the end of produced rows.
 *
 * @return an empty map by default (no readable metadata)
 */
default Map<String, DataType> listReadableMetadata() {
    final Map<String, DataType> noMetadata = Collections.emptyMap();
    return noMetadata;
}
Returns the map of metadata keys and their corresponding data types that can be produced by this format for reading. By default, this method returns an empty map. <p>Metadata columns add additional columns to the table's schema. A decoding format is responsible to add requested metadata columns at the end of produced rows. <p>See {@link SupportsReadingMetadata} for more information. <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link SupportsReadingMetadata} and calls this method in {@link SupportsReadingMetadata#listReadableMetadata()}.
listReadableMetadata
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/DecodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/DecodingFormat.java
Apache-2.0
/**
 * Provides the metadata keys the produced row must contain as appended metadata columns.
 *
 * <p>Only used if the outer {@link DynamicTableSource} implements
 * {@link SupportsReadingMetadata}. The default rejects metadata keys: formats that advertise
 * readable metadata must override this method.
 *
 * @param metadataKeys the requested metadata keys
 */
@SuppressWarnings("unused")
default void applyReadableMetadata(List<String> metadataKeys) {
    final String message =
            "A decoding format must override this method to apply metadata keys.";
    throw new UnsupportedOperationException(message);
}
Provides a list of metadata keys that the produced row must contain as appended metadata columns. By default, this method throws an exception if metadata keys are defined. <p>See {@link SupportsReadingMetadata} for more information. <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link SupportsReadingMetadata} and calls this method in {@link SupportsReadingMetadata#applyReadableMetadata(List, DataType)}.
applyReadableMetadata
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/DecodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/DecodingFormat.java
Apache-2.0
/**
 * Returns the metadata keys and their data types that this format can consume for writing.
 *
 * <p>Only used if the outer {@link DynamicTableSink} implements
 * {@link SupportsWritingMetadata} and forwards to this method. An encoding format accepts
 * requested metadata columns at the end of consumed rows and persists them.
 *
 * @return an empty map by default (no writable metadata)
 */
default Map<String, DataType> listWritableMetadata() {
    final Map<String, DataType> noMetadata = Collections.emptyMap();
    return noMetadata;
}
Returns the map of metadata keys and their corresponding data types that can be consumed by this format for writing. By default, this method returns an empty map. <p>Metadata columns add additional columns to the table's schema. An encoding format is responsible to accept requested metadata columns at the end of consumed rows and persist them. <p>See {@link SupportsWritingMetadata} for more information. <p>Note: This method is only used if the outer {@link DynamicTableSink} implements {@link SupportsWritingMetadata} and calls this method in {@link SupportsWritingMetadata#listWritableMetadata()}.
listWritableMetadata
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/EncodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/EncodingFormat.java
Apache-2.0
/**
 * Provides the metadata keys the consumed row will contain as appended metadata columns.
 *
 * <p>Only used if the outer {@link DynamicTableSink} implements
 * {@link SupportsWritingMetadata}. The default rejects metadata keys: formats that advertise
 * writable metadata must override this method.
 *
 * @param metadataKeys the requested metadata keys
 */
@SuppressWarnings("unused")
default void applyWritableMetadata(List<String> metadataKeys) {
    final String message =
            "An encoding format must override this method to apply metadata keys.";
    throw new UnsupportedOperationException(message);
}
Provides a list of metadata keys that the consumed row will contain as appended metadata columns. By default, this method throws an exception if metadata keys are defined. <p>See {@link SupportsWritingMetadata} for more information. <p>Note: This method is only used if the outer {@link DynamicTableSink} implements {@link SupportsWritingMetadata} and calls this method in {@link SupportsWritingMetadata#applyWritableMetadata(List, DataType)}.
applyWritableMetadata
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/EncodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/EncodingFormat.java
Apache-2.0
/** Returns whether this format supports nested projection; {@code false} by default. */
default boolean supportsNestedProjection() {
    final boolean nestedProjectionSupported = false;
    return nestedProjectionSupported;
}
Returns whether this format supports nested projection.
supportsNestedProjection
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/ProjectableDecodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/ProjectableDecodingFormat.java
Apache-2.0
/**
 * Creates a runtime decoder producing data of the given projected physical type.
 *
 * <p>Delegates to the projection-aware overload using the identity projection over all fields
 * of {@code projectedPhysicalDataType}.
 *
 * @param context utilities required to instantiate the runtime decoder
 * @param projectedPhysicalDataType the already-projected physical data type
 * @return the runtime decoder
 */
default I createRuntimeDecoder(
        DynamicTableSource.Context context, DataType projectedPhysicalDataType) {
    final int[][] allFields = Projection.all(projectedPhysicalDataType).toNestedIndexes();
    return createRuntimeDecoder(context, projectedPhysicalDataType, allFields);
}
Creates runtime decoder implementation that is configured to produce data of type {@code Projection.of(projections).project(physicalDataType)}. For more details on the usage, check {@link DecodingFormat} documentation. @param context the context provides several utilities required to instantiate the runtime decoder implementation of the format @param physicalDataType For more details check {@link DecodingFormat} @param projections the projections array. The array represents the mapping of the fields of the original {@link DataType}, including nested rows. For example, {@code [[0, 2, 1], ...]} specifies to include the 2nd field of the 3rd field of the 1st field in the top-level row. It's guaranteed that this array won't contain nested projections if {@link #supportsNestedProjection()} returns {@code false}. For more details, check {@link Projection} as well. @return the runtime decoder @see DecodingFormat
createRuntimeDecoder
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/ProjectableDecodingFormat.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/format/ProjectableDecodingFormat.java
Apache-2.0
/**
 * Returns whether data must be grouped by partition before being consumed by the sink.
 *
 * <p>By default no grouping is required and records may arrive in arbitrary partition order.
 * Implementations must never return {@code true} when {@code supportsGrouping} is false.
 *
 * @param supportsGrouping whether the current execution mode supports grouping
 */
@SuppressWarnings("unused")
default boolean requiresPartitionGrouping(boolean supportsGrouping) {
    final boolean groupingRequired = false;
    return groupingRequired;
}
Returns whether data needs to be grouped by partition before it is consumed by the sink. By default, this is not required from the runtime and records arrive in arbitrary partition order. <p>If this method returns true, the sink can expect that all records will be grouped by the partition keys before consumed by the sink. In other words: The sink will receive all elements of one partition and then all elements of another partition. Elements of different partitions will not be mixed. For some sinks, this can be used to reduce the number of partition writers and improve writing performance by writing one partition at a time. <p>The given argument indicates whether the current execution mode supports grouping or not. For example, depending on the execution mode a sorting operation might not be available during runtime. @param supportsGrouping whether the current execution mode supports grouping @return whether data need to be grouped by partition before consumed by the sink. If {@code supportsGrouping} is false, it should never return true, otherwise the planner will fail.
requiresPartitionGrouping
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsPartitioning.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsPartitioning.java
Apache-2.0
/**
 * Returns the columns the sink requires, in order, to perform a row-level delete.
 *
 * @return empty by default, meaning the consumed rows contain all columns
 */
default Optional<List<Column>> requiredColumns() {
    // Empty means "all columns" — the sink places no restriction.
    final Optional<List<Column>> allColumns = Optional.empty();
    return allColumns;
}
The required columns by the sink to perform row-level delete. The rows consumed by sink will contain the required columns in order. If return Optional.empty(), it will contain all columns.
requiredColumns
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelDelete.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelDelete.java
Apache-2.0
/**
 * Returns how the planner should rewrite a DELETE statement for this sink.
 *
 * <p>With {@code DELETED_ROWS} (the default) the sink receives the rows matching the delete
 * filter (as {@code RowKind#DELETE}); with {@code REMAINING_ROWS} it receives the complement
 * (as {@code RowKind#INSERT}).
 */
default RowLevelDeleteMode getRowLevelDeleteMode() {
    final RowLevelDeleteMode defaultMode = RowLevelDeleteMode.DELETED_ROWS;
    return defaultMode;
}
Planner will rewrite delete statement to query base on the {@link RowLevelDeleteInfo}, keeping the query of delete unchanged by default(in `DELETE_ROWS` mode), or changing the query to the complementary set in REMAINING_ROWS mode. <p>Take the following SQL as an example: <pre>{@code DELETE FROM t WHERE y = 2; }</pre> <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#DELETED_ROWS}, the sink will get the rows to be deleted which match the filter [y = 2]. <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#REMAINING_ROWS}, the sink will get the rows which don't match the filter [y = 2]. <p>Note: All rows will be of RowKind#DELETE when RowLevelDeleteMode is DELETED_ROWS, and RowKind#INSERT when RowLevelDeleteMode is REMAINING_ROWS.
getRowLevelDeleteMode
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelDelete.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelDelete.java
Apache-2.0
/**
 * Returns how the planner should rewrite an UPDATE statement for this sink.
 *
 * <p>With {@code UPDATED_ROWS} (the default) the sink receives only the update-after rows
 * matching the filter (as {@code RowKind#UPDATE_AFTER}); with {@code ALL_ROWS} it receives
 * those unioned with the non-matching rows (all as {@code RowKind#INSERT}).
 */
default RowLevelUpdateMode getRowLevelUpdateMode() {
    final RowLevelUpdateMode defaultMode = RowLevelUpdateMode.UPDATED_ROWS;
    return defaultMode;
}
Planner will rewrite the update statement to query base on the {@link RowLevelUpdateMode}, keeping the query of update unchanged by default(in `UPDATED_ROWS` mode), or changing the query to union the updated rows and the other rows (in `ALL_ROWS` mode). <p>Take the following SQL as an example: <pre>{@code UPDATE t SET x = 1 WHERE y = 2; }</pre> <p>If returns {@link RowLevelUpdateMode#UPDATED_ROWS}, the sink will get the update after rows which match the filter [y = 2]. <p>If returns {@link RowLevelUpdateMode#ALL_ROWS}, the sink will get both the update after rows which match the filter [y = 2] and the other rows that don't match the filter [y = 2]. <p>Note: All rows will have RowKind#UPDATE_AFTER when RowLevelUpdateMode is UPDATED_ROWS, and RowKind#INSERT when RowLevelUpdateMode is ALL_ROWS.
getRowLevelUpdateMode
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelUpdate.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelUpdate.java
Apache-2.0
/**
 * Returns whether {@code partition(RowData, int)} is guaranteed to return the same result for
 * the same join-key row. Must return {@code false} if the partitioning depends on non-pure
 * functions (random, current time, ...); the planner may then skip this partitioner in upsert
 * mode to avoid out-of-order changelog events.
 */
default boolean isDeterministic() {
    final boolean deterministic = true;
    return deterministic;
}
Returns information about the determinism of this partitioner. <p>It returns true if and only if a call to the {@link #partition(RowData, int)} method is guaranteed to always return the same result given the same joinKeyRow. If the partitioning logic depends on not purely functional like <code> random(), date(), now(), ...</code> this method must return false. <p>If this method return false, planner may not apply this partitioner in upsert mode to avoid out-of-order of the changelog events.
isDeterministic
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsLookupCustomShuffle.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsLookupCustomShuffle.java
Apache-2.0
/**
 * Returns whether projections can be applied to metadata columns.
 *
 * <p>Only consulted when the source does <em>not</em> implement
 * {@link SupportsProjectionPushDown}. Returning {@code false} tells the planner to apply all
 * metadata columns defined in the table's schema rather than only the selected ones.
 */
default boolean supportsMetadataProjection() {
    final boolean projectionSupported = true;
    return projectionSupported;
}
Defines whether projections can be applied to metadata columns. <p>This method is only called if the source does <em>not</em> implement {@link SupportsProjectionPushDown}. By default, the planner will only apply metadata columns which have actually been selected in the query regardless. By returning {@code false} instead the source can inform the planner to apply all metadata columns defined in the table's schema. <p>If the source implements {@link SupportsProjectionPushDown}, projections of metadata columns are always considered before calling {@link #applyReadableMetadata(List, DataType)}.
supportsMetadataProjection
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsReadingMetadata.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/abilities/SupportsReadingMetadata.java
Apache-2.0
public Builder expireAfterAccess(Duration duration) { expireAfterAccessDuration = duration; return this; }
Specifies the duration after an entry is last accessed that it should be automatically removed.
expireAfterAccess
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
Apache-2.0
public Builder expireAfterWrite(Duration duration) { expireAfterWriteDuration = duration; return this; }
Specifies the duration after an entry is created that it should be automatically removed.
expireAfterWrite
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
Apache-2.0
public Builder maximumSize(long maximumSize) { this.maximumSize = maximumSize; return this; }
Specifies the maximum number of entries of the cache.
maximumSize
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
Apache-2.0
public Builder cacheMissingKey(boolean cacheMissingKey) { this.cacheMissingKey = cacheMissingKey; return this; }
Specifies whether to cache empty value into the cache. <p>Please note that "empty" means a collection without any rows in it instead of null. The cache will not accept any null key or value.
cacheMissingKey
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/DefaultLookupCache.java
Apache-2.0
static ElementGetter createElementGetter(LogicalType elementType) { final ElementGetter elementGetter; // ordered by type root definition switch (elementType.getTypeRoot()) { case CHAR: case VARCHAR: elementGetter = ArrayData::getString; break; case BOOLEAN: elementGetter = ArrayData::getBoolean; break; case BINARY: case VARBINARY: elementGetter = ArrayData::getBinary; break; case DECIMAL: final int decimalPrecision = getPrecision(elementType); final int decimalScale = getScale(elementType); elementGetter = (array, pos) -> array.getDecimal(pos, decimalPrecision, decimalScale); break; case TINYINT: elementGetter = ArrayData::getByte; break; case SMALLINT: elementGetter = ArrayData::getShort; break; case INTEGER: case DATE: case TIME_WITHOUT_TIME_ZONE: case INTERVAL_YEAR_MONTH: elementGetter = ArrayData::getInt; break; case BIGINT: case INTERVAL_DAY_TIME: elementGetter = ArrayData::getLong; break; case FLOAT: elementGetter = ArrayData::getFloat; break; case DOUBLE: elementGetter = ArrayData::getDouble; break; case TIMESTAMP_WITHOUT_TIME_ZONE: case TIMESTAMP_WITH_LOCAL_TIME_ZONE: final int timestampPrecision = getPrecision(elementType); elementGetter = (array, pos) -> array.getTimestamp(pos, timestampPrecision); break; case TIMESTAMP_WITH_TIME_ZONE: throw new UnsupportedOperationException(); case ARRAY: elementGetter = ArrayData::getArray; break; case MULTISET: case MAP: elementGetter = ArrayData::getMap; break; case ROW: case STRUCTURED_TYPE: final int rowFieldCount = getFieldCount(elementType); elementGetter = (array, pos) -> array.getRow(pos, rowFieldCount); break; case DISTINCT_TYPE: elementGetter = createElementGetter(((DistinctType) elementType).getSourceType()); break; case RAW: elementGetter = ArrayData::getRawValue; break; case NULL: case SYMBOL: case UNRESOLVED: case DESCRIPTOR: default: throw new IllegalArgumentException(); } return (array, pos) -> { if (array.isNullAt(pos)) { return null; } return elementGetter.getElementOrNull(array, pos); }; }
Creates an accessor for getting elements in an internal array data structure at the given position. @param elementType the element type of the array
createElementGetter
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/ArrayData.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/ArrayData.java
Apache-2.0
public int precision() { return precision; }
Returns the <i>precision</i> of this {@link DecimalData}. <p>The precision is the number of digits in the unscaled value.
precision
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
Apache-2.0
public int scale() { return scale; }
Returns the <i>scale</i> of this {@link DecimalData}.
scale
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
Apache-2.0
public BigDecimal toBigDecimal() { BigDecimal bd = decimalVal; if (bd == null) { decimalVal = bd = BigDecimal.valueOf(longVal, scale); } return bd; }
Converts this {@link DecimalData} into an instance of {@link BigDecimal}.
toBigDecimal
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
Apache-2.0
public long toUnscaledLong() { if (isCompact()) { return longVal; } else { return toBigDecimal().unscaledValue().longValueExact(); } }
Returns a long describing the <i>unscaled value</i> of this {@link DecimalData}. @throws ArithmeticException if this {@link DecimalData} does not exactly fit in a long.
toUnscaledLong
java
apache/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
https://github.com/apache/flink/blob/master/flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/DecimalData.java
Apache-2.0