name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, range 3.26–3.68) |
|---|---|---|
hudi_BulkInsertWriterHelper_getInstantTime_rdh | /**
 * Returns the instant time associated with this write.
 *
 * @return the write instant time
 */
public String getInstantTime() {
  return instantTime;
} | 3.26 |
hudi_ClusteringPlanStrategy_getFileSlicesEligibleForClustering_rdh | /**
* Return file slices eligible for clustering. FileIds in pending clustering/compaction are not eligible for clustering.
*/
protected Stream<FileSlice> getFileSlicesEligibleForClustering(String partition) {
SyncableFileSystemView fileSystemView = ((SyncableFileSystemView) (getHoodieTable().getSliceView()));
... | 3.26 |
hudi_ClusteringPlanStrategy_buildMetrics_rdh | /**
 * Generate metrics for the data to be clustered.
 *
 * @param fileSlices file slices participating in clustering
 * @return map of metric name to metric value for the given slices
 */
protected Map<String, Double> buildMetrics(List<FileSlice> fileSlices) {
  Map<String, Double> metrics = new HashMap<>();
  FileSliceMetricUtils.addFileSliceCommonMetrics(fileSlices, metrics, getWriteConfig().getParquetMaxFileSize());
  return metrics;
} | 3.26 |
hudi_ClusteringPlanStrategy_getPlanVersion_rdh | /**
 * Returns the clustering plan version, kept as a constant so the plan
 * format can evolve in the future without breaking older plans.
 *
 * @return the current plan version
 */
protected int getPlanVersion() {
  return CLUSTERING_PLAN_VERSION_1;
} | 3.26 |
hudi_ClusteringPlanStrategy_checkAndGetClusteringPlanStrategy_rdh | /**
* Check if the given class is deprecated.
* If it is, then try to convert it to suitable one and update the write config accordingly.
*
* @param config
* write config
* @return class name of clustering plan strategy
*/
public static String checkAndGetClusteringPlanStrategy(HoodieWriteConfig config) {
... | 3.26 |
hudi_ClusteringPlanStrategy_getExtraMetadata_rdh | /**
 * Returns any strategy-specific parameters to be stored as part of the
 * clustering metadata. The base implementation contributes none, so an
 * immutable empty map is returned.
 */
protected Map<String, String> getExtraMetadata() {
  return Collections.emptyMap();
} | 3.26 |
hudi_ClusteringPlanStrategy_checkPrecondition_rdh | /**
 * Checks whether clustering may proceed. Returning {@code false} makes the
 * PlanStrategy generate an empty plan, which stops the scheduling.
 * The base implementation always allows clustering to proceed.
 */
public boolean checkPrecondition() {
  return true;
} | 3.26 |
hudi_ClusteringPlanStrategy_getFileSliceInfo_rdh | /**
* Transform {@link FileSlice} to {@link HoodieSliceInfo}.
*/
protected static List<HoodieSliceInfo> getFileSliceInfo(List<FileSlice> slices) {
return slices.stream().map(slice -> new HoodieSliceInfo().newBuilder().setPartitionPath(slice.getPartitionPath()).setFileId(slice.getFileId()).setDataFilePath(slice.g... | 3.26 |
hudi_HoodieAvroReadSupport_checkLegacyMode_rdh | /**
* Check whether write map/list with legacy mode.
* legacy:
* list:
* optional group obj_ids (LIST) {
* repeated binary array (UTF8);
* }
* map:
* optional group obj_ids (MAP) {
* repeated group map (MAP_KEY_VALUE) {
* required binary key (UTF8);
* required binary va... | 3.26 |
hudi_FlinkConsistentBucketUpdateStrategy_patchFileIdToRecords_rdh | /**
* Rewrite the first record with given fileID
*/
private void patchFileIdToRecords(List<HoodieRecord> records, String fileId) {
HoodieRecord first = records.get(0);
HoodieRecord record = new HoodieAvroRecord<>(first.getKey(), ((HoodieRecordPayload) (first.getData())), first.getOperation());
HoodieRecor... | 3.26 |
hudi_MysqlDebeziumSource_m0_rdh | /**
* Debezium Kafka Payload has a nested structure (see https://debezium.io/documentation/reference/1.4/connectors/mysql.html).
* This function flattens this nested structure for the Mysql data, and also extracts a subset of Debezium metadata fields.
*
* @param rowDataset
* Dataset containing Debezium Payloads
... | 3.26 |
hudi_AbstractRealtimeRecordReader_init_rdh | /**
* Gets schema from HoodieTableMetaClient. If not, falls
* back to the schema from the latest parquet file. Finally, sets the partition column and projection fields into the
* job conf.
*/
private void init() throws Exception {
LOG.info("Getting writer schema from table avro schema ");
writerSchema = new... | 3.26 |
hudi_HoodieWriteCommitPulsarCallbackConfig_setCallbackPulsarConfigIfNeeded_rdh | /**
* Set default value for {@link HoodieWriteCommitPulsarCallbackConfig} if needed.
*/
public static void setCallbackPulsarConfigIfNeeded(HoodieConfig config) {
config.setDefaultValue(PRODUCER_ROUTE_MODE);
config.setDefaultValue(OPERATION_TIMEOUT);
config.setDefaultValue(CONNECTION_TIMEOUT);
config.... | 3.26 |
hudi_BaseHoodieFunctionalIndexClient_register_rdh | /**
* Register a functional index.
* Index definitions are stored in user-specified path or, by default, in .hoodie/.index_defs/index.json.
* For the first time, the index definition file will be created if not exists.
* For the second time, the index definition file will be updated if exists.
* Table Config is up... | 3.26 |
hudi_SparkRDDWriteClient_bootstrap_rdh | /**
 * Main API to run bootstrap to hudi.
 *
 * @param extraMetadata optional extra metadata to attach to the bootstrap commit
 */
@Override
public void bootstrap(Option<Map<String, String>> extraMetadata) {
  // Initialize the table for an UPSERT at the metadata-bootstrap instant, then bootstrap it.
  initTable(WriteOperationType.UPSERT, Option.ofNullable(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS))
      .bootstrap(context, extraMetadata);
} | 3.26 |
hudi_SparkRDDWriteClient_insertOverwrite_rdh | /**
* Removes all existing records from the partitions affected and inserts the given HoodieRecords, into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
*/
public Hoodi... | 3.26 |
hudi_SparkRDDWriteClient_insertOverwriteTable_rdh | /**
* Removes all existing records of the Hoodie table and inserts the given HoodieRecords, into the table.
*
* @param records
* HoodieRecords to insert
* @param instantTime
* Instant time of the commit
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
*/
public HoodieWriteRes... | 3.26 |
hudi_SparkRDDWriteClient_commit_rdh | /**
* Complete changes performed at the given instantTime marker with specified action.
*/
@Override
public boolean commit(String instantTime, JavaRDD<WriteStatus> writeStatuses, Option<Map<String, String>> extraMetadata, String commitActionType, Map<String, List<String>> partitionToReplacedFileIds, Option<BiConsumer... | 3.26 |
hudi_SparkRDDWriteClient_initializeMetadataTable_rdh | /**
* Initialize the metadata table if needed. Creating the metadata table writer
* will trigger the initial bootstrapping from the data table.
*
* @param inFlightInstantTimestamp
* - The in-flight action responsible for the metadata table initialization
*/
private void initializeMetadataTable(Option<String> in... | 3.26 |
hudi_StreamReadOperator_consumeAsMiniBatch_rdh | /**
* Consumes at most {@link #MINI_BATCH_SIZE} number of records
* for the given input split {@code split}.
*
* <p>Note: close the input format and remove the input split for the queue {@link #splits}
* if the split reads to the end.
*
* @param split
* The input split
*/
private void consumeAsMiniBatch(Merg... | 3.26 |
hudi_CompactNode_execute_rdh | /**
* Method helps to start the compact operation. It will compact the last pending compact instant in the timeline
* if it has one.
*
* @param executionContext
* Execution context to run this compaction
* @param curItrCount
* cur iteration count.
* @throws Exception
... | 3.26 |
hudi_KafkaOffsetGen_isValidTimestampCheckpointType_rdh | /**
* Check if the checkpoint is a timestamp.
*
* @param lastCheckpointStr
* @return */
private Boolean isValidTimestampCheckpointType(Option<String> lastCheckpointStr) {
if (!lastCheckpointStr.isPresent()) {
return false;
}
Pattern pattern = Pattern.compile("[-+]?[0-9]+(\\.[0-9]+)?");
... | 3.26 |
hudi_KafkaOffsetGen_fetchValidOffsets_rdh | /**
* Fetch checkpoint offsets for each partition.
*
* @param consumer
* instance of {@link KafkaConsumer} to fetch offsets from.
* @param lastCheckpointStr
* last checkpoint string.
* @param topicPartitions
* set of topic partitions.
* @return a map of Topic partitions to offsets.
*/
private Map<TopicP... | 3.26 |
hudi_KafkaOffsetGen_offsetsToStr_rdh | /**
* String representation of checkpoint
* <p>
* Format: topic1,0:offset0,1:offset1,2:offset2, .....
*/
public static String offsetsToStr(OffsetRange[] ranges) {
// merge the ranges by partition to maintain one offset range map to one topic partition.
ranges =
mergeRangesByTopicPartition(ranges);
S... | 3.26 |
hudi_KafkaOffsetGen_checkTopicExists_rdh | /**
 * Check if the configured topic exists in the Kafka cluster.
 *
 * @param consumer kafka consumer used to list the available topics
 * @return true if the topic is present
 */
public boolean checkTopicExists(KafkaConsumer consumer) {
  Map<String, List<PartitionInfo>> availableTopics = consumer.listTopics();
  return availableTopics.containsKey(topicName);
} | 3.26 |
hudi_KafkaOffsetGen_getOffsetsByTimestamp_rdh | /**
* Get the checkpoint by timestamp.
* This method returns the checkpoint format based on the timestamp.
* example:
* 1. input: timestamp, etc.
* 2. output: topicName,partition_num_0:100,partition_num_1:101,partition_num_2:102.
*
* @param consumer
* @param topicName
* @param timestamp
* @return */
private ... | 3.26 |
hudi_KafkaOffsetGen_strToOffsets_rdh | /**
* Reconstruct checkpoint from timeline.
*/
public static Map<TopicPartition, Long> strToOffsets(String checkpointStr) {
Map<TopicPartition,
Long> offsetMap = new HashMap<>();
String[] splits = checkpointStr.split(",");String topic = splits[0];
for (int i = 1; i < splits.length; i++) {String[] subS... | 3.26 |
hudi_KafkaOffsetGen_computeOffsetRanges_rdh | /**
* Compute the offset ranges to read from Kafka, while handling newly added partitions, skews, event limits.
*
* @param fromOffsetMap
* offsets where we left off last time
* @param toOffsetMap
* offsets of where each partitions is currently at
* @param numEvents
* maximum number of events to read.
*/
... | 3.26 |
hudi_KafkaOffsetGen_mergeRangesByTopicPartition_rdh | /**
* Merge ranges by topic partition, because we need to maintain the checkpoint with one offset range per topic partition.
*
* @param oldRanges
* to merge
* @return ranges merged by partition
*/
public static OffsetRange[] mergeRangesByTopicPartition(OffsetRange[] oldRanges) {
List<OffsetRange> newRanges ... | 3.26 |
hudi_KafkaOffsetGen_commitOffsetToKafka_rdh | /**
* Commit offsets to Kafka only after hoodie commit is successful.
*
* @param checkpointStr
* checkpoint string containing offsets.
*/
public ... | 3.26 |
hudi_TableChange_addPositionChange_rdh | /**
* Add position change.
*
* @param srcName
* column which need to be reordered
* @param dsrName
* reference position
* @param orderType
* change types
* @return this
*/public BaseColumnChange addPositionChange(String srcName, String dsrName, ColumnPositionChange.ColumnPositionType orderType) {
In... | 3.26 |
hudi_TableChange_checkColModifyIsLegal_rdh | // Modify hudi meta columns is prohibited
protected void checkColModifyIsLegal(String colNeedToModify) {
if (HoodieRecord.HOODIE_META_COLUMNS.stream().anyMatch(f -> f.equalsIgnoreCase(colNeedToModify))) {
throw new IllegalArgumentException(String.format("cannot modify hudi meta col: %s", colNeedToModify));
... | 3.26 |
hudi_HoodieCompactionAdminTool_printOperationResult_rdh | /**
 * Print Operation Result.
 *
 * @param initialLine Initial Line printed as a header before the results
 * @param result Result entries to print, one per line
 */
private <T> void printOperationResult(String initialLine, List<T> result) {
  System.out.println(initialLine);
  for (T r : result) {
    // println (was print): consecutive results were run together on one line,
    // making the admin-tool output unreadable for more than one entry.
    System.out.println(r);
  }
} | 3.26 |
hudi_HoodieCompactionAdminTool_m0_rdh | /**
* Executes one of compaction admin operations.
*/
public void m0(JavaSparkContext jsc) throws Exception {
HoodieTableMetaClient v3 = HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(cfg.basePath).build();
try (CompactionAdminClient admin = new CompactionAdminClient(new Hoodi... | 3.26 |
hudi_MetadataTableUtils_shouldUseBatchLookup_rdh | /**
* Whether to use batch lookup for listing the latest base files in metadata table.
* <p>
* Note that metadata table has to be enabled, and the storage type of the file system view
* cannot be EMBEDDED_KV_STORE or SPILLABLE_DISK (these two types are not integrated with
* metadata table, see HUDI-5612).
*
* @p... | 3.26 |
hudi_TableCommand_refreshMetadata_rdh | /**
* Refresh table metadata.
*/@ShellMethod(key = { "refresh", "metadata refresh", "commits refresh", "cleans refresh", "savepoints refresh" }, value = "Refresh table metadata")
public String refreshMetadata() {
HoodieCLI.refreshTableMetadata();
return ("Metadata for table " + HoodieCLI.getTableMetaClient().... | 3.26 |
hudi_TableCommand_descTable_rdh | /**
* Describes table properties.
*/
@ShellMethod(key = "desc", value = "Describe Hoodie Table properties")
public String descTable() {
HoodieTableMetaClient client =
HoodieCLI.getTableMetaClient();
TableHeader header = new TableHeader().addTableHeaderField("Property").addTableHeaderField("Value");
... | 3.26 |
hudi_TableCommand_createTable_rdh | /**
* Create a Hoodie Table if it does not exist.
*
* @param path
* Base Path
* @param name
* Hoodie Table Name
* @param tableTypeStr
* Hoodie Table Type
* @param payloadClass
* Payload Class
*/
@ShellMethod(key = "create", value = "Create a hoodie ... | 3.26 |
hudi_TableCommand_writeToFile_rdh | /**
* Use Streams when you are dealing with raw data.
*
* @param filePath
* output file path.
* @param data
* to be written to file.
*/
private static void writeToFile(String filePath, String data) throws IOException {
File outFile = new File(filePath);
if (outFile.exists()) {outFile.delete();
}
... | 3.26 |
hudi_TableCommand_fetchTableSchema_rdh | /**
* Fetches table schema in avro format.
*/
@ShellMethod(key = "fetch table schema", value = "Fetches latest table schema")
public String fetchTableSchema(@ShellOption(value = { "--outputFilePath" }, defaultValue = ShellOption.NULL, help = "File path to write schema")
final String outputFilePath) throws Exception {... | 3.26 |
hudi_HoodieAvroIndexedRecord_readRecordPayload_rdh | /**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Overrideprotected final IndexedRecord readRecordPayload(Kryo kryo, Input input)
{// NOTE: We're leveraging Spark's default ... | 3.26 |
hudi_HoodieAvroIndexedRecord_writeRecordPayload_rdh | /**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Override
protected final void writeRecordPayload(IndexedRecord payload, Kryo kryo, Output output) {
// NOTE: We're leveragin... | 3.26 |
hudi_HoodieClusteringJob_validateRunningMode_rdh | // make sure that cfg.runningMode couldn't be null
private static void validateRunningMode(Config cfg) {
// --mode has a higher priority than --schedule
// If we remove --schedule option in the future we need to change runningMode default value to EXECUTE
if (StringUtils.isNullOr... | 3.26 |
hudi_ImmutablePair_getRight_rdh | /**
 * {@inheritDoc }
 */
@Override
public R getRight() {
  return this.right;
} | 3.26 |
hudi_ImmutablePair_of_rdh | /**
* <p>
* Obtains an immutable pair of from two objects inferring the generic types.
* </p>
*
* <p>
* This factory allows the pair to be created using inference to obtain the generic types.
* </p>
*
* @param <L>
* the left element type
* @param <R>
* the right element type
* @param left
* the left... | 3.26 |
hudi_ImmutablePair_getLeft_rdh | // -----------------------------------------------------------------------
/**
 * {@inheritDoc }
 */
@Override
public L getLeft() {
  return this.left;
} | 3.26 |
hudi_ImmutablePair_setValue_rdh | /**
* <p>
* Throws {@code UnsupportedOperationException}.
* </p>
*
* <p>
* This pair is immutable, so this operation is not supported.
* </p>
*
* @param value
* the value to set
* @return never
* @throws UnsupportedOperationException
* as this operation is not supported
*/
@Override
public R setValue... | 3.26 |
hudi_ArrayColumnReader_collectDataFromParquetPage_rdh | /**
* Collects data from a parquet page and returns the final row index where it stopped. The
* returned index can be equal to or less than total.
*
* @param total
* maximum number of rows to collect
* @param lcv
* column vector to do initial setup in data collection time
* @param valueList
* collection ... | 3.26 |
hudi_ArrayColumnReader_fetchNextValue_rdh | /**
* Reads a single value from parquet page, puts it into lastValue. Returns a boolean indicating
* if there is more values to read (true).
*
* @param category
* @return boolean
* @throws IOException
*/
private boolean fetchNextValue(LogicalType category) throws IOException {
int left = readPageIfNeed();
... | 3.26 |
hudi_ArrayColumnReader_setChildrenInfo_rdh | /**
* The lengths & offsets will be initialized as default size (1024), it should be set to the
* actual size according to the element number.
*/
private void setChildrenInfo(HeapArrayVector lcv, int itemNum, int elementNum) {
lcv.setSize(itemNum);long[] lcvLength = new long[elementNum];
long[] lcvOffset = n... | 3.26 |
hudi_ArrayColumnReader_readPrimitiveTypedRow_rdh | // Need to be in consistent with that VectorizedPrimitiveColumnReader#readBatchHelper
// TODO Reduce the duplicated code
private Object readPrimitiveTypedRow(LogicalType category) {
switch (category.getTypeRoot()) {
case CHAR :
case VARCHAR :
case BINARY :
case VARBINARY :
... | 3.26 |
hudi_TimestampBasedAvroKeyGenerator_initIfNeeded_rdh | /**
* The function takes care of lazily initialising dateTimeFormatter variables only once.
*/
private void initIfNeeded() {if (this.inputFormatter == null) {
this.inputFormatter = parser.getInputFormatter();
}
if (this.partitionFormatter == null) {
this.partitionFormatter = DateTimeFormat.forPattern(outputDateFor... | 3.26 |
hudi_TimestampBasedAvroKeyGenerator_getDefaultPartitionVal_rdh | /**
* Set default value to partitionVal if the input value of partitionPathField is null.
*/
public Object getDefaultPartitionVal() {
Object result = 1L;
if ((timestampType == TimestampType.DATE_STRING) || (timestampType == TimestampType.MIXED)) {
// since partitionVal is null, we can set a default value of any... | 3.26 |
hudi_TimestampBasedAvroKeyGenerator_getPartitionPath_rdh | /**
* Parse and fetch partition path based on data type.
*
* @param partitionVal
* partition path object value fetched from record/row
* @return the parsed partition path based on data type
*/
public String getPartitionPath(Object partitionVal) {
initIfNeeded();
long
timeMs;
if (partitionVal instanceof Double) ... | 3.26 |
hudi_MetadataPartitionType_getMetadataPartitionsNeedingWriteStatusTracking_rdh | /**
* Returns the list of metadata table partitions which require WriteStatus to track written records.
* <p>
* These partitions need the list of written records so that they can update their metadata.
*/
public static List<MetadataPartitionType> getMetadataPartitionsNeedingWriteStatusTracking() {
return Collec... | 3.26 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstants_rdh | /**
* For tableservices like replacecommit and compaction commits this method also returns ingestion inflight commits.
*/
@Override
public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant>
lastSuccessfulInstant) {
HoodieActiveTimeline ... | 3.26 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstantsForTableServicesCommits_rdh | /**
* To find which instants are conflicting, we apply the following logic
* Get both completed instants and ingestion inflight commits that have happened since the last successful write.
* We need to check for write conflicts since they may have mutated the same files
* that are being newly created by the current ... | 3.26 |
hudi_InLineFSUtils_length_rdh | /**
* Returns length of the block (embedded w/in the base file) identified by the given InlineFS path
*
* input: "inlinefs:/file1/s3a/?start_offset=20&length=40".
* output: 40
*/
public static long length(Path inlinePath) {
assertInlineFSPath(inlinePath);
String[] slices = inlinePath.toString().split("[?&=... | 3.26 |
hudi_InLineFSUtils_startOffset_rdh | /**
* Returns start offset w/in the base for the block identified by the given InlineFS path
*
* input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* output: 20
*/
public static long startOffset(Path inlineFSPath) {
assertInlineFSPath(inlineFSPath);
String[] slices = inlineFSPath.toString().split("[... | 3.26 |
hudi_InLineFSUtils_getOuterFilePathFromInlinePath_rdh | /**
* InlineFS Path format:
* "inlinefs://path/to/outer/file/outer_file_scheme/?start_offset=start_offset>&length=<length>"
* <p>
* Outer File Path format:
* "outer_file_scheme://path/to/outer/file"
* <p>
* Example
* Input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* Output: "s3a://file1"
*
* @param... | 3.26 |
hudi_InLineFSUtils_getInlineFilePath_rdh | /**
* Get the InlineFS Path for a given schema and its Path.
* <p>
* Examples:
* Input Path: s3a://file1, origScheme: file, startOffset = 20, length = 40
* Output: "inlinefs://file1/s3a/?start_offset=20&length=40"
*
* @param outerPath
* The outer file Path
* @param origScheme
* The file schema
* @param i... | 3.26 |
hudi_IncrSourceCloudStorageHelper_fetchFileData_rdh | /**
*
* @param filepaths
* Files from which to fetch data
* @return Data in the given list of files, as a Spark DataSet
*/
public static Option<Dataset<Row>> fetchFileData(SparkSession spark, List<String> filepaths, TypedProperties props, String fileFormat) {
if (filepaths.isEmpty()) {
return Option... | 3.26 |
hudi_BoundedFsDataInputStream_getFileLength_rdh | /**
 * Returns the length of the underlying file, resolving and caching it
 * lazily on first use (fileLen stays -1 until resolved).
 *
 * @throws IOException if the file system lookup fails
 */
private long getFileLength() throws IOException {
  if (fileLen == -1L) {
    // Memoize the length obtained from the file system's content summary.
    fileLen = f0.getContentSummary(file).getLength();
  }
  return fileLen;
} | 3.26 |
hudi_HiveSchemaUtils_toHiveTypeInfo_rdh | /**
* Convert Flink DataType to Hive TypeInfo. For types with a precision parameter, e.g.
* timestamp, the supported precisions in Hive and Flink can be different. Therefore the
* conversion will fail for those types if the precision is not supported by Hive and
* checkPrecision is true.
*
* @param dataType
* ... | 3.26 |
hudi_HiveSchemaUtils_createHiveColumns_rdh | /**
* Create Hive columns from Flink table schema.
*/
private static List<FieldSchema> createHiveColumns(TableSchema schema) {
final DataType dataType = schema.toPersistedRowDataType();
final RowType rowType = ((RowType) (dataType.getLogicalType()));
final String[] fieldNames = rowType.getFieldNames().toArray(new St... | 3.26 |
hudi_HiveSchemaUtils_getFieldNames_rdh | /**
 * Extracts the column names from the given Hive field schemas.
 *
 * @param fieldSchemas the Hive field schemas
 * @return the field names, in the same order as the input
 */
public static List<String> getFieldNames(List<FieldSchema> fieldSchemas) {
  return fieldSchemas.stream()
      .map(fieldSchema -> fieldSchema.getName())
      .collect(Collectors.toList());
} | 3.26 |
hudi_HiveSchemaUtils_splitSchemaByPartitionKeys_rdh | /**
* Split the field schemas by given partition keys.
*
* @param fieldSchemas
* The Hive field schemas.
* @param partitionKeys
* The partition keys.
* @return The pair of (regular columns, partition columns) schema fields
*/
public static Pair<List<FieldSchema>, List<FieldSchema>> splitSchemaByPartition... | 3.26 |
hudi_HiveSchemaUtils_toFlinkType_rdh | /**
* Convert Hive data type to a Flink data type.
*
* @param hiveType
* a Hive data type
* @return the corresponding Flink data type
*/
public static DataType toFlinkType(TypeInfo
hiveType) {
checkNotNull(hiveType, "hiveType cannot be null");
switch (hiveType.getCategory()) {
case PRIMITIVE :
... | 3.26 |
hudi_BloomFilterFactory_createBloomFilter_rdh | /**
* A Factory class to generate different versions of {@link BloomFilter}.
*/ public class BloomFilterFactory {
/**
* Creates a new {@link BloomFilter} with the given args.
*
* @param numEntries
* total number of entries
* @param errorRate
* max allowed error rate
* @par... | 3.26 |
hudi_AvroSchemaUtils_m0_rdh | /**
* Passed in {@code Union} schema and will try to resolve the field with the {@code fieldSchemaFullName}
* w/in the union returning its corresponding schema
*
* @param schema
* target schema to be inspected
* @param fieldSchemaFullName
* target field-name to be looked up w/in the union
* @return schema o... | 3.26 |
hudi_AvroSchemaUtils_getAvroRecordQualifiedName_rdh | /**
* Generates fully-qualified name for the Avro's schema based on the Table's name
*
* NOTE: PLEASE READ CAREFULLY BEFORE CHANGING
* This method should not change for compatibility reasons as older versions
* of Avro might be comparing fully-qualified names rather than just the record
* names
... | 3.26 |
hudi_AvroSchemaUtils_isAtomicSchemasCompatibleEvolution_rdh | /**
* Establishes whether {@code newReaderSchema} is compatible w/ {@code prevWriterSchema}, as
* defined by Avro's {@link AvroSchemaCompatibility}.
* {@code newReaderSchema} is considered compatible to {@code prevWriterSchema}, iff data written using {@code prevWriterSchema}
* could be read by {@code newReaderSche... | 3.26 |
hudi_AvroSchemaUtils_isSchemaCompatible_rdh | /**
* Establishes whether {@code newSchema} is compatible w/ {@code prevSchema}, as
* defined by Avro's {@link AvroSchemaCompatibility}.
* From avro's compatability standpoint, prevSchema is writer schema and new schema is reader schema.
* {@code newSchema} is considered compatible to {@code prevSchema}, iff data w... | 3.26 |
hudi_AvroSchemaUtils_canProject_rdh | /**
* Check that each field in the prevSchema can be populated in the newSchema except specified columns
*
* @param prevSchema
* prev schema.
* @param newSchema
* new schema
* @return true if prev schema is a projection of new schema.
*/
public static boolean canProject(Schema prevSchema, Schema newSchema, ... | 3.26 |
hudi_AvroSchemaUtils_isValidEvolutionOf_rdh | /**
* Validate whether the {@code targetSchema} is a valid evolution of {@code sourceSchema}.
* Basically {@link #isCompatibleProjectionOf(Schema, Schema)} but type promotion in the
* opposite direction
*/
public static boolean isValidEvolutionOf(Schema sourceSchema, Schema targetSchema) {return (sourceSchema.getTy... | 3.26 |
hudi_AvroSchemaUtils_isStrictProjectionOf_rdh | /**
* Validate whether the {@code targetSchema} is a strict projection of {@code sourceSchema}.
*
* Schema B is considered a strict projection of schema A iff
* <ol>
* <li>Schemas A and B are equal, or</li>
* <li>Schemas A and B are array schemas and element-type of B is a strict projection
* of the elemen... | 3.26 |
hudi_AvroSchemaUtils_isCompatibleProjectionOf_rdh | /**
* Validate whether the {@code targetSchema} is a "compatible" projection of {@code sourceSchema}.
* Only difference of this method from {@link #isStrictProjectionOf(Schema, Schema)} is
* the fact that it allows some legitimate type promotions (like {@code int -> long},
* {@code decimal(3, 2) -> decimal(5, 2)}, ... | 3.26 |
hudi_AvroSchemaUtils_checkSchemaCompatible_rdh | /**
* Checks whether writer schema is compatible with table schema considering {@code AVRO_SCHEMA_VALIDATE_ENABLE}
* and {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} options.
* To avoid collision of {@code SCHEMA_ALLOW_AUTO_EVOLUTION_COLUMN_DROP} and {@code DROP_PARTITION_COLUMNS}
* partition column names should... | 3.26 |
hudi_AvroSchemaUtils_createNullableSchema_rdh | /**
 * Creates a schema following Avro's typical nullable definition,
 * {@code Union(Schema.Type.NULL, <NonNullType>)}, wrapping the provided
 * non-null target type.
 *
 * @param avroType the non-null Avro type to wrap
 */
public static Schema createNullableSchema(Schema.Type avroType) {
  Schema nonNullSchema = Schema.create(avroType);
  return createNullableSchema(nonNullSchema);
} | 3.26 |
hudi_AvroSchemaUtils_resolveNullableSchema_rdh | /**
* Resolves typical Avro's nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)},
* decomposing union and returning the target non-null type
*/
public static Schema resolveNullableSchema(Schema schema) {
if (schema.getType() != Type.UNION) {
return schema;
}
List<Schema> i... | 3.26 |
hudi_AvroSchemaUtils_containsFieldInSchema_rdh | /**
 * Returns true when the schema contains a field with the provided name.
 * Any exception from the lookup (e.g. on non-record schemas) is treated
 * as "field not present".
 */
public static boolean containsFieldInSchema(Schema schema, String fieldName) {
  try {
    return schema.getField(fieldName) != null;
  } catch (Exception e) {
    // Lookup failed -- the schema has no such field (or is not a record).
    return false;
  }
} | 3.26 |
hudi_AvroSchemaUtils_isNullable_rdh | /**
* Returns true in case provided {@link Schema} is nullable (ie accepting null values),
* returns false otherwise
*/
public static boolean isNullable(Schema schema) {
if (schema.getType() != Type.UNION) {
return false;
}
List<Schema> innerTypes = schema.getTypes();
return (innerTypes.s... | 3.26 |
hudi_SourceCommitCallback_onCommit_rdh | /**
 * Performs some action on successful Hudi commit like committing offsets to Kafka.
 * The default implementation is a no-op.
 *
 * @param lastCkptStr last checkpoint string.
 */
default void onCommit(String lastCkptStr) {
  // no-op by default; implementations may e.g. commit offsets to Kafka
} | 3.26 |
hudi_InternalFilter_write_rdh | /**
* Serialize the fields of this object to <code>out</code>.
*
* @param out
* <code>DataOuput</code> to serialize this object into.
* @throws IOException
*/
public void write(DataOutput out) throws IOException {
out.writeInt(VERSION);
out.writeInt(this.nbHash);out.writeByte(this.hashType);
out.wr... | 3.26 |
hudi_InternalFilter_readFields_rdh | /**
* Deserialize the fields of this object from <code>in</code>.
*
* <p>For efficiency, implementations should attempt to re-use storage in the
* existing object where possible.</p>
*
* @param in
* <code>DataInput</code> to deseriablize this object from.
* @throws IOExcept... | 3.26 |
hudi_InternalFilter_add_rdh | /**
 * Adds an array of keys to <i>this</i> filter.
 *
 * @param keys The array of keys.
 * @throws IllegalArgumentException if {@code keys} is null
 */
public void add(Key[] keys) {
  if (keys == null) {
    throw new IllegalArgumentException("Key[] may not be null");
  }
  for (int i = 0; i < keys.length; i++) {
    add(keys[i]);
  }
} | 3.26 |
hudi_HiveSchemaUtil_convertMapSchemaToHiveFieldSchema_rdh | /**
*
* @param schema
* Intermediate schema in the form of Map<String,String>
* @param syncConfig
* @return List of FieldSchema objects derived from schema without the partition fields as the HMS api expects them as different arguments for alter table commands.
* @throws IOException
*/
public static List<Field... | 3.26 |
hudi_HiveSchemaUtil_convertField_rdh | /**
* Convert one field data type of parquet schema into an equivalent Hive schema.
*
* @param parquetType
* : Single parquet field
* @return : Equivalent sHive schema
*/
private static String convertField(final Type parquetType, boolean supportTimestamp, boolean doFormat) {
StringBuilder field = new StringBuil... | 3.26 |
hudi_HiveSchemaUtil_parquetSchemaToMapSchema_rdh | /**
* Returns schema in Map<String,String> form read from a parquet file.
*
* @param messageType
* : parquet Schema
* @param supportTimestamp
* @param doFormat
* : This option controls whether schema will have spaces in the value part of the schema map. This is required because spaces in complex schema trips... | 3.26 |
hudi_HiveSchemaUtil_createHiveStruct_rdh | /**
* Return a 'struct' Hive schema from a list of Parquet fields.
*
* @param parquetFields
* : list of parquet fields
* @return : Equivalent 'struct' Hive schema
*/
private static String createHiveStruct(List<Type> parquetFields, boolean supportTimestamp, boolean doFormat) {
StringBuilder struct = new StringB... | 3.26 |
hudi_HiveSchemaUtil_createHiveMap_rdh | /**
 * Create a 'Map' schema from Parquet map field.
 *
 * @param keyType Hive type string of the map key
 * @param valueType Hive type string of the map value
 * @param doFormat whether to include spaces for readability
 * @return the Hive MAP type string, e.g. {@code MAP< string, int>} when formatted
 */
private static String createHiveMap(String keyType, String valueType, boolean doFormat) {
  StringBuilder map = new StringBuilder();
  map.append(doFormat ? "MAP< " : "MAP<");
  map.append(keyType);
  map.append(doFormat ? ", " : ",");
  map.append(valueType);
  map.append(">");
  return map.toString();
} | 3.26 |
hudi_HiveSchemaUtil_m0_rdh | /**
 * Get the schema difference between the storage schema and hive table schema.
 *
 * @param storageSchema the parquet storage schema
 * @param tableSchema the hive table schema as a name-to-type map
 * @param partitionKeys the partition key names
 * @return the computed schema difference
 */
public static SchemaDifference m0(MessageType storageSchema, Map<String, String> tableSchema, List<String> partitionKeys) {
  // Delegates to the four-argument overload; the trailing flag is false here
  // (NOTE(review): confirm its meaning against getSchemaDifference's signature).
  return getSchemaDifference(storageSchema, tableSchema, partitionKeys, false);
} | 3.26 |
hudi_HiveSchemaUtil_convertParquetSchemaToHiveFieldSchema_rdh | /**
* Returns equivalent Hive table Field schema read from a parquet file.
*
* @param messageType
* : Parquet Schema
* @return : Hive Table schema read from parquet file List[FieldSchema] without partitionField
*/
public static List<FieldSchema> convertParquetSchemaToHiveFieldSchema(MessageType messageType, Hiv... | 3.26 |
hudi_HiveSchemaUtil_createHiveArray_rdh | /**
* Create an Array Hive schema from equivalent parquet list type.
*/
private static String createHiveArray(Type elementType, String elementName, boolean supportTimestamp, boolean
doFormat) {
StringBuilder array = new StringBuilder();
array.append(doFormat ? "ARRAY< " : "ARRAY<");
if (elementType.isPrimitive()) {
... | 3.26 |
hudi_ClientIds_getHeartbeatFolderPath_rdh | // Utilities
// -------------------------------------------------------------------------
/**
 * Builds the heartbeat folder path:
 * {@code <basePath>/<AUXILIARYFOLDER_NAME>/<HEARTBEAT_FOLDER_NAME>}.
 */
private String getHeartbeatFolderPath(String basePath) {
  return basePath + Path.SEPARATOR + AUXILIARYFOLDER_NAME + Path.SEPARATOR + HEARTBEAT_FOLDER_NAME;
} | 3.26 |
hudi_ClientIds_builder_rdh | /**
 * Creates and returns a fresh {@link Builder} instance.
 */
public static Builder builder() {
  return new Builder();
} | 3.26 |
hudi_ClientIds_getClientId_rdh | /**
 * Returns the client id from the heartbeat file path, the path name follows
 * the naming convention: _, _1, _2, ... _N.
 */
private static String getClientId(Path path) {
  String[] parts = path.getName().split(HEARTBEAT_FILE_NAME_PREFIX);
  if (parts.length > 1) {
    return parts[1];
  }
  // No suffix after the prefix -- this is the initial client id.
  return INIT_CLIENT_ID;
} | 3.26 |
hudi_FlatteningTransformer_apply_rdh | /**
* Configs supported.
*/
@Override
public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession, Dataset<Row> rowDataset, TypedProperties properties) {
try
{ // tmp table name doesn't like dashes
String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString... | 3.26 |
hudi_CleanPlanner_getEarliestCommitToRetain_rdh | /**
* Returns the earliest commit to retain based on cleaning policy.
*/
public Option<HoodieInstant> getEarliestCommitToRetain() {
return CleanerUtils.getEarliestCommitToRetain(hoodieTable.getMetaClient().getActiveTimeline().getCommitsAndCompactionTimeline(),
config.getCleanerPolicy(), config.getCleanerCommi... | 3.26 |
hudi_CleanPlanner_getSavepointedDataFiles_rdh | /**
* Get the list of data file names savepointed.
*/public Stream<String> getSavepointedDataFiles(String savepointTime) {
if (!hoodieTable.getSavepointTimestamps().contains(savepointTime)) {
throw new HoodieSavepointException(("Could not get data files for savepoint " + savepointTime) + ". No such save... | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.