name stringlengths 12 178 | code_snippet stringlengths 8 36.5k | score float64 3.26 3.68 |
|---|---|---|
hudi_TimelineUtils_getAllExtraMetadataForKey_rdh | /**
* Get extra metadata for specified key from all active commit/deltacommit instants.
*/
public static Map<String, Option<String>> getAllExtraMetadataForKey(HoodieTableMetaClient metaClient, String extraMetadataKey) {
return metaClient.getCommitsTimeline().filterCompletedInstants().getReverseOrderedInstants().coll... | 3.26 |
hudi_TimelineUtils_getExtraMetadataFromLatest_rdh | /**
* Get extra metadata for specified key from latest commit/deltacommit/replacecommit(eg. insert_overwrite) instant.
*/
public static Option<String> getExtraMetadataFromLatest(HoodieTableMetaClient metaClient, String extraMetadataKey) {
return // exclude clustering commits for returning user stored extra metadata
... | 3.26 |
hudi_TimelineUtils_handleHollowCommitIfNeeded_rdh | /**
* Handles hollow commit as per {@link HoodieCommonConfig#INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT}
* and return filtered or non-filtered timeline for incremental query to run against.
*/
public static HoodieTimeline handleHollowCommitIfNeeded(HoodieTimeline completedCommitTimeline, HoodieTableMetaClient metaClient,... | 3.26 |
hudi_TimelineUtils_getCommitsTimelineAfter_rdh | /**
* Returns a Hudi timeline with commits after the given instant time (exclusive).
*
* @param metaClient
* {@link HoodieTableMetaClient} instance.
* @param exclusiveStartInstantTime
* Start instant time (exclusive).
* @param lastMaxCompletionTime
* Last commit max completion time synced
* @return Hudi ... | 3.26 |
hudi_TimelineUtils_getAffectedPartitions_rdh | /**
* Returns partitions that have been modified including internal operations such as clean in the passed timeline.
*/
public static List<String> getAffectedPartitions(HoodieTimeline timeline) {
return timeline.filterCompletedInstants().getInstantsAsStream().flatMap(s -> {
switch ... | 3.26 |
hudi_TimelineUtils_getExtraMetadataFromLatestIncludeClustering_rdh | /**
* Get extra metadata for specified key from latest commit/deltacommit/replacecommit instant including internal commits
* such as clustering.
*/
public static Option<String> getExtraMetadataFromLatestIncludeClustering(HoodieTableMetaClient metaClient, String extraMetadataKey) {
return metaClient.getCommitsTimeli... | 3.26 |
hudi_KeyRangeLookupTree_getMatchingIndexFiles_rdh | /**
* Fetches all the matching index files where the key could possibly be present.
*
* @param lookupKey
* the key to be searched for
* @return the {@link Set} of matching index file names
*/
Set<String> getMatchingIndexFiles(String lookupKey) {Set<String> v0 = new HashSet<>();
m1(getRoot(), lookupKey, v0);... | 3.26 |
hudi_KeyRangeLookupTree_getRoot_rdh | /**
*
* @return the root of the tree. Could be {@code null}
*/
public KeyRangeNode getRoot() {
return root;
} | 3.26 |
hudi_KeyRangeLookupTree_insert_rdh | /**
* Inserts a new {@link KeyRangeNode} to this look up tree.
*
* If no root exists, make {@code newNode} as the root and return the new root.
*
* If current root and newNode matches with min record key and max record key, merge two nodes. In other words, add
* files from {@code newNode} to current root. Return ... | 3.26 |
hudi_KeyRangeLookupTree_m1_rdh | /**
* Fetches all the matching index files where the key could possibly be present.
*
* @param root
* refers to the current root of the look up tree
* @param lookupKey
* the key to be searched for
... | 3.26 |
hudi_KeyRangeLookupTree_m0_rdh | /**
* Inserts a new {@link KeyRangeNode} to this look up tree.
*
* @param newNode
* the new {@link KeyRangeNode} to be inserted
*/
void m0(KeyRangeNode newNode) {root = insert(getRoot(), newNode);
} | 3.26 |
hudi_RealtimeSplit_getDeltaLogPaths_rdh | /**
* Return Log File Paths.
*
* @return */
default List<String> getDeltaLogPaths() {
return getDeltaLogFiles().stream().map(entry -> entry.getPath().toString()).collect(Collectors.toList());
} | 3.26 |
hudi_FailSafeConsistencyGuard_waitForFilesVisibility_rdh | /**
* Helper function to wait for all files belonging to single directory to appear.
*
* @param dirPath
* Dir Path
* @param files
* Files to appear/disappear
* @param event
* Appear/Disappear
* @throws TimeoutException
*/
public void waitForFilesVisibility(String dirPath, List<String> files, FileVisibil... | 3.26 |
hudi_FailSafeConsistencyGuard_checkFileVisibility_rdh | /**
 * Helper to check file visibility.
*
* @param filePath
* File Path
* @param visibility
* Visibility
* @return true (if file visible in Path), false (otherwise)
* @throws IOException
* -
*/
protected boolean checkFileVisibility(Path filePath, FileVisibility visibility) throws IOException {
try... | 3.26 |
hudi_FailSafeConsistencyGuard_waitForFileVisibility_rdh | /**
* Helper function to wait till file either appears/disappears.
*
* @param filePath
* File Path
*/
private void waitForFileVisibility(Path filePath, FileVisibility visibility) throws TimeoutException {
long waitMs = consistencyGuardConfig.getInitialConsistencyCheckIntervalMs();
int attempt = 0;
wh... | 3.26 |
hudi_FailSafeConsistencyGuard_getFilesWithoutSchemeAndAuthority_rdh | /**
* Generate file names without scheme and authority.
*
* @param files
* list of files of interest.
* @return the filenames without scheme and authority.
*/
protected List<String> getFilesWithoutSchemeAndAuthority(List<String> files) {
return files.stream().map(f -> Pa... | 3.26 |
hudi_FailSafeConsistencyGuard_retryTillSuccess_rdh | /**
 * Retries the predicate for a configurable number of times till the predicate returns success.
*
* @param dir
* directory of interest in which list of files are checked for visibility
* @param files
* List of files to check for visibility
* @param event
* {@link org.apache.hudi.common.fs.ConsistencyG... | 3.26 |
hudi_FailSafeConsistencyGuard_checkFilesVisibility_rdh | /**
* Helper to check for file visibility based on {@link org.apache.hudi.common.fs.ConsistencyGuard.FileVisibility} event.
*
* @param retryNum
* retry attempt count.
* @param dir
* directory of interest in which list of files are checked for visibility
* @param files
* List of files to check for visibili... | 3.26 |
hudi_BaseRestoreActionExecutor_writeToMetadata_rdh | /**
* Update metadata table if available. Any update to metadata table happens within data table lock.
*
* @param restoreMetadata
* instance of {@link HoodieRestoreMetadata} to be applied to metadata.
*/
private void writeToMetadata(HoodieRestoreMetadata restoreMetadata, HoodieInstant restoreInflightInstant) {
... | 3.26 |
hudi_FlinkConcatHandle_write_rdh | /**
* Write old record as is w/o merging with incoming record.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = (config.populateMetaFields()) ? writeSchemaWithMetaFields : writeSchema;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
try {
fileWriter.writ... | 3.26 |
hudi_AvroConvertor_withKafkaFieldsAppended_rdh | /**
* this.schema is required to have kafka offsets for this to work
*/
public GenericRecord
withKafkaFieldsAppended(ConsumerRecord consumerRecord) {
m0();
GenericRecord recordValue = ((GenericRecord) (consumerRecord.value()));
GenericRecordBuilder recordBuilder = new GenericRecordBuilder(this.schema);... | 3.26 |
hudi_SparkBootstrapCommitActionExecutor_metadataBootstrap_rdh | /**
* Perform Metadata Bootstrap.
*
* @param partitionFilesList
* List of partitions and files within that partitions
*/
protected Option<HoodieWriteMetadata<HoodieData<WriteStatus>>> metadataBootstrap(List<Pair<String, List<HoodieFileStatus>>> partitionFilesList) {
if ((null == partitionFilesList) || part... | 3.26 |
hudi_SparkBootstrapCommitActionExecutor_fullBootstrap_rdh | /**
* Perform Full Bootstrap.
*
* @param partitionFilesList
* List of partitions and files within that partitions
*/
protected Option<HoodieWriteMetadata<HoodieData<WriteStatus>>> fullBootstrap(List<Pair<String, List<HoodieFileStatus>>> partitionFilesList) {
if ((null == partitionFilesList) || partitionFile... | 3.26 |
hudi_SparkBootstrapCommitActionExecutor_listAndProcessSourcePartitions_rdh | /**
* Return Bootstrap Mode selections for partitions listed and figure out bootstrap Schema.
*
 * @return bootstrap mode selections for the listed partitions
 * @throws IOException
*/
private Map<BootstrapMode, List<Pair<String, List<HoodieFileStatus>>>> listAndProcessSourcePartitions() throws IOException {
List<Pair<String, List<HoodieFileStatus>>> folders = Bo... | 3.26 |
hudi_BaseTableMetadata_checkForSpuriousDeletes_rdh | /**
* Handle spurious deletes. Depending on config, throw an exception or log a warn msg.
*/
private void checkForSpuriousDeletes(HoodieMetadataPayload metadataPayload, String partitionName) {
if (!metadataPayload.getDeletions().isEmpty()) {
if (metadataConfig.ignoreSpuriousDeletes()) {
LOG.warn((((("Meta... | 3.26 |
hudi_BaseTableMetadata_fetchAllPartitionPaths_rdh | /**
* Returns a list of all partitions.
*/
protected List<String> fetchAllPartitionPaths() {
HoodieTimer timer = HoodieTimer.start();
Option<HoodieRecord<HoodieMetadataPayload>> recordOpt = getRecordByKey(RECORDKEY_PARTITION_LIST, MetadataPartitionType.FILES.getPartitionPath());
metrics.ifPresent(m -> m.... | 3.26 |
hudi_BaseTableMetadata_getAllPartitionPaths_rdh | /**
* Return the list of partitions in the dataset.
* <p>
* If the Metadata Table is enabled, the listing is retrieved from the stored metadata. Otherwise, the list of
* partitions is retrieved directly from the underlying {@code FileSystem}.
* <p>
* On any errors retrieving the listing from the metadata, default... | 3.26 |
hudi_BaseTableMetadata_readRecordIndex_rdh | /**
* Reads record keys from record-level index.
* <p>
* If the Metadata Table is not enabled, an exception is thrown to distinguish this from the absence of the key.
*
* @param recordKeys
* The list of record keys to read
*/
@Overridepublic Map<String, HoodieRecordGlobalLocation> readRecordIndex(List<String>... | 3.26 |
hudi_BaseTableMetadata_getAllFilesInPartition_rdh | /**
* Return the list of files in a partition.
* <p>
* If the Metadata Table is enabled, the listing is retrieved from the stored metadata. Otherwise, the list of
* partitions is retrieved directly from the underlying {@code FileSystem}.
* <p>
* On any errors retrieving the listing from the metadata, defaults to ... | 3.26 |
hudi_BaseTableMetadata_fetchAllFilesInPartition_rdh | /**
* Return all the files from the partition.
*
* @param partitionPath
* The absolute path of the partition
*/
FileStatus[] fetchAllFilesInPartition(Path partitionPath) throws IOException {
String relativePartitionPath = FSUtils.getRelativePartitionPath(dataBasePath.get(), partit... | 3.26 |
hudi_LSMTimelineWriter_write_rdh | /**
* Writes the list of active actions into the timeline.
*
* @param activeActions
* The active actions
* @param preWriteCallback
* The callback before writing each action
* @param exceptionHandler
* The handle for exception
*/
public void write(List<ActiveAction> activeActions, Option<Consumer<Active... | 3.26 |
hudi_LSMTimelineWriter_updateManifest_rdh | /**
* Updates a manifest file.
*
* <p>4 steps:
* <ol>
* <li>read the latest manifest version file;</li>
* <li>read the latest manifest file for valid files;</li>
* <li>remove files to the existing file list from step2;</li>
* <li>add this new file to the existing file list from step2.</li>
* </ol>
*
... | 3.26 |
hudi_LSMTimelineWriter_compactedFileName_rdh | /**
* Returns a new file name.
*/
@VisibleForTestingpublic static String compactedFileName(List<String> files) {
String minInstant = files.stream().map(LSMTimeline::getMinInstantTime).min(Comparator.naturalOrder()).get();
String maxInstant = files.stream().map(LSMTimeline::getMaxInstantTime).max(Comparator.... | 3.26 |
hudi_LSMTimelineWriter_getOrCreateWriterConfig_rdh | /**
* Get or create a writer config for parquet writer.
*/
private HoodieWriteConfig getOrCreateWriterConfig() {
if (this.writeConfig == null) {
this.writeConfig = HoodieWriteConfig.newBuilder().withProperties(this.config.getProps()).... | 3.26 |
hudi_LSMTimelineWriter_clean_rdh | /**
* Checks whether there is any unfinished compaction operation.
*
* @param context
* HoodieEngineContext used for parallelize to delete obsolete files if necessary.
*/
public void clean(HoodieEngineContext context, int compactedVersions) throws IOException {
// if there are more than 3 version of snapshot... | 3.26 |
hudi_LSMTimelineWriter_getCandidateFiles_rdh | /**
* Returns at most {@code filesBatch} number of source files
* restricted by the gross file size by 1GB.
*/
private List<String> getCandidateFiles(List<HoodieLSMTimelineManifest.LSMFileEntry> files, int filesBatch) throws IOException {
List<String> candidates = new ArrayList<>();
long totalFileLen = 0L;... | 3.26 |
hudi_LSMTimelineWriter_newFileName_rdh | /**
* Returns a new file name.
*/
private static String newFileName(String minInstant, String maxInstant, int layer) {
return String.format("%s_%s_%d%s", minInstant, maxInstant, layer, HoodieFileFormat.PARQUET.getFileExtension());
} | 3.26 |
hudi_LSMTimelineWriter_compactAndClean_rdh | /**
* Compacts the small parquet files.
*
* <p>The parquet naming convention is:
*
* <pre>${min_instant}_${max_instant}_${level}.parquet</pre>
*
* <p>The 'min_instant' and 'max_instant' represent the instant time range of the parquet file.
* The 'level' represents the number of the level where the file is locat... | 3.26 |
hudi_SparkInternalSchemaConverter_convertDoubleType_rdh | /**
* Convert double type to other Type.
* Now only support Double -> Decimal/String
* TODO: support more types
*/
private static boolean convertDoubleType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
if ((newType instanceof DecimalType) || (newType instanceof StringType)) ... | 3.26 |
hudi_SparkInternalSchemaConverter_collectColNamesFromSparkStruct_rdh | /**
* Collect all the leaf nodes names.
*
* @param sparkSchema
* a spark schema
* @return leaf nodes full names.
*/public static List<String> collectColNamesFromSparkStruct(StructType sparkSchema) {
List<String> result = new ArrayList<>();
collectColNamesFromStructType(sparkSchema, new LinkedList<>(),... | 3.26 |
hudi_SparkInternalSchemaConverter_m0_rdh | /**
* Convert float type to other Type.
* Now only support float -> double/String/Decimal
* TODO: support more types
*/
private static boolean m0(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
if (((newType instanceof DoubleType) || (newType instanceof StringType)) || (newTyp... | 3.26 |
hudi_SparkInternalSchemaConverter_convertDecimalType_rdh | /**
* Convert decimal type to other Type.
* Now only support Decimal -> Decimal/String
* TODO: support more types
*/
private static boolean convertDecimalType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
DataType oldType = oldV.dataType();
if ((newType instanceof
... | 3.26 |
hudi_SparkInternalSchemaConverter_convertDateType_rdh | /**
* Convert date type to other Type.
* Now only support Date -> String
* TODO: support more types
*/
private static boolean convertDateType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
if (newType instanceof StringType) {
for (int i = 0; i < len; i++) {if (oldV... | 3.26 |
hudi_SparkInternalSchemaConverter_convertStringType_rdh | /**
* Convert String type to other Type.
* Now only support String -> Decimal/Date.
* Notice: This convert maybe failed!!!
* TODO: support more types
*/
private static boolean convertStringType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
if ((newType instanceof DateType)... | 3.26 |
hudi_SparkInternalSchemaConverter_convertAndPruneStructTypeToInternalSchema_rdh | /**
* Convert Spark schema to Hudi internal schema, and prune fields.
* Fields without IDs are kept and assigned fallback IDs.
*
* @param sparkSchema
* a pruned spark schema
* @param originSchema
* a internal schema for hoodie table
* @return a pruned internal schema for the provided spark schema
*/
public... | 3.26 |
hudi_SparkInternalSchemaConverter_convertIntLongType_rdh | /**
* Convert Int/long type to other Type.
* Now only support int/long -> long/float/double/string/Decimal
* TODO: support more types
*/
private static boolean convertIntLongType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
boolean isInt = oldV.dataType() instanceof Integer... | 3.26 |
hudi_HoodieCDCUtils_cdcRecord_rdh | /**
* Build the cdc record when `hoodie.table.cdc.supplemental.logging.mode` is {@link HoodieCDCSupplementalLoggingMode#OP_KEY_ONLY}.
*/
public static Record cdcRecord(Schema cdcSchema, String op, String recordKey) {
GenericData.Record record = new GenericData.Record(cdcSchema);
record.put(f0, op);record.put(CDC_RE... | 3.26 |
hudi_SecondaryIndexUtils_fromJsonString_rdh | /**
 * Parse secondary index string to List<HoodieSecondaryIndex>
*
* @param jsonStr
* Secondary indexes with json format
* @return List<HoodieSecondaryIndex>
*/
public static List<HoodieSecondaryIndex> fromJsonString(String jsonStr) {
try {
return SecondaryIndexUtils... | 3.26 |
hudi_SecondaryIndexUtils_getSecondaryIndexes_rdh | /**
* Get secondary index metadata for this table
*
* @param metaClient
* HoodieTableMetaClient
* @return HoodieSecondaryIndex List
*/
public static Option<List<HoodieSecondaryIndex>> getSecondaryIndexes(HoodieTableMetaClient metaClient) {
Option<String> indexesMetadata = metaClient.getTableConfig().getSec... | 3.26 |
hudi_BulkInsertPartitioner_getWriteHandleFactory_rdh | /**
* Return write handle factory for the given partition.
*
* @param partitionId
* data partition
* @return */
default Option<WriteHandleFactory> getWriteHandleFactory(int partitionId) {
return Option.empty();
} | 3.26 |
hudi_BulkInsertPartitioner_getFileIdPfx_rdh | /**
* Return file group id prefix for the given data partition.
* By default, return a new file group id prefix, so that incoming records will route to a fresh new file group
*
* @param partitionId
* data partition
* @return */
default String getFileIdPfx(int partitionId) {
return
FSUtils.createNewFile... | 3.26 |
hudi_AbstractHoodieLogRecordReader_reconcileSpuriousBlocksAndGetValidOnes_rdh | /**
* There could be spurious log blocks due to spark task retries. So, we will use BLOCK_SEQUENCE_NUMBER in the log block header to deduce such spurious log blocks and return
* a deduped set of log blocks.
*
* @param allValidLogBlocks
* all valid log blocks parsed so far.
* @param blockSequenceMapPerCommit
* ... | 3.26 |
hudi_AbstractHoodieLogRecordReader_scanInternal_rdh | /**
*
* @param keySpecOpt
* specifies target set of keys to be scanned
* @param skipProcessingBlocks
* controls, whether (delta) blocks have to actually be processed
*/
protected final void scanInternal(Option<KeySpec> keySpecOpt, boolean skipProcessingBlocks) {synchronized(this) {
if (enableOptimized... | 3.26 |
hudi_AbstractHoodieLogRecordReader_processDataBlock_rdh | /**
* Iterate over the GenericRecord in the block, read the hoodie key and partition path and call subclass processors to
* handle it.
*/
private void processDataBlock(HoodieDataBlock dataBlock, Option<KeySpec>
keySpecOpt) throws Exception {
checkState(partitionNameOverrideOpt.isPresent() || partitionPathFieldOpt.is... | 3.26 |
hudi_AbstractHoodieLogRecordReader_updateBlockSequenceTracker_rdh | /**
* Updates map tracking block seq no.
* Here is the map structure.
* Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit
* Key: Commit time.
* Value: Map<Long, List<Pair<Integer, HoodieLogBlock>>>>
* Value refers to a Map of different attempts for the commit of interest. Lis... | 3.26 |
hudi_AbstractHoodieLogRecordReader_composeEvolvedSchemaTransformer_rdh | /**
* Get final Read Schema for support evolution.
* step1: find the fileSchema for current dataBlock.
* step2: determine whether fileSchema is compatible with the final read internalSchema.
* step3: merge fileSchema and read internalSchema to produce final read schema.
*
* @param dataBlock
* current processed... | 3.26 |
hudi_AbstractHoodieLogRecordReader_isNewInstantBlock_rdh | /**
* Checks if the current logblock belongs to a later instant.
*/
private boolean isNewInstantBlock(HoodieLogBlock logBlock) {
return ((currentInstantLogBlocks.size() >
0) && (currentInstantLogBlocks.peek().getBlockType() != CORRUPT_BLOCK)) && (!logBlock.getLogBlockHeader().get(INSTANT_TIME).contentEquals(currentIn... | 3.26 |
hudi_AbstractHoodieLogRecordReader_processQueuedBlocksForInstant_rdh | /**
* Process the set of log blocks belonging to the last instant which is read fully.
*/
private void processQueuedBlocksForInstant(Deque<HoodieLogBlock> logBlocks, int numLogFilesSeen, Option<KeySpec> keySpecOpt) throws Exception {
while (!logBlocks.isEmpty()) {
LOG.info("Number of remaining logblocks to merge "... | 3.26 |
hudi_Transient_eager_rdh | /**
* Creates instance of {@link Transient} by eagerly setting it to provided {@code value},
* while given {@code initializer} will be used to re-instantiate the value after original
* one being dropped during serialization/deserialization cycle
*/
public static <T> Transient<T> eager(T value, SerializableSupplier<... | 3.26 |
hudi_Transient_lazy_rdh | /**
* Creates instance of {@link Transient} by lazily executing provided {@code initializer},
* to instantiate value of type {@link T}. Same initializer will be used to re-instantiate
* the value after original one being dropped during serialization/deserialization cycle
*/
public static <T> Transient<T> lazy(Seria... | 3.26 |
hudi_HoodieRowDataCreation_create_rdh | /**
* Creates a {@link AbstractHoodieRowData} instance based on the given configuration.
*/
public static AbstractHoodieRowData create(String commitTime, String commitSeqNumber, String recordKey, String partitionPath, String fileName, RowData row, boolean withOperation, boolean
withMetaFields) {return withMetaFields
... | 3.26 |
hudi_InputPathHandler_parseInputPaths_rdh | /**
* Takes in the original InputPaths and classifies each of them into incremental, snapshot and
* non-hoodie InputPaths. The logic is as follows:
* 1. Check if an inputPath starts with the same basePath as any of the metadata basePaths we know
* 1a. If yes, this belongs to a Hoodie table that we already know a... | 3.26 |
hudi_SparkSQLQueryNode_execute_rdh | /**
* Method helps to execute a sparkSql query from a hive table.
*
* @param executionContext
* Execution context to perform this query.
* @param curItrCount
* current iteration count.
* @throws Exception
 * will be thrown if any error occurred
*/
@Override
public void execute(ExecutionContext executionCo... | 3.26 |
hudi_WriteProfiles_getCommitMetadataSafely_rdh | /**
* Returns the commit metadata of the given instant safely.
*
* @param tableName
* The table name
* @param basePath
* The table base path
* @param instant
* The hoodie instant
* @param timeline
* The timeline
* @return the commit metadata or empty if any error occurs
*/
public static Option<Hoodi... | 3.26 |
hudi_WriteProfiles_getCommitMetadata_rdh | /**
* Returns the commit metadata of the given instant.
*
* @param tableName
* The table name
* @param basePath
* The table base path
* @param instant
* The hoodie instant
* @param timeline
* The timeline
* @return the commit metadata
*/
public static HoodieCommitMetadata getCommitMetadata(String t... | 3.26 |
hudi_WriteProfiles_getFilesFromMetadata_rdh | /**
* Returns all the incremental write file statuses with the given commits metadata.
*
* @param basePath
* Table base path
* @param hadoopConf
* The hadoop conf
* @param metadataList
* The commit metadata list (should in ascending order)
* @param tableType
* The table type
* @param ignoreMissingFil... | 3.26 |
hudi_BaseRollbackActionExecutor_deleteInflightAndRequestedInstant_rdh | /**
* Delete Inflight instant if enabled.
*
* @param deleteInstant
* Enable Deletion of Inflight instant
* @param activeTimeline
* Hoodie active timeline
* @param instantToBeDeleted
* Instant to be deleted
*/
protected void deleteInflightAndRequestedInstant(boolean deleteInstant, HoodieActiveTimeline act... | 3.26 |
hudi_BaseRollbackActionExecutor_validateRollbackCommitSequence_rdh | /**
* Validate commit sequence for rollback commits.
*/
private void validateRollbackCommitSequence() {
// Continue to provide the same behavior if policy is EAGER (similar to pendingRollback logic). This is required
// since with LAZY rollback we support parallel writing which can allow a new inflight while ... | 3.26 |
hudi_BaseRollbackActionExecutor_executeRollback_rdh | /**
* Execute rollback and fetch rollback stats.
*
* @param instantToRollback
* instant to be rolled back.
* @param rollbackPlan
* instance of {@link HoodieRollbackPlan} for which rollback needs to be executed.
* @return list of {@link HoodieRollbackStat}s.
*/
protected List<HoodieRollbackStat> executeRollb... | 3.26 |
hudi_FlinkWriteHandleFactory_m0_rdh | /**
* Returns the write handle factory with given write config.
*/
public static <T, I,
K, O> Factory<T, I, K, O> m0(HoodieTableConfig tableConfig, HoodieWriteConfig writeConfig, boolean overwrite) {
if (overwrite) {
return CommitWriteHandleFactory.getInstance();
}
... | 3.26 |
hudi_CloudObjectsSelectorCommon_getCloudObjectMetadataPerPartition_rdh | /**
* Return a function that extracts filepaths from a list of Rows.
* Here Row is assumed to have the schema [bucket_name, filepath_relative_to_bucket, object_size]
*
* @param storageUrlSchemePrefix
* Eg: s3:// or gs://. The storage-provider-specific prefix to use within the URL.
* ... | 3.26 |
hudi_CloudObjectsSelectorCommon_checkIfFileExists_rdh | /**
* Check if file with given path URL exists
*
* @param storageUrlSchemePrefix
* Eg: s3:// or gs://. The storage-provider-specific prefix to use within the URL.
*/
private static boolean checkIfFileExists(String storageUrlSchemePrefix, String bucket, String filePathUrl, Configuration configuration)
{
t... | 3.26 |
hudi_CloudObjectsSelectorCommon_getUrlForFile_rdh | /**
* Construct a full qualified URL string to a cloud file from a given Row. Optionally check if the file exists.
* Here Row is assumed to have the schema [bucket_name, filepath_relative_to_bucket].
* The checkIfExists logic assumes that the relevant impl classes for the storageUrlSchemePrefix are already present
... | 3.26 |
hudi_BaseCommitActionExecutor_saveWorkloadProfileMetadataToInflight_rdh | /**
* Save the workload profile in an intermediate file (here re-using commit files) This is useful when performing
* rollback for MOR tables. Only updates are recorded in the workload profile metadata since updates to log blocks
* are unknown across batches Inserts (which are new parquet files) are rolled back base... | 3.26 |
hudi_BaseCommitActionExecutor_runPrecommitValidators_rdh | /**
* Check if any validators are configured and run those validations. If any of the validations fail, throws HoodieValidationException.
*/
protected void runPrecommitValidators(HoodieWriteMetadata<O> writeMetadata) {
if (StringUtils.isNullOrEmpty(config.getPreCommitValidators())) {
return;
}
... | 3.26 |
hudi_BaseCommitActionExecutor_getSchemaToStoreInCommit_rdh | /**
* By default, return the writer schema in Write Config for storing in commit.
*/
protected String getSchemaToStoreInCommit() {
return config.getSchema();
} | 3.26 |
hudi_BaseCommitActionExecutor_finalizeWrite_rdh | /**
* Finalize Write operation.
*
* @param instantTime
* Instant Time
* @param stats
* Hoodie Write Stat
*/
protected void finalizeWrite(String instantTime, List<HoodieWriteStat> stats, HoodieWriteMetadata<O> result) {
try {Instant start = Instant.now();
table.finalizeWrite(context, instantTime,... | 3.26 |
hudi_BaseCommitActionExecutor_validateWriteResult_rdh | /**
* Validate actions taken by clustering. In the first implementation, we validate at least one new file is written.
* But we can extend this to add more validation. E.g. number of records read = number of records written etc.
* We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hoo... | 3.26 |
hudi_InstantStateHandler_getAllInstantStates_rdh | /**
* Read instant states from cache of file system.
*
* @return Instant states under the input instant state path.
*/
public List<InstantStateDTO> getAllInstantStates(String instantStatePath) {
if (requestCount.incrementAndGet() >= timelineServiceConfig.instantStateForceRefreshRequestNumber) {
// Do re... | 3.26 |
hudi_InstantStateHandler_refresh_rdh | /**
* Refresh the checkpoint messages cached. Will be called when coordinator start/commit/abort instant.
*
* @return Whether refreshing is successful.
*/
public boolean refresh(String instantStatePath) {
try {
cachedInstantStates.put(instantStatePath, scanInstantState(new Path(instantStatePath)));
... | 3.26 |
hudi_InstantStateHandler_scanInstantState_rdh | /**
* Scan the instant states from file system.
*/
public List<InstantStateDTO> scanInstantState(Path instantStatePath) {
try {
// Check instantStatePath exists before list status, see HUDI-5915
if (this.fileSystem.exists(instantStatePath)) {
return Arrays.stream(this.fileSystem.listS... | 3.26 |
hudi_HoodieWriteCommitPulsarCallback_createProducer_rdh | /**
* Method helps to create {@link Producer}.
*
* @param hoodieConfig
* Pulsar configs
* @return A {@link Producer}
*/
public Producer<String> createProducer(HoodieConfig hoodieConfig) throws PulsarClientException {
MessageRoutingMode routeMode = Enum.valueOf(MessageRoutingMode.class, PRODUCER_ROUTE_MODE.defau... | 3.26 |
hudi_HoodieWriteCommitPulsarCallback_validatePulsarConfig_rdh | /**
* Validate whether both pulsar's brokerServiceUrl and topic are configured.
* Exception will be thrown if anyone of them is not configured.
*/
private void validatePulsarConfig() {
ValidationUtils.checkArgument(!StringUtils.isNullOrEmpty(serviceUrl), String.format("Config %s can not be " + "null or empty", B... | 3.26 |
hudi_Source_fetchNext_rdh | /**
* Main API called by Hoodie Streamer to fetch records.
*
* @param lastCkptStr
* Last Checkpoint
* @param sourceLimit
* Source Limit
* @return */
public final InputBatch<T> fetchNext(Option<String> lastCkptStr, long sourceLimit) {
InputBatch<T> batch = fetchNewData(lastCkptStr, sourceLimit);
// I... | 3.26 |
hudi_BootstrapUtils_getAllLeafFoldersWithFiles_rdh | /**
* Returns leaf folders with files under a path.
*
* @param baseFileFormat
* Hoodie base file format
* @param fs
* File System
* @param context
 * HoodieEngineContext
* @return list of partition paths with files under them.
* @throws IOException
*/
public static List<Pair<String, List<HoodieFileStat... | 3.26 |
hudi_HoodieTimeline_getInflightInstant_rdh | /**
* Returns the inflight instant corresponding to the instant being passed. Takes care of changes in action names
* between inflight and completed instants (compaction <=> commit) and (logcompaction <==> deltacommit).
*
* @param instant
* Hoodie Instant
* @param metaClient
* Hoodie metaClient to fetch tabl... | 3.26 |
hudi_HoodieTimeline_makeInflightLogCompactionFileName_rdh | // Log compaction action
static String makeInflightLogCompactionFileName(String instantTime) {
return StringUtils.join(instantTime, HoodieTimeline.INFLIGHT_LOG_COMPACTION_EXTENSION);
} | 3.26 |
hudi_HoodieTimeline_minInstant_rdh | /**
* Returns the smaller of the given two instants.
*/
static String minInstant(String instant1, String instant2) {
return compareTimestamps(instant1, LESSER_THAN, instant2) ? instant1 : instant2;
} | 3.26 |
hudi_HoodieTimeline_getLogCompactionInflightInstant_rdh | // Returns Log compaction inflight instant
static HoodieInstant getLogCompactionInflightInstant(final String timestamp) {
return new HoodieInstant(State.INFLIGHT, LOG_COMPACTION_ACTION, timestamp);
} | 3.26 |
hudi_HoodieTimeline_isInClosedRange_rdh | /**
* Return true if specified timestamp is in range [startTs, endTs].
*/
static boolean isInClosedRange(String timestamp, String startTs, String endTs) {
return HoodieTimeline.compareTimestamps(timestamp,
GREATER_THAN_OR_EQUALS, startTs) && HoodieTimeline.compareTimestamps(timestamp, LESSER_THAN_OR_EQUALS,... | 3.26 |
hudi_HoodieTimeline_m2_rdh | /**
* Returns the greater of the given two instants.
*/
static String m2(String instant1, String instant2) {
return compareTimestamps(instant1, GREATER_THAN, instant2) ?
instant1 : instant2;
} | 3.26 |
hudi_HoodieTimeline_getLogCompactionRequestedInstant_rdh | // Returns Log compaction requested instant
static HoodieInstant getLogCompactionRequestedInstant(final String timestamp) {
return new HoodieInstant(State.REQUESTED, LOG_COMPACTION_ACTION, timestamp);
} | 3.26 |
hudi_HoodieTimeline_isInClosedOpenRange_rdh | /**
* Return true if specified timestamp is in range [startTs, endTs).
*/
static boolean isInClosedOpenRange(String timestamp, String startTs, String endTs) {
return HoodieTimeline.compareTimestamps(timestamp, GREATER_THAN_OR_EQUALS, startTs) && HoodieTimeline.compareTimestamps(timestamp, LESSER_THAN, endTs);
} | 3.26 |
hudi_AbstractStreamWriteFunction_instantToWrite_rdh | /**
* Prepares the instant time to write with for next checkpoint.
*
* @param hasData
* Whether the task has buffering data
* @return The instant time
*/
protected String instantToWrite(boolean hasData) {
String instant = m1();
... | 3.26 |
hudi_AbstractStreamWriteFunction_restoreWriteMetadata_rdh | // -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private void restoreWriteMetadata() throws Exception {
boolean eventSent = false;
HoodieTimeline pendingTimeline = this.metaClient.getActiveTimel... | 3.26 |
hudi_AbstractStreamWriteFunction_m1_rdh | /**
* Returns the last pending instant time.
*/
protected String m1() {
return this.ckpMetadata.lastPendingInstant();
} | 3.26 |
hudi_AbstractStreamWriteFunction_isConfirming_rdh | // -------------------------------------------------------------------------
// Getter/Setter
// -------------------------------------------------------------------------
@VisibleForTesting
public boolean isConfirming() {
return this.confirming;
} | 3.26 |
hudi_HoodieMetrics_getDurationInMs_rdh | /**
 * By default, the timer context returns duration in nanoseconds. Convert it to milliseconds.
*/public long getDurationInMs(long ctxDuration) {
return ctxDuration / 1000000;
} | 3.26 |
hudi_HoodieMetrics_reportMetrics_rdh | /**
* Given a commit action, metrics name and value this method reports custom metrics.
*/
public void reportMetrics(String commitAction, String metricName, long value) {
metrics.registerGauge(getMetricsName(commitAction, metricName), value);
} | 3.26 |
hudi_TableSchemaResolver_getTableAvroSchema_rdh | /**
* Fetches tables schema in Avro format as of the given instant
*
* @param instant
* as of which table's schema will be fetched
*/
public Schema getTableAvroSchema(HoodieInstant instant, boolean includeMetadataFields) throws Exception {
return getTableAvroSchemaInternal(includeMetadataFields, Option.of(... | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.