| name | code_snippet | score |
|---|---|---|
hudi_BufferedRandomAccessFile_init_rdh | /**
 * Allocates the in-memory buffer backing this file.
 *
 * @param size
 * 		- capacity of the buffer
 */
private void init(int size) {
    // Never shrink below the default buffer size.
    this.capacity = Math.max(DEFAULT_BUFFER_SIZE, size);
    // allocate() is backed by a heap array, same as wrap(new byte[...]).
    this.dataBuffer = ByteBuffer.allocate(this.capacity);
} | 3.26 |
hudi_BufferedRandomAccessFile_endPosition_rdh | /**
*
* @return endPosition of the buffer. For the last file block, this may not be a valid position.
*/
private long endPosition() {
return this.startPosition + this.capacity;
} | 3.26 |
hudi_BufferedRandomAccessFile_read_rdh | /**
* Read specified number of bytes into given array starting at given offset.
*
* @param b
* - byte array
* @param off
* - start offset
* @param len
* - length of bytes to be read
* @return - number of bytes read.
* @throws IOException
*/
@Override
public int read(byte[] b, int off, int len) throws ... | 3.26 |
hudi_BufferedRandomAccessFile_flushBuffer_rdh | /**
* Flush any dirty bytes in the buffer to disk.
*
* @throws IOException
*/
private void flushBuffer() throws IOException {
if (this.f0)
{
alignDiskPositionToBufferStartIfNeeded();
int len = ((int) (this.currentPosition - this.startPosition));
super.write(this.dataBuffer.array(), 0, len);
this.d... | 3.26 |
hudi_BufferedRandomAccessFile_getFilePointer_rdh | /**
 *
 * @return current file position
 */
// NOTE(review): this returns the logical (buffered) position, which may be
// ahead of the position flushed to disk — confirm against flushBuffer().
@Override
public long getFilePointer() {
    return this.currentPosition;
} | 3.26 |
hudi_BufferedRandomAccessFile_length_rdh | /**
* Returns the length of the file, depending on whether buffer has more data (to be flushed).
*
* @return - length of the file (including data yet to be flushed to the file).
* @throws IOException
*/
@Override
public long length() throws IOException {
return Math.max(this.currentPosition, super.length());
... | 3.26 |
hudi_BufferedRandomAccessFile_seek_rdh | /**
* If the new seek position is in the buffer, adjust the currentPosition.
* If the new seek position is outside of the buffer, flush the contents to
* the file and reload the buffer corresponding to the position.
*
* We logically view the file as group blocks, where each block will perfectly
* fit into the buf... | 3.26 |
hudi_BufferedRandomAccessFile_fillBuffer_rdh | /**
* read ahead file contents to buffer.
*
* @return number of bytes filled
* @throws IOException
*/
private int fillBuffer() throws
IOException {
int cnt = 0;
int bytesToRead = this.capacity;
// blocking read, until buffer is filled or EOF reached
while (bytesToRead > 0) {
int n
= super.read(this.dat... | 3.26 |
hudi_DirectMarkerBasedDetectionStrategy_checkMarkerConflict_rdh | /**
* We need to do list operation here.
* In order to reduce the list pressure as much as possible, first we build path prefix in advance:
* '$base_path/.temp/instant_time/partition_path', and only list these specific partition_paths
* we need instead of list all the '$base_path/.temp/'
*
* @param basePath
* ... | 3.26 |
hudi_HadoopConfigurations_getParquetConf_rdh | /**
* Creates a merged hadoop configuration with given flink configuration and hadoop configuration.
*/
public static Configuration getParquetConf(Configuration options, Configuration hadoopConf) {
Configuration copy = new Configuration(hadoopConf);
Map<String, String> parquetOptions = FlinkOptions.getPropert... | 3.26 |
hudi_HadoopConfigurations_getHiveConf_rdh | /**
* Creates a Hive configuration with configured dir path or empty if no Hive conf dir is set.
*/
public static Configuration getHiveConf(Configuration conf) {
String explicitDir = conf.getString(FlinkOptions.HIVE_SYNC_CONF_DIR, System.getenv("HIVE_CONF_DIR"));
Configuration hadoopConf = new Configuration... | 3.26 |
hudi_ConsistentHashingUpdateStrategyUtils_constructPartitionToIdentifier_rdh | /**
* Construct identifier for the given partitions that are under concurrent resizing (i.e., clustering).
*
* @return map from partition to pair<instant, identifier>, where instant is the clustering instant.
*/
public static Map<String, Pair<String, ConsistentBucketIdentifier>> constructPartitionToIdentifier(Set<S... | 3.26 |
hudi_DFSPropertiesConfiguration_addToGlobalProps_rdh | /**
 * Adds a single key/value entry to the shared global properties (test only).
 *
 * @param key property name to insert
 * @param value property value to insert
 * @return the shared global properties after the insertion
 */
public static TypedProperties addToGlobalProps(String key, String value) {
    final TypedProperties globals = GLOBAL_PROPS;
    globals.put(key, value);
    return globals;
} | 3.26 |
hudi_DFSPropertiesConfiguration_loadGlobalProps_rdh | /**
* Load global props from hudi-defaults.conf which is under class loader or CONF_FILE_DIR_ENV_NAME.
*
* @return Typed Properties
*/
public static TypedProperties loadGlobalProps() {
DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration();
// First try loading the external config file from clas... | 3.26 |
hudi_DFSPropertiesConfiguration_addPropsFromFile_rdh | /**
* Add properties from external configuration files.
*
* @param filePath
* File path for configuration file
*/
public void addPropsFromFile(Path filePath) {
if (visitedFilePaths.contains(filePath.toString())) {
throw new IllegalStateException(("Loop detected; file " + filePath) + " already referen... | 3.26 |
hudi_DFSPropertiesConfiguration_addPropsFromStream_rdh | /**
* Add properties from buffered reader.
*
* @param reader
* Buffered Reader
* @throws IOException
*/
public void addPropsFromStream(BufferedReader reader, Path cfgFilePath) throws IOException {
try {
reader.lines().forEach(line -> {
if
(!isValidLine(line)) {
... | 3.26 |
hudi_IncrSourceHelper_getStrictlyLowerTimestamp_rdh | /**
* Get a timestamp which is the next value in a descending sequence.
*
* @param timestamp
* Timestamp
*/
private static String getStrictlyLowerTimestamp(String timestamp) {
long ts = Long.parseLong(timestamp);
ValidationUtils.checkArgument(ts > 0, "Timestamp must be positive");
long lower = ts - ... | 3.26 |
hudi_IncrSourceHelper_getMissingCheckpointStrategy_rdh | /**
* Determine the policy to choose if a checkpoint is missing (detected by the absence of a beginInstant),
* during a run of a {@link HoodieIncrSource}.
*
* @param props
* the usual Hudi props object
* @return */
public static MissingCheckpointStrategy getMissingCheckpointStrategy(TypedProperties
props) {
b... | 3.26 |
hudi_IncrSourceHelper_filterAndGenerateCheckpointBasedOnSourceLimit_rdh | /**
* Adjust the source dataset to size based batch based on last checkpoint key.
*
* @param sourceData
* Source dataset
* @param sourceLimit
* Max number of bytes to be read from source
* @param queryInfo
* Query Info
* @return end instants along with filtered row... | 3.26 |
hudi_IncrSourceHelper_generateQueryInfo_rdh | /**
* Find begin and end instants to be set for the next fetch.
*
* @param jssc
* Java Spark Context
* @param srcBasePath
* Base path of Hudi source table
* @param numInstantsPerFetch
* Max Instants per fetch
* @param beginInstant
* Last Checkpoint String
... | 3.26 |
hudi_IncrSourceHelper_getHollowCommitHandleMode_rdh | /**
* When hollow commits are found while using incremental source with {@link HoodieDeltaStreamer},
* unlike batch incremental query, we do not use {@link HollowCommitHandling#FAIL} by default,
* instead we use {@link HollowCommitHandling#BLOCK} to block processing data from going beyond the
* hollow commits to av... | 3.26 |
hudi_BaseAvroPayload_isDeleteRecord_rdh | /**
*
* @param genericRecord
* instance of {@link GenericRecord} of interest.
* @returns {@code true} if record represents a delete record. {@code false} otherwise.
*/
protected boolean isDeleteRecord(GenericRecord genericRecord) {
final String isDeleteKey = HoodieRecord.HOODIE_IS_DELETED_FIELD;
// Modi... | 3.26 |
hudi_BaseAvroPayload_isDeleted_rdh | /**
 * Defines whether this implementation of {@link HoodieRecordPayload} is deleted.
 * We will not do deserialization in this method.
 */
public boolean isDeleted(Schema schema, Properties props) {
    // The flag is precomputed elsewhere, so no Avro deserialization happens here.
    return isDeletedRecord;
} | 3.26 |
hudi_BaseAvroPayload_canProduceSentinel_rdh | /**
 * Defines whether this implementation of {@link HoodieRecordPayload} could produce
 * {@link HoodieRecord#SENTINEL}
 */
public boolean canProduceSentinel() {
    // Base payloads never emit the sentinel; subclasses that can should override.
    return false;
} | 3.26 |
hudi_InternalSchemaMerger_mergeType_rdh | /**
* Create final read schema to read avro/parquet file.
* this is auxiliary function used by mergeSchema.
*/
private Type mergeType(Type type, int currentTypeId) {
switch (type.typeId()) {
case RECORD :
... | 3.26 |
hudi_InternalSchemaMerger_mergeSchema_rdh | /**
 * Create final read schema to read avro/parquet file.
 *
 * @return read schema to read avro/parquet file.
 */
public InternalSchema mergeSchema() {
    // Merge starting from the root record (type id 0); the root is always a record.
    Type mergedRoot = mergeType(querySchema.getRecord(), 0);
    return new InternalSchema((Types.RecordType) mergedRoot);
} | 3.26 |
hudi_SimpleBloomFilter_serializeToString_rdh | /**
* Serialize the bloom filter as a string.
*/
@Override
public String serializeToString() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
try {
filter.write(dos);
byte[] bytes = baos.toByteArray();
dos.close();
... | 3.26 |
hudi_SimpleBloomFilter_m0_rdh | // @Override
/**
 * Writes the UTF-8 encoded string form of the underlying filter to the given output.
 *
 * @param out sink to write the serialized filter into
 * @throws IOException if the underlying write fails
 */
public void m0(DataOutput out) throws IOException {
    final byte[] encoded = getUTF8Bytes(filter.toString());
    out.write(encoded);
} | 3.26 |
hudi_SimpleBloomFilter_readFields_rdh | // @Override
// Rebuilds the internal filter from its serialized form; any existing state is discarded.
public void readFields(DataInput in) throws IOException {
    filter = new InternalBloomFilter();
    filter.readFields(in);
} | 3.26 |
hudi_FlatLists_of_rdh | /**
 * Creates a memory-, CPU- and cache-efficient immutable list from an
 * existing list. The list is always copied.
 *
 * @param t
 * 		Array of members of list
 * @param <T>
 * 		Element type
 * @return List containing the given members
 */
public static <T> List<T> of(List<T> t) {
    // Delegates to the internal factory, which performs the copy.
    return of_(t);
} | 3.26 |
hudi_HiveHoodieTableFileIndex_listFileSlices_rdh | /**
* Lists latest file-slices (base-file along w/ delta-log files) per partition.
*
* @return mapping from string partition paths to its base/log files
*/
public Map<String, List<FileSlice>> listFileSlices() {
return getAllInputFileSlices().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().getPath(... | 3.26 |
hudi_FlinkClientUtil_m0_rdh | /**
* Returns the hadoop configuration with possible hadoop conf paths.
* E.G. the configurations under path $HADOOP_CONF_DIR and $HADOOP_HOME.
*/
public static Configuration m0() {
// create hadoop configuration with hadoop conf directory configured.
Configuration hadoopConf = nu... | 3.26 |
hudi_FlinkClientUtil_createMetaClient_rdh | /**
 * Creates the meta client.
 *
 * @param basePath base path of the Hudi table
 * @return a meta client configured with the Hadoop configuration from {@link FlinkClientUtil#m0()}
 */
public static HoodieTableMetaClient createMetaClient(String basePath) {
    return HoodieTableMetaClient.builder()
        .setBasePath(basePath)
        .setConf(FlinkClientUtil.m0())
        .build();
} | 3.26 |
hudi_FlinkClientUtil_getHadoopConfiguration_rdh | /**
* Returns a new Hadoop Configuration object using the path to the hadoop conf configured.
*
* @param hadoopConfDir
* Hadoop conf directory path.
* @return A Hadoop configuration instance.
*/
private static Configuration getHadoopConfiguration(String hadoopConfDir) {
if (new File(hadoopConfDir).exists())... | 3.26 |
hudi_HoodieHFileUtils_createHFileReader_rdh | /**
* Creates HFile reader for byte array with default `primaryReplicaReader` as true.
*
* @param fs
* File system.
* @param dummyPath
* Dummy path to file to read.
* @param content
* Content in byte array.
* @return HFile reader
* @throws IOException
* Upon error.
*/
public static Reader createHFil... | 3.26 |
hudi_SourceFormatAdapter_fetchNewDataInRowFormat_rdh | /**
* Fetch new data in row format. If the source provides data in different format, they are translated to Row format
*/
public InputBatch<Dataset<Row>> fetchNewDataInRowFormat(Option<String> lastCkptStr, long sourceLimit) {
switch (source.getSourceType()) {
case ROW :
// we do the sanitizing... | 3.26 |
hudi_SourceFormatAdapter_fetchNewDataInAvroFormat_rdh | /**
* Fetch new data in avro format. If the source provides data in different format, they are translated to Avro format
*/
public InputBatch<JavaRDD<GenericRecord>> fetchNewDataInAvroFormat(Option<String> lastCkptStr, long sourceLimit) {
switch (source.getSourceType()) {
case AVRO :
// don'... | 3.26 |
hudi_SourceFormatAdapter_getInvalidCharMask_rdh | /**
 * Replacement mask for invalid characters encountered in avro names.
 *
 * @return sanitized value.
 */
// Simple accessor; the mask itself is configured elsewhere.
private String getInvalidCharMask() {
    return invalidCharMask;
} | 3.26 |
hudi_SourceFormatAdapter_processErrorEvents_rdh | /**
* transform datasets with error events when error table is enabled
*
* @param eventsRow
* @return */
public Option<Dataset<Row>> processErrorEvents(Option<Dataset<Row>> eventsRow, ErrorEvent.ErrorReason errorReason) {
return eventsRow.map(dataset -> { if (errorTableWriter.isPresent() && Arrays.stream(datas... | 3.26 |
hudi_SourceFormatAdapter_transformJsonToGenericRdd_rdh | /**
* transform input rdd of json string to generic records with support for adding error events to error table
*
* @param inputBatch
* @return */
private JavaRDD<GenericRecord> transformJsonToGenericRdd(InputBatch<JavaRDD<String>> inputBatch) {
MercifulJsonConverter.clearCache(inputBatch.getSchemaProvider().g... | 3.26 |
hudi_SourceFormatAdapter_isFieldNameSanitizingEnabled_rdh | /**
 * Config that automatically sanitizes the field names as per avro naming rules.
 *
 * @return enabled status.
 */
// Simple accessor over a flag configured elsewhere.
private boolean isFieldNameSanitizingEnabled() {
    return shouldSanitize;
} | 3.26 |
hudi_ArchiveTask_withProps_rdh | /**
 * JavaSparkContext to run spark job.
 */
private JavaSparkContext jsc;

/**
 * Sets the properties for the task being built.
 *
 * @param props typed properties for the task
 * @return this builder, for chaining
 */
public Builder withProps(TypedProperties props) {
    this.f0 = props;
    return this;
} | 3.26 |
hudi_ArchiveTask_newBuilder_rdh | /**
 * Utility to create builder for {@link ArchiveTask}.
 *
 * @return Builder for {@link ArchiveTask}.
 */
public static Builder newBuilder() {
    // A fresh builder per call; builders are not shared.
    return new Builder();
} | 3.26 |
hudi_Pair_hashCode_rdh | /**
* <p>
* Returns a suitable hash code. The hash code follows the definition in {@code Map.Entry}.
* </p>
*
* @return the hash code
*/
@Override
public int hashCode() {
// see Map.Entry API specification
return (getKey() == null ? 0 :
getKey().hashCode()) ^ (getValue() == null
? 0 : getValue().... | 3.26 |
hudi_Pair_toString_rdh | /**
* <p>
* Formats the receiver using the given format.
* </p>
*
* <p>
* This uses {@link java.util.Formattable} to perform the formatting. Two variables may be used to embed the left and
* right elements. Use {@code %1$s} for the left element (key) and {@code %2$s} for the right element (value). The
* default... | 3.26 |
hudi_Pair_of_rdh | /**
* <p>
* Obtains an immutable pair of from two objects inferring the generic types.
* </p>
*
* <p>
* This factory allows the pair to be created using inference to obtain the generic types.
* </p>
*
* @param <L>
* the left element type
* @param <R>
* the right element type
* @param left
* the left... | 3.26 |
hudi_Pair_compareTo_rdh | // -----------------------------------------------------------------------
/**
* <p>
* Compares the pair based on the left element followed by the right element. The types must be {@code Comparable}.
* </p>
*
* @param other
* the other pair, not null
* @return negative if this is less, zero if equal, positive ... | 3.26 |
hudi_Pair_equals_rdh | /**
* <p>
* Compares this pair to another based on the two elements.
* </p>
*
* @param obj
* the object to compare to, null returns false
* @return true if the elements of the pair are equal
*/
@Override public boolean equals(final Object obj) {
if (obj == this) {return true;
}
if (obj instanceof ... | 3.26 |
hudi_Key_incrementWeight_rdh | /**
 * Increments the weight of <i>this</i> key by one.
 */
public void incrementWeight() {
    // Not thread-safe; callers must synchronize externally if shared.
    this.weight++;
} | 3.26 |
hudi_Key_readFields_rdh | /**
* Deserialize the fields of this object from <code>in</code>.
*
* <p>For efficiency, implementations should attempt to re-use storage in the
* existing object where possible.</p>
*
* @param in
* <code>DataInput</code> to deseriablize this object from.
* @throws IOException
*/public void readFields(DataIn... | 3.26 |
hudi_Key_getBytes_rdh | /**
 *
 * @return byte[] The value of <i>this</i> key.
 */
// Returns the internal array directly (no defensive copy); callers must not mutate it.
public byte[] getBytes() {
    return this.bytes;
} | 3.26 |
hudi_Key_m0_rdh | /**
 *
 * @return Returns the weight associated to <i>this</i> key.
 */
public double m0() {
    return weight;
} | 3.26 |
hudi_Key_write_rdh | /**
 * Serialize the fields of this object to <code>out</code>.
 *
 * Layout: 4-byte length prefix, the raw key bytes, then the 8-byte weight.
 *
 * @param out
 * 		<code>DataOuput</code> to serialize this object into.
 * @throws IOException
 */
public void write(DataOutput out) throws IOException {
    final byte[] value = this.bytes;
    out.writeInt(value.length);
    out.write(value);
    out.writeDouble(this.weight);
} | 3.26 |
hudi_Key_set_rdh | /**
 * Replaces this key's value and weight.
 *
 * @param value
 * 		new key bytes; must not be null
 * @param weight
 * 		new weight for the key
 * @throws IllegalArgumentException if {@code value} is null
 */
public void set(byte[] value, double weight) {
    if (null == value) {
        throw new IllegalArgumentException("value can not be null");
    }
    // Stores the array reference as-is (no defensive copy).
    this.bytes = value;
    this.weight = weight;
} | 3.26 |
hudi_Key_compareTo_rdh | // Comparable
@Override
public int compareTo(Key other) {
int result
= this.bytes.length - other.getBytes().length;
for (int i = 0; (result == 0) && (i < bytes.length); i++) {
result = this.bytes[i] - other.bytes[i];
}
if (result == 0) {
result = ((int) (this.weight
- other.... | 3.26 |
hudi_InstantRange_builder_rdh | /**
 * Returns the builder.
 */
public static Builder builder() {
    // A fresh builder per call; builders are not shared.
    return new Builder();
} | 3.26 |
hudi_SecondaryIndexManager_refresh_rdh | /**
 * Refresh the specific secondary index
 *
 * @param metaClient
 * 		Hoodie table meta client
 * @param indexName
 * 		The target secondary index name
 */
public void refresh(HoodieTableMetaClient metaClient, String indexName) {
    // Not yet implemented — currently a no-op.
    // TODO
} | 3.26 |
hudi_SecondaryIndexManager_show_rdh | /**
 * Show secondary indexes from hoodie table
 *
 * @param metaClient
 * 		Hoodie table meta client
 * @return Indexes in this table
 */
// Thin delegate to SecondaryIndexUtils; no local state is consulted.
public Option<List<HoodieSecondaryIndex>> show(HoodieTableMetaClient metaClient) {
    return SecondaryIndexUtils.getSecondaryIndexes(metaClient);
} | 3.26 |
hudi_SecondaryIndexManager_indexExists_rdh | /**
* Check if the specific secondary index exists. When drop a secondary index,
* only check index name, but for adding a secondary index, we should also
* check the index type and columns when index name is different.
*
* @param secondaryIndexes
* Current secondary indexes in this ... | 3.26 |
hudi_SecondaryIndexManager_create_rdh | /**
* Create a secondary index for hoodie table, two steps will be performed:
* 1. Add secondary index metadata to hoodie.properties
* 2. Trigger build secondary index
*
* @param metaClient
* Hoodie table meta client
* @param indexName
* The unique secondary index name
... | 3.26 |
hudi_LocalRegistry_m0_rdh | /**
* Get all Counter type metrics.
*/
@Override
public Map<String,
Long> m0(boolean prefixWithRegistryName) {
HashMap<String, Long> countersMap = new HashMap<>();
counters.forEach((k, v) -> {
String key = (prefixWithRegistryName) ? (name + ".") + k : k;
countersMap.put(key, v.getValue());
... | 3.26 |
hudi_ClusteringCommitSink_validateWriteResult_rdh | /**
* Validate actions taken by clustering. In the first implementation, we validate at least one new file is written.
* But we can extend this to add more validation. E.g. number of records read = number of records written etc.
* We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hoo... | 3.26 |
hudi_ClusteringCommitSink_m0_rdh | /**
* Condition to commit: the commit buffer has equal size with the clustering plan operations
* and all the clustering commit event {@link ClusteringCommitEvent} has the same clustering instant time.
*
* @param instant
* Clustering commit instant time
* @param events
* Commi... | 3.26 |
hudi_HoodieTimeGeneratorConfig_defaultConfig_rdh | /**
 * Returns the default configuration.
 *
 * @param tablePath base path of the table the generated config applies to
 * @return a config built with defaults for the given table path
 */
public static HoodieTimeGeneratorConfig defaultConfig(String tablePath) {
    return newBuilder().withPath(tablePath).build();
} | 3.26 |
hudi_BaseHoodieDateTimeParser_getOutputDateFormat_rdh | /**
 * Returns the output date format in which the partition paths will be created for the hudi dataset.
 *
 * @return the configured output date format string
 */
public String getOutputDateFormat() {
    return getStringWithAltKeys(config, TIMESTAMP_OUTPUT_DATE_FORMAT);
} | 3.26 |
hudi_BaseHoodieDateTimeParser_getConfigInputDateFormatDelimiter_rdh | /**
 * Returns the input date format delimiter, comma by default.
 *
 * @return the delimiter used to split the input date format config
 */
public String getConfigInputDateFormatDelimiter() {
    return this.configInputDateFormatDelimiter;
} | 3.26 |
hudi_MarkerDirState_parseMarkerFileIndex_rdh | /**
* Parses the marker file index from the marker file path.
* <p>
* E.g., if the marker file path is /tmp/table/.hoodie/.temp/000/MARKERS3, the index returned is 3.
*
* @param markerFilePathStr
* full path of marker file
* @return the marker file index
*/
private int parseMarkerFileIndex(String markerFilePa... | 3.26 |
hudi_MarkerDirState_m0_rdh | /**
* Adds a {@code MarkerCreationCompletableFuture} instance from a marker
* creation request to the queue.
*
* @param future
* {@code MarkerCreationCompletableFuture} instance.
*/
public void m0(MarkerCreationFuture future) {
synchronized(markerCreationFutures) {
markerCreationFutures.add(future);... | 3.26 |
hudi_MarkerDirState_flushMarkersToFile_rdh | /**
* Flushes markers to the underlying file.
*
* @param markerFileIndex
* file index to use.
*/
private void flushMarkersToFile(int markerFileIndex) {
LOG.debug(((("Write to " + markerDirPath) + "/") + MARKERS_FILENAME_PREFIX) + markerFileIndex);
HoodieTimer timer = HoodieTimer.start();
Path markersFilePath = ... | 3.26 |
hudi_MarkerDirState_getAllMarkers_rdh | /**
 *
 * @return all markers in the marker directory.
 */
// Returns the internal set directly (no copy); callers must not mutate it.
public Set<String> getAllMarkers() {
    return allMarkers;
} | 3.26 |
hudi_MarkerDirState_getPendingMarkerCreationRequests_rdh | /**
*
* @param shouldClear
* Should clear the internal request list or not.
* @return futures of pending marker creation requests.
*/
public List<MarkerCreationFuture> getPendingMarkerCreationRequests(boolean shouldClear) {
List<MarkerCreationFuture> pendingFutures;
synchronized(markerCreationFutures) {
... | 3.26 |
hudi_MarkerDirState_writeMarkerTypeToFile_rdh | /**
* Writes marker type, "TIMELINE_SERVER_BASED", to file.
*/
private void writeMarkerTypeToFile() {
Path dirPath = new Path(markerDirPath);
try {
if ((!fileSystem.exists(dirPath)) || (!MarkerUtils.doesMarkerTypeFileExist(fileSystem, markerDirPath))) {
// There is no existing marker directory, create a new directory... | 3.26 |
hudi_MarkerDirState_markFileAsAvailable_rdh | /**
 * Marks the file as available to use again.
 *
 * @param fileIndex
 * 		file index
 */
public void markFileAsAvailable(int fileIndex) {
    // NOTE(review): presumably the same lock guards claiming a file slot,
    // keeping the availability flags consistent across threads — confirm.
    synchronized(markerCreationProcessingLock) {
        threadUseStatus.set(fileIndex, false);
    }
} | 3.26 |
hudi_MarkerDirState_deleteAllMarkers_rdh | /**
 * Deletes markers in the directory.
 *
 * @return {@code true} if successful; {@code false} otherwise.
 */
public boolean deleteAllMarkers() {
    final Path dirToDelete = new Path(markerDirPath);
    final boolean deleted = FSUtils.deleteDir(hoodieEngineContext, fileSystem, dirToDelete, parallelism);
    // Drop the in-memory view regardless of whether the directory deletion succeeded.
    allMarkers.clear();
    fileMarkersMap.clear();
    return deleted;
} | 3.26 |
hudi_MarkerDirState_addMarkerToMap_rdh | /**
* Adds a new marker to the in-memory map.
*
* @param fileIndex
* Marker file index number.
* @param markerName
* Marker name.
*/
private void addMarkerToMap(int fileIndex, String markerName) {
allMarkers.add(markerName);
StringBuilder stringBuilder = fileMarkersMap.computeIfAbsent(fileIndex, k ->
new S... | 3.26 |
hudi_MarkerDirState_exists_rdh | /**
 * Checks whether the marker directory exists.
 *
 * @return {@code true} if the marker directory exists in the system.
 * @throws HoodieIOException if the filesystem check fails
 */
public boolean exists() {
    final Path dirPath = new Path(markerDirPath);
    try {
        return fileSystem.exists(dirPath);
    } catch (IOException ioe) {
        throw new HoodieIOException(ioe.getMessage(), ioe);
    }
} | 3.26 |
hudi_MarkerDirState_syncMarkersFromFileSystem_rdh | /**
* Syncs all markers maintained in the underlying files under the marker directory in the file system.
*/
private void syncMarkersFromFileSystem() {
Map<String, Set<String>> fileMarkersSetMap = MarkerUtils.readTimelineServerBasedMarkersFromFileSystem(markerDirPath, fileSystem, hoodieEngineContext, parallelism);
fo... | 3.26 |
hudi_MarkerDirState_fetchPendingMarkerCreationRequests_rdh | /**
 *
 * @return futures of pending marker creation requests and removes them from the list.
 */
// Convenience wrapper: always clears the internal request list (shouldClear = true).
public List<MarkerCreationFuture> fetchPendingMarkerCreationRequests() {
    return getPendingMarkerCreationRequests(true);
} | 3.26 |
hudi_MarkerDirState_processMarkerCreationRequests_rdh | /**
* Processes pending marker creation requests.
*
* @param pendingMarkerCreationFutures
* futures of pending marker creation requests
* @param fileIndex
* file index to use to write markers
*/
public void processMarkerCreationRequests(final List<MarkerCreationFuture> pendingMarkerCreationFutures, int file... | 3.26 |
hudi_BaseWriteHelper_deduplicateRecords_rdh | /**
* Deduplicate Hoodie records, using the given deduplication function.
*
* @param records
* hoodieRecords to deduplicate
* @param parallelism
* parallelism or partitions to be used while reducing/deduplicating
* @return Collection of HoodieRecord already be deduplicated
*/
public I deduplicateRecords(I r... | 3.26 |
hudi_GenericRecordFullPayloadSizeEstimator_getNonNull_rdh | /**
* Get the nonNull Schema of a given UNION Schema.
*
* @param schema
* @return */
protected Schema getNonNull(Schema schema) {
List<Schema> types = schema.getTypes();
return types.get(0).getType().equals(Type.NULL) ? types.get(1) : types.get(0);
} | 3.26 |
hudi_GenericRecordFullPayloadSizeEstimator_typeEstimate_rdh | /**
* Estimate the size of a given schema according to their type.
*
* @param schema
* schema to estimate.
* @return Size of the given schema.
*/
private long typeEstimate(Schema schema) {
Schema
localSchema = schema;
if (isOption(schema)) {
l... | 3.26 |
hudi_GenericRecordFullPayloadSizeEstimator_estimate_rdh | /**
* This method estimates the size of the payload if all entries of this payload were populated with one value.
* For eg. A primitive data type such as String will be populated with {@link UUID} so the length if 36 bytes
* whereas a complex data type such as an Array of type Int, will be populated with exactly 1 I... | 3.26 |
hudi_FiveToSixUpgradeHandler_deleteCompactionRequestedFileFromAuxiliaryFolder_rdh | /**
* See HUDI-6040.
*/
private void deleteCompactionRequestedFileFromAuxiliaryFolder(HoodieTable table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
HoodieTimeline compactionTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline().filter(instant -> instant.getState() == Ho... | 3.26 |
hudi_HoodieLogBlock_getContentBytes_rdh | // Return the bytes representation of the data belonging to a LogBlock
// Base implementation always throws; concrete block types are expected to override.
public byte[] getContentBytes() throws IOException {
    throw new HoodieException("No implementation was provided");
} | 3.26 |
hudi_HoodieLogBlock_getLogMetadata_rdh | /**
* Convert bytes to LogMetadata, follow the same order as {@link HoodieLogBlock#getLogMetadataBytes}.
*/public static Map<HeaderMetadataType, String> getLogMetadata(DataInputStream dis) throws IOException {
Map<HeaderMetadataType, String> metadata = new HashMap<>();
// 1. Read the metadata written out
int metada... | 3.26 |
hudi_HoodieLogBlock_tryReadContent_rdh | /**
* Read or Skip block content of a log block in the log file. Depends on lazy reading enabled in
* {@link HoodieMergedLogRecordScanner}
*/
public static Option<byte[]> tryReadContent(FSDataInputStream inputStream, Integer contentLength, boolean readLazily) throws IOException {
if (readLazily) {
// Seek to the e... | 3.26 |
hudi_HoodieLogBlock_deflate_rdh | /**
 * After the content bytes is converted into the required DataStructure by a logBlock, deflate the content to release
 * byte [] and relieve memory pressure when GC kicks in. NOTE: This still leaves the heap fragmented
 */
protected void deflate() {
    // Drop the reference so the (potentially large) byte[] becomes collectible.
    content = Option.empty();
} | 3.26 |
hudi_HoodieLogBlock_getLogMetadataBytes_rdh | /**
* Convert log metadata to bytes 1. Write size of metadata 2. Write enum ordinal 3. Write actual bytes
*/
public static byte[] getLogMetadataBytes(Map<HeaderMetadataType, String> metadata) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();DataOutputStream output = new DataOutputStream(b... | 3.26 |
hudi_HoodieLogBlock_isCompactedLogBlock_rdh | /**
 * Compacted blocks are created using log compaction which basically merges the consecutive blocks together and create
 * huge block with all the changes.
 */
public boolean isCompactedLogBlock() {
    // Presence of the COMPACTED_BLOCK_TIMES header marks a block produced by log compaction.
    return logBlockHeader.containsKey(HeaderMetadataType.COMPACTED_BLOCK_TIMES);
}
/**
*
* @return A {@link Roaring... | 3.26 |
hudi_HoodieLogBlock_inflate_rdh | /**
* When lazyReading of blocks is turned on, inflate the content of a log block from disk.
*/
protected void inflate() throws HoodieIOException {
checkState(!content.isPresent(), "Block has already been inflated");
checkState(inputStream != null, "Block should have input-stream provided");
try {
content = Option.of... | 3.26 |
hudi_HoodieInputFormatUtils_getTableMetaClientForBasePathUnchecked_rdh | /**
* Extract HoodieTableMetaClient from a partition path (not base path)
*/
public static HoodieTableMetaClient getTableMetaClientForBasePathUnchecked(Configuration conf, Path partitionPath) throws IOException {
Path baseDir = partitionPath;
FileSystem v23 = partitionPath.getFileSystem(conf);
if (Hoodi... | 3.26 |
hudi_HoodieInputFormatUtils_getAffectedPartitions_rdh | /**
* Extract partitions touched by the commitsToCheck.
*
* @param commitsToCheck
* @param tableMetaClient
* @param timeline
* @param inputPaths
* @return * @throws IOException
*/
public static Option<String> getAffectedPartitions(List<HoodieInstant> commitsToCheck, HoodieTableMetaClient tableMetaClient, Hoodi... | 3.26 |
hudi_HoodieInputFormatUtils_refreshFileStatus_rdh | /**
* Checks the file status for a race condition which can set the file size to 0. 1. HiveInputFormat does
* super.listStatus() and gets back a FileStatus[] 2. Then it creates the HoodieTableMetaClient for the paths listed.
* 3. Generation of splits looks at FileStatus size to create splits, which skips this file
... | 3.26 |
hudi_HoodieInputFormatUtils_getCommitsForIncrementalQuery_rdh | /**
* Get commits for incremental query from Hive map reduce configuration.
*
* @param job
* @param tableName
* @param timeline
* @return */
public static Option<List<HoodieInstant>> getCommitsForIncrementalQuery(Job job, String tableName, HoodieTimeline timeline) {
return Option.of(getHoodieTimelineForIncrement... | 3.26 |
hudi_HoodieInputFormatUtils_filterInstantsTimeline_rdh | /**
* Filter any specific instants that we do not want to process.
* example timeline:
* <p>
* t0 -> create bucket1.parquet
* t1 -> create and append updates bucket1.log
* t2 -> request compaction
* t3 -> create bucket2.parquet
* <p>
* if compaction at t2 takes a long time, incremental readers on RO tables can... | 3.26 |
hudi_HoodieInputFormatUtils_filterIncrementalFileStatus_rdh | /**
* Filter a list of FileStatus based on commitsToCheck for incremental view.
*
* @param job
* @param tableMetaClient
* @param timeline
* @param fileStatuses
* @param commitsToCheck
* @return */
public static List<FileStatus> filterIncrementalFileStatus(Job job, HoodieTableMetaClient tableMetaClient, Hoodie... | 3.26 |
hudi_HoodieInputFormatUtils_getTableMetaClientByPartitionPath_rdh | /**
* Extract HoodieTableMetaClient by partition path.
*
* @param conf
* The hadoop conf
* @param partitions
* The partitions
* @return partition path to table meta client mapping
*/
public static Map<Path, HoodieTableMetaClient> getTableMetaClientByPartitionPath(Configuration conf,
Set<Path> partitions) {
... | 3.26 |
hudi_HoodieInputFormatUtils_groupFileStatusForSnapshotPaths_rdh | /**
* Takes in a list of filesStatus and a list of table metadata. Groups the files status list
* based on given table metadata.
*
* @param fileStatuses
* @param fileExtension
* @param metaClientList
* @return * @throws IOException
*/public static Map<HoodieTableMetaClient, List<FileStatus>> groupFileStatusFor... | 3.26 |
hudi_HoodieInputFormatUtils_listAffectedFilesForCommits_rdh | /**
* Iterate through a list of commit metadata in natural order, and extract the file status of
* all affected files from the commits metadata grouping by file full path. If the files has
* been touched multiple times in the given commits, the return value will keep the one
* from the latest commit.
*
* @param b... | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.