_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q172100 | MemoryManager.registerScaleCallBack | test | public void registerScaleCallBack(String callBackName, Runnable callBack) {
  // Both the name and the callback are mandatory.
  Preconditions.checkNotNull(callBackName, "callBackName");
  Preconditions.checkNotNull(callBack, "callBack");
  // Guard clause: refuse duplicate names so an existing callback is never
  // silently replaced.
  if (callBacks.containsKey(callBackName)) {
    throw new IllegalArgumentException("The callBackName " + callBackName +
        " is duplicated and has been registered already.");
  }
  callBacks.put(callBackName, callBack);
} | java | {
"resource": ""
} |
q172101 | ParquetFileWriter.start | test | public void start() throws IOException {
state = state.start();
LOG.debug("{}: start", out.getPos());
out.write(MAGIC);
} | java | {
"resource": ""
} |
q172102 | ParquetFileWriter.startBlock | test | public void startBlock(long recordCount) throws IOException {
state = state.startBlock();
LOG.debug("{}: start block", out.getPos());
// out.write(MAGIC); // TODO: add a magic delimiter
alignment.alignForRowGroup(out);
currentBlock = new BlockMetaData();
currentRecordCount = recordCount;
currentColumnIndexes = new ArrayList<>();
currentOffsetIndexes = new ArrayList<>();
} | java | {
"resource": ""
} |
q172103 | ParquetFileWriter.startColumn | test | public void startColumn(ColumnDescriptor descriptor,
long valueCount,
CompressionCodecName compressionCodecName) throws IOException {
state = state.startColumn();
encodingStatsBuilder.clear();
currentEncodings = new HashSet<Encoding>();
currentChunkPath = ColumnPath.get(descriptor.getPath());
currentChunkType = descriptor.getPrimitiveType();
currentChunkCodec = compressionCodecName;
currentChunkValueCount = valueCount;
currentChunkFirstDataPage = out.getPos();
compressedLength = 0;
uncompressedLength = 0;
// The statistics will be copied from the first one added at writeDataPage(s) so we have the correct typed one
currentStatistics = null;
columnIndexBuilder = ColumnIndexBuilder.getBuilder(currentChunkType, columnIndexTruncateLength);
offsetIndexBuilder = OffsetIndexBuilder.getBuilder();
firstPageOffset = -1;
} | java | {
"resource": ""
} |
q172104 | ParquetFileWriter.writeDictionaryPage | test | public void writeDictionaryPage(DictionaryPage dictionaryPage) throws IOException {
state = state.write();
LOG.debug("{}: write dictionary page: {} values", out.getPos(), dictionaryPage.getDictionarySize());
currentChunkDictionaryPageOffset = out.getPos();
int uncompressedSize = dictionaryPage.getUncompressedSize();
int compressedPageSize = (int)dictionaryPage.getBytes().size(); // TODO: fix casts
metadataConverter.writeDictionaryPageHeader(
uncompressedSize,
compressedPageSize,
dictionaryPage.getDictionarySize(),
dictionaryPage.getEncoding(),
out);
long headerSize = out.getPos() - currentChunkDictionaryPageOffset;
this.uncompressedLength += uncompressedSize + headerSize;
this.compressedLength += compressedPageSize + headerSize;
LOG.debug("{}: write dictionary page content {}", out.getPos(), compressedPageSize);
dictionaryPage.getBytes().writeAllTo(out);
encodingStatsBuilder.addDictEncoding(dictionaryPage.getEncoding());
currentEncodings.add(dictionaryPage.getEncoding());
} | java | {
"resource": ""
} |
q172105 | ParquetFileWriter.writeDataPage | test | public void writeDataPage(
int valueCount, int uncompressedPageSize,
BytesInput bytes,
Statistics statistics,
long rowCount,
Encoding rlEncoding,
Encoding dlEncoding,
Encoding valuesEncoding) throws IOException {
long beforeHeader = out.getPos();
innerWriteDataPage(valueCount, uncompressedPageSize, bytes, statistics, rlEncoding, dlEncoding, valuesEncoding);
offsetIndexBuilder.add((int) (out.getPos() - beforeHeader), rowCount);
} | java | {
"resource": ""
} |
q172106 | ParquetFileWriter.writeColumnChunk | test | void writeColumnChunk(ColumnDescriptor descriptor,
long valueCount,
CompressionCodecName compressionCodecName,
DictionaryPage dictionaryPage,
BytesInput bytes,
long uncompressedTotalPageSize,
long compressedTotalPageSize,
Statistics<?> totalStats,
ColumnIndexBuilder columnIndexBuilder,
OffsetIndexBuilder offsetIndexBuilder,
Set<Encoding> rlEncodings,
Set<Encoding> dlEncodings,
List<Encoding> dataEncodings) throws IOException {
startColumn(descriptor, valueCount, compressionCodecName);
state = state.write();
if (dictionaryPage != null) {
writeDictionaryPage(dictionaryPage);
}
LOG.debug("{}: write data pages", out.getPos());
long headersSize = bytes.size() - compressedTotalPageSize;
this.uncompressedLength += uncompressedTotalPageSize + headersSize;
this.compressedLength += compressedTotalPageSize + headersSize;
LOG.debug("{}: write data pages content", out.getPos());
firstPageOffset = out.getPos();
bytes.writeAllTo(out);
encodingStatsBuilder.addDataEncodings(dataEncodings);
if (rlEncodings.isEmpty()) {
encodingStatsBuilder.withV2Pages();
}
currentEncodings.addAll(rlEncodings);
currentEncodings.addAll(dlEncodings);
currentEncodings.addAll(dataEncodings);
currentStatistics = totalStats;
this.columnIndexBuilder = columnIndexBuilder;
this.offsetIndexBuilder = offsetIndexBuilder;
endColumn();
} | java | {
"resource": ""
} |
q172107 | ParquetFileWriter.endBlock | test | public void endBlock() throws IOException {
state = state.endBlock();
LOG.debug("{}: end block", out.getPos());
currentBlock.setRowCount(currentRecordCount);
blocks.add(currentBlock);
columnIndexes.add(currentColumnIndexes);
offsetIndexes.add(currentOffsetIndexes);
currentColumnIndexes = null;
currentOffsetIndexes = null;
currentBlock = null;
} | java | {
"resource": ""
} |
q172108 | ParquetFileWriter.copy | test | /**
 * Copies exactly {@code length} bytes, starting at offset {@code start} in
 * {@code from}, to the current position of {@code to}.
 *
 * @throws IllegalArgumentException if the input ends before {@code length} bytes were read
 * @throws IOException on any underlying read/write failure
 */
private static void copy(SeekableInputStream from, PositionOutputStream to,
    long start, long length) throws IOException {
  LOG.debug("Copying {} bytes at {} to {}", length, start, to.getPos());
  from.seek(start);
  long bytesCopied = 0;
  byte[] buffer = COPY_BUFFER.get();
  while (bytesCopied < length) {
    long bytesLeft = length - bytesCopied;
    int bytesRead = from.read(buffer, 0,
        (buffer.length < bytesLeft ? buffer.length : (int) bytesLeft));
    if (bytesRead < 0) {
      // BUG FIX: previously "at " + start + bytesCopied string-concatenated the
      // two longs (e.g. 100 and 5 printed as "1005"); sum them so the message
      // reports the actual file position where input ended.
      throw new IllegalArgumentException(
          "Unexpected end of input file at " + (start + bytesCopied));
    }
    to.write(buffer, 0, bytesRead);
    bytesCopied += bytesRead;
  }
} | java | {
"resource": ""
} |
q172109 | ParquetFileWriter.end | test | public void end(Map<String, String> extraMetaData) throws IOException {
state = state.end();
serializeColumnIndexes(columnIndexes, blocks, out);
serializeOffsetIndexes(offsetIndexes, blocks, out);
LOG.debug("{}: end", out.getPos());
this.footer = new ParquetMetadata(new FileMetaData(schema, extraMetaData, Version.FULL_VERSION), blocks);
serializeFooter(footer, out);
out.close();
} | java | {
"resource": ""
} |
q172110 | ParquetFileWriter.mergeMetadataFiles | test | @Deprecated
public static ParquetMetadata mergeMetadataFiles(List<Path> files, Configuration conf) throws IOException {
Preconditions.checkArgument(!files.isEmpty(), "Cannot merge an empty list of metadata");
GlobalMetaData globalMetaData = null;
List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
for (Path p : files) {
ParquetMetadata pmd = ParquetFileReader.readFooter(conf, p, ParquetMetadataConverter.NO_FILTER);
FileMetaData fmd = pmd.getFileMetaData();
globalMetaData = mergeInto(fmd, globalMetaData, true);
blocks.addAll(pmd.getBlocks());
}
// collapse GlobalMetaData into a single FileMetaData, which will throw if they are not compatible
return new ParquetMetadata(globalMetaData.merge(), blocks);
} | java | {
"resource": ""
} |
q172111 | ParquetFileWriter.writeMergedMetadataFile | test | @Deprecated
public static void writeMergedMetadataFile(List<Path> files, Path outputPath, Configuration conf) throws IOException {
ParquetMetadata merged = mergeMetadataFiles(files, conf);
writeMetadataFile(outputPath, merged, outputPath.getFileSystem(conf));
} | java | {
"resource": ""
} |
q172112 | ParquetFileWriter.writeMetadataFile | test | @Deprecated
public static void writeMetadataFile(Configuration configuration, Path outputPath, List<Footer> footers) throws IOException {
writeMetadataFile(configuration, outputPath, footers, JobSummaryLevel.ALL);
} | java | {
"resource": ""
} |
q172113 | ParquetFileWriter.mergeInto | test | static GlobalMetaData mergeInto(
FileMetaData toMerge,
GlobalMetaData mergedMetadata) {
return mergeInto(toMerge, mergedMetadata, true);
} | java | {
"resource": ""
} |
q172114 | ColumnReaderBase.readValue | test | /**
 * Materializes the current value through the type-specific binding, at most
 * once per position ({@code valueRead} guards against double reads), and
 * translates any runtime decoding failure into a positional ParquetDecodingException.
 */
public void readValue() {
  try {
    if (!valueRead) {
      binding.read();
      valueRead = true;
    }
  } catch (RuntimeException e) {
    if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, currentEncoding) &&
        e instanceof ArrayIndexOutOfBoundsException) {
      // this is probably PARQUET-246, which may happen if reading data with
      // MR because this can't be detected without reading all footers
      throw new ParquetDecodingException("Read failure possibly due to " +
          "PARQUET-246: try setting parquet.split.files to false",
          decodingError(e));
    }
    throw decodingError(e);
  }
}

/**
 * Builds the detailed positional decoding error. Extracted because the exact
 * same message construction was previously duplicated in two places.
 */
private ParquetDecodingException decodingError(RuntimeException cause) {
  return new ParquetDecodingException(
      format("Can't read value in column %s at value %d out of %d, " +
          "%d out of %d in currentPage. repetition level: " +
          "%d, definition level: %d",
          path, readValues, totalValueCount,
          readValues - (endOfPageValueCount - pageValueCount),
          pageValueCount, repetitionLevel, definitionLevel),
      cause);
} | java | {
"resource": ""
} |
q172115 | Schemas.nullOk | test | public static boolean nullOk(Schema schema) {
  // A schema admits null when it is the NULL type itself, or a union with at
  // least one branch that (recursively) admits null.
  switch (schema.getType()) {
    case NULL:
      return true;
    case UNION:
      for (Schema branch : schema.getTypes()) {
        if (nullOk(branch)) {
          return true;
        }
      }
      return false;
    default:
      return false;
  }
} | java | {
"resource": ""
} |
q172116 | Schemas.coalesce | test | @SafeVarargs
private static <E> E coalesce(E... objects) {
  // Returns the first non-null argument, or null when every argument is null
  // (or no arguments were supplied).
  for (int i = 0; i < objects.length; i++) {
    E candidate = objects[i];
    if (candidate != null) {
      return candidate;
    }
  }
  return null;
} | java | {
"resource": ""
} |
q172117 | RecordBuilder.makeValue | test | private static Object makeValue(String string, Schema schema) {
if (string == null) {
return null;
}
try {
switch (schema.getType()) {
case BOOLEAN:
return Boolean.valueOf(string);
case STRING:
return string;
case FLOAT:
return Float.valueOf(string);
case DOUBLE:
return Double.valueOf(string);
case INT:
return Integer.valueOf(string);
case LONG:
return Long.valueOf(string);
case ENUM:
// TODO: translate to enum class
if (schema.hasEnumSymbol(string)) {
return string;
} else {
try {
return schema.getEnumSymbols().get(Integer.parseInt(string));
} catch (IndexOutOfBoundsException ex) {
return null;
}
}
case UNION:
Object value = null;
for (Schema possible : schema.getTypes()) {
value = makeValue(string, possible);
if (value != null) {
return value;
}
}
return null;
case NULL:
return null;
default:
// FIXED, BYTES, MAP, ARRAY, RECORD are not supported
throw new RecordException(
"Unsupported field type:" + schema.getType());
}
} catch (NumberFormatException e) {
// empty string is considered null for numeric types
if (string.isEmpty()) {
return null;
} else {
throw e;
}
}
} | java | {
"resource": ""
} |
q172118 | ThriftMetaData.fromExtraMetaData | test | public static ThriftMetaData fromExtraMetaData(
Map<String, String> extraMetaData) {
final String thriftClassName = extraMetaData.get(THRIFT_CLASS);
final String thriftDescriptorString = extraMetaData.get(THRIFT_DESCRIPTOR);
if (thriftClassName == null || thriftDescriptorString == null) {
return null;
}
final StructType descriptor = parseDescriptor(thriftDescriptorString);
return new ThriftMetaData(thriftClassName, descriptor);
} | java | {
"resource": ""
} |
q172119 | ThriftMetaData.fromThriftClass | test | @SuppressWarnings("unchecked")
public static ThriftMetaData fromThriftClass(Class<?> thriftClass) {
if (thriftClass != null && TBase.class.isAssignableFrom(thriftClass)) {
Class<? extends TBase<?, ?>> tClass = (Class<? extends TBase<?, ?>>) thriftClass;
StructType descriptor = new ThriftSchemaConverter().toStructType(tClass);
return new ThriftMetaData(thriftClass.getName(), descriptor);
}
return null;
} | java | {
"resource": ""
} |
q172120 | ThriftMetaData.toExtraMetaData | test | public Map<String, String> toExtraMetaData() {
final Map<String, String> map = new HashMap<String, String>();
map.put(THRIFT_CLASS, getThriftClass().getName());
map.put(THRIFT_DESCRIPTOR, descriptor.toJSON());
return map;
} | java | {
"resource": ""
} |
q172121 | ColumnWriterBase.writeNull | test | @Override
public void writeNull(int repetitionLevel, int definitionLevel) {
if (DEBUG)
log(null, repetitionLevel, definitionLevel);
repetitionLevel(repetitionLevel);
definitionLevel(definitionLevel);
statistics.incrementNumNulls();
++valueCount;
} | java | {
"resource": ""
} |
q172122 | ColumnWriterBase.writePage | test | void writePage() {
if (valueCount == 0) {
throw new ParquetEncodingException("writing empty page");
}
this.rowsWrittenSoFar += pageRowCount;
if (DEBUG)
LOG.debug("write page");
try {
writePage(pageRowCount, valueCount, statistics, repetitionLevelColumn, definitionLevelColumn, dataColumn);
} catch (IOException e) {
throw new ParquetEncodingException("could not write page for " + path, e);
}
repetitionLevelColumn.reset();
definitionLevelColumn.reset();
dataColumn.reset();
valueCount = 0;
resetStatistics();
pageRowCount = 0;
} | java | {
"resource": ""
} |
q172123 | DeltaBinaryPackingValuesReader.initFromPage | test | @Override
public void initFromPage(int valueCount, ByteBufferInputStream stream) throws IOException {
this.in = stream;
long startPos = in.position();
this.config = DeltaBinaryPackingConfig.readConfig(in);
this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
allocateValuesBuffer();
bitWidths = new int[config.miniBlockNumInABlock];
//read first value from header
valuesBuffer[valuesBuffered++] = BytesUtils.readZigZagVarLong(in);
while (valuesBuffered < totalValueCount) { //values Buffered could be more than totalValueCount, since we flush on a mini block basis
loadNewBlockToBuffer();
}
updateNextOffset((int) (in.position() - startPos));
} | java | {
"resource": ""
} |
q172124 | DeltaBinaryPackingValuesReader.allocateValuesBuffer | test | private void allocateValuesBuffer() {
int totalMiniBlockCount = (int) Math.ceil((double) totalValueCount / config.miniBlockSizeInValues);
//+ 1 because first value written to header is also stored in values buffer
valuesBuffer = new long[totalMiniBlockCount * config.miniBlockSizeInValues + 1];
} | java | {
"resource": ""
} |
q172125 | BufferedProtocolReadToWrite.checkEnum | test | private void checkEnum(ThriftType expectedType, int i) {
if (expectedType.getType() == ThriftTypeID.ENUM) {
ThriftType.EnumType expectedEnumType = (ThriftType.EnumType)expectedType;
if (expectedEnumType.getEnumValueById(i) == null) {
throw new DecodingSchemaMismatchException("can not find index " + i + " in enum " + expectedType);
}
}
} | java | {
"resource": ""
} |
q172126 | DeltaBinaryPackingValuesWriterForInteger.calculateBitWidthsForDeltaBlockBuffer | test | private void calculateBitWidthsForDeltaBlockBuffer(int miniBlocksToFlush) {
for (int miniBlockIndex = 0; miniBlockIndex < miniBlocksToFlush; miniBlockIndex++) {
int mask = 0;
int miniStart = miniBlockIndex * config.miniBlockSizeInValues;
//The end of current mini block could be the end of current block(deltaValuesToFlush) buffer when data is not aligned to mini block
int miniEnd = Math.min((miniBlockIndex + 1) * config.miniBlockSizeInValues, deltaValuesToFlush);
for (int i = miniStart; i < miniEnd; i++) {
mask |= deltaBlockBuffer[i];
}
bitWidths[miniBlockIndex] = 32 - Integer.numberOfLeadingZeros(mask);
}
} | java | {
"resource": ""
} |
q172127 | Exceptions.throwIfInstance | test | public static <E extends Exception> void throwIfInstance(Throwable t,
    Class<E> excClass)
    throws E {
  // Rethrow t with its precise checked type when it is an instance of
  // excClass; otherwise fall through and let the caller keep handling it
  // as a plain Throwable.
  if (excClass.isInstance(t)) {
    throw excClass.cast(t);
  }
} | java | {
"resource": ""
} |
q172128 | Statistics.getStatsBasedOnType | test | @Deprecated
public static Statistics getStatsBasedOnType(PrimitiveTypeName type) {
  // Maps each physical Parquet type to its typed statistics holder.
  // BINARY, INT96 and FIXED_LEN_BYTE_ARRAY all share BinaryStatistics,
  // so the three cases fall through to one return.
  switch (type) {
    case INT32:
      return new IntStatistics();
    case INT64:
      return new LongStatistics();
    case FLOAT:
      return new FloatStatistics();
    case DOUBLE:
      return new DoubleStatistics();
    case BOOLEAN:
      return new BooleanStatistics();
    case BINARY:
    case INT96:
    case FIXED_LEN_BYTE_ARRAY:
      return new BinaryStatistics();
    default:
      throw new UnknownColumnTypeException(type);
  }
} | java | {
"resource": ""
} |
q172129 | Statistics.getBuilderForReading | test | public static Builder getBuilderForReading(PrimitiveType type) {
switch (type.getPrimitiveTypeName()) {
case FLOAT:
return new FloatBuilder(type);
case DOUBLE:
return new DoubleBuilder(type);
default:
return new Builder(type);
}
} | java | {
"resource": ""
} |
q172130 | Statistics.mergeStatistics | test | public void mergeStatistics(Statistics stats) {
if (stats.isEmpty()) return;
// Merge stats only if they have the same type
if (type.equals(stats.type)) {
incrementNumNulls(stats.getNumNulls());
if (stats.hasNonNullValue()) {
mergeStatisticsMinMax(stats);
markAsNotEmpty();
}
} else {
throw StatisticsClassException.create(this, stats);
}
} | java | {
"resource": ""
} |
q172131 | AvroSchemaConverter.getNonNull | test | public static Schema getNonNull(Schema schema) {
  // Unwraps an optional-style union of exactly [NULL, T] or [T, NULL] to T.
  // Any other shape (non-union, branch count != 2, no NULL branch) is
  // returned unchanged.
  if (schema.getType() != Schema.Type.UNION) {
    return schema;
  }
  List<Schema> branches = schema.getTypes();
  if (branches.size() != 2) {
    return schema;
  }
  if (branches.get(0).getType() == Schema.Type.NULL) {
    return branches.get(1);
  }
  if (branches.get(1).getType() == Schema.Type.NULL) {
    return branches.get(0);
  }
  return schema;
} | java | {
"resource": ""
} |
q172132 | ContextUtil.newTaskAttemptContext | test | /**
 * Instantiates a TaskAttemptContext reflectively so the same code works
 * against both the mapred and mapreduce flavors of the Hadoop API.
 *
 * @throws IllegalArgumentException if reflective construction fails (cause preserved)
 */
public static TaskAttemptContext newTaskAttemptContext(
    Configuration conf, TaskAttemptID taskAttemptId) {
  try {
    return (TaskAttemptContext)
        TASK_CONTEXT_CONSTRUCTOR.newInstance(conf, taskAttemptId);
  } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
    // Collapsed three byte-identical catch blocks into one multi-catch
    // (Java 7+, already used elsewhere in this codebase via try-with-resources).
    throw new IllegalArgumentException("Can't instantiate TaskAttemptContext", e);
  }
} | java | {
"resource": ""
} |
q172133 | ContextUtil.invoke | test | /**
 * Reflective call helper: invokes {@code method} on {@code obj}, wrapping
 * checked reflection failures in an unchecked exception with the cause preserved.
 */
private static Object invoke(Method method, Object obj, Object... args) {
  try {
    return method.invoke(obj, args);
  } catch (IllegalAccessException | InvocationTargetException e) {
    // Multi-catch replaces two duplicate catch blocks with identical bodies.
    throw new IllegalArgumentException("Can't invoke method " + method.getName(), e);
  }
} | java | {
"resource": ""
} |
q172134 | GroupType.membersDisplayString | test | void membersDisplayString(StringBuilder sb, String indent) {
for (Type field : fields) {
field.writeToStringBuilder(sb, indent);
if (field.isPrimitive()) {
sb.append(";");
}
sb.append("\n");
}
} | java | {
"resource": ""
} |
q172135 | GroupType.mergeFields | test | List<Type> mergeFields(GroupType toMerge, boolean strict) {
List<Type> newFields = new ArrayList<Type>();
// merge existing fields
for (Type type : this.getFields()) {
Type merged;
if (toMerge.containsField(type.getName())) {
Type fieldToMerge = toMerge.getType(type.getName());
if (type.getLogicalTypeAnnotation() != null && !type.getLogicalTypeAnnotation().equals(fieldToMerge.getLogicalTypeAnnotation())) {
throw new IncompatibleSchemaModificationException("cannot merge logical type " + fieldToMerge.getLogicalTypeAnnotation() + " into " + type.getLogicalTypeAnnotation());
}
merged = type.union(fieldToMerge, strict);
} else {
merged = type;
}
newFields.add(merged);
}
// add new fields
for (Type type : toMerge.getFields()) {
if (!this.containsField(type.getName())) {
newFields.add(type);
}
}
return newFields;
} | java | {
"resource": ""
} |
q172136 | BenchmarkCounter.initCounterFromReporter | test | public static void initCounterFromReporter(Reporter reporter, Configuration configuration) {
counterLoader = new MapRedCounterLoader(reporter, configuration);
loadCounters();
} | java | {
"resource": ""
} |
q172137 | ParquetInputFormat.getFilter | test | public static Filter getFilter(Configuration conf) {
return FilterCompat.get(getFilterPredicate(conf), getUnboundRecordFilterInstance(conf));
} | java | {
"resource": ""
} |
q172138 | ParquetInputFormat.getFooters | test | public List<Footer> getFooters(Configuration configuration, Collection<FileStatus> statuses) throws IOException {
LOG.debug("reading {} files", statuses.size());
boolean taskSideMetaData = isTaskSideMetaData(configuration);
return ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(configuration, statuses, taskSideMetaData);
} | java | {
"resource": ""
} |
q172139 | ClientSideMetadataSplitStrategy.generateSplits | test | static <T> List<ParquetInputSplit> generateSplits(
List<BlockMetaData> rowGroupBlocks,
BlockLocation[] hdfsBlocksArray,
FileStatus fileStatus,
String requestedSchema,
Map<String, String> readSupportMetadata, long minSplitSize, long maxSplitSize) throws IOException {
List<SplitInfo> splitRowGroups =
generateSplitInfo(rowGroupBlocks, hdfsBlocksArray, minSplitSize, maxSplitSize);
//generate splits from rowGroups of each split
List<ParquetInputSplit> resultSplits = new ArrayList<ParquetInputSplit>();
for (SplitInfo splitInfo : splitRowGroups) {
ParquetInputSplit split = splitInfo.getParquetInputSplit(fileStatus, requestedSchema, readSupportMetadata);
resultSplits.add(split);
}
return resultSplits;
} | java | {
"resource": ""
} |
q172140 | ProtoWriteSupport.write | test | @Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message " + e.getMessage() + " : " + m);
throw e;
}
recordConsumer.endMessage();
} | java | {
"resource": ""
} |
q172141 | ProtoWriteSupport.validatedMapping | test | private void validatedMapping(Descriptor descriptor, GroupType parquetSchema) {
List<FieldDescriptor> allFields = descriptor.getFields();
for (FieldDescriptor fieldDescriptor: allFields) {
String fieldName = fieldDescriptor.getName();
int fieldIndex = fieldDescriptor.getIndex();
int parquetIndex = parquetSchema.getFieldIndex(fieldName);
if (fieldIndex != parquetIndex) {
String message = "FieldIndex mismatch name=" + fieldName + ": " + fieldIndex + " != " + parquetIndex;
throw new IncompatibleSchemaModificationException(message);
}
}
} | java | {
"resource": ""
} |
q172142 | ProtoWriteSupport.serializeDescriptor | test | private String serializeDescriptor(Class<? extends Message> protoClass) {
Descriptor descriptor = Protobufs.getMessageDescriptor(protoClass);
DescriptorProtos.DescriptorProto asProto = descriptor.toProto();
return TextFormat.printToString(asProto);
} | java | {
"resource": ""
} |
q172143 | CodecFactory.createDirectCodecFactory | test | public static CodecFactory createDirectCodecFactory(Configuration config, ByteBufferAllocator allocator, int pageSize) {
return new DirectCodecFactory(config, allocator, pageSize);
} | java | {
"resource": ""
} |
q172144 | ProtocolEventsAmender.amendMissingRequiredFields | test | public List<TProtocol> amendMissingRequiredFields(StructType recordThriftType) throws TException {
Iterator<TProtocol> protocolIter = rootEvents.iterator();
checkStruct(protocolIter, recordThriftType);
return fixedEvents;
} | java | {
"resource": ""
} |
q172145 | ProtocolEventsAmender.checkSet | test | private void checkSet(Iterator<TProtocol> eventIter, ThriftField setFieldDefinition) throws TException {
TSet thriftSet = acceptProtocol(eventIter.next()).readSetBegin();
ThriftField elementFieldDefinition = ((ThriftType.SetType) setFieldDefinition.getType()).getValues();
int setSize = thriftSet.size;
for (int i = 0; i < setSize; i++) {
checkField(thriftSet.elemType, eventIter, elementFieldDefinition);
}
acceptProtocol(eventIter.next()).readSetEnd();
} | java | {
"resource": ""
} |
q172146 | Util.readFileMetaData | test | public static FileMetaData readFileMetaData(InputStream from, boolean skipRowGroups) throws IOException {
FileMetaData md = new FileMetaData();
if (skipRowGroups) {
readFileMetaData(from, new DefaultFileMetaDataConsumer(md), skipRowGroups);
} else {
read(from, md);
}
return md;
} | java | {
"resource": ""
} |
q172147 | ThriftToParquetFileWriter.close | test | @Override
public void close() throws IOException {
  // Delegates to the underlying record writer; translates interruption into
  // an IOException for callers that only handle I/O failures.
  try {
    recordWriter.close(taskAttemptContext);
  } catch (InterruptedException e) {
    // BUG FIX: Thread.interrupted() *clears* the thread's interrupt flag.
    // Re-assert it with Thread.currentThread().interrupt() so code further up
    // the stack can still observe that this thread was interrupted.
    Thread.currentThread().interrupt();
    throw new IOException("The thread was interrupted", e);
  }
} | java | {
"resource": ""
} |
q172148 | ExampleOutputFormat.setSchema | test | public static void setSchema(Job job, MessageType schema) {
GroupWriteSupport.setSchema(schema, ContextUtil.getConfiguration(job));
} | java | {
"resource": ""
} |
q172149 | ValidTypeMap.add | test | private static void add(Class<?> c, PrimitiveTypeName p) {
Set<PrimitiveTypeName> descriptors = classToParquetType.get(c);
if (descriptors == null) {
descriptors = new HashSet<PrimitiveTypeName>();
classToParquetType.put(c, descriptors);
}
descriptors.add(p);
Set<Class<?>> classes = parquetTypeToClass.get(p);
if (classes == null) {
classes = new HashSet<Class<?>>();
parquetTypeToClass.put(p, classes);
}
classes.add(c);
} | java | {
"resource": ""
} |
q172150 | ValidTypeMap.assertTypeValid | test | public static <T extends Comparable<T>> void assertTypeValid(Column<T> foundColumn, PrimitiveTypeName primitiveType) {
Class<T> foundColumnType = foundColumn.getColumnType();
ColumnPath columnPath = foundColumn.getColumnPath();
Set<PrimitiveTypeName> validTypeDescriptors = classToParquetType.get(foundColumnType);
if (validTypeDescriptors == null) {
StringBuilder message = new StringBuilder();
message
.append("Column ")
.append(columnPath.toDotString())
.append(" was declared as type: ")
.append(foundColumnType.getName())
.append(" which is not supported in FilterPredicates.");
Set<Class<?>> supportedTypes = parquetTypeToClass.get(primitiveType);
if (supportedTypes != null) {
message
.append(" Supported types for this column are: ")
.append(supportedTypes);
} else {
message.append(" There are no supported types for columns of " + primitiveType);
}
throw new IllegalArgumentException(message.toString());
}
if (!validTypeDescriptors.contains(primitiveType)) {
StringBuilder message = new StringBuilder();
message
.append("FilterPredicate column: ")
.append(columnPath.toDotString())
.append("'s declared type (")
.append(foundColumnType.getName())
.append(") does not match the schema found in file metadata. Column ")
.append(columnPath.toDotString())
.append(" is of type: ")
.append(primitiveType)
.append("\nValid types for this column are: ")
.append(parquetTypeToClass.get(primitiveType));
throw new IllegalArgumentException(message.toString());
}
} | java | {
"resource": ""
} |
q172151 | Ints.checkedCast | test | public static int checkedCast(long value) {
  // Narrows a long to an int, rejecting any value outside the int range:
  // the round-trip cast only preserves the value when it fits.
  int narrowed = (int) value;
  if ((long) narrowed != value) {
    throw new IllegalArgumentException(String.format("Overflow casting %d to an int", value));
  }
  return narrowed;
} | java | {
"resource": ""
} |
q172152 | SchemaConverter.fromArrow | test | public SchemaMapping fromArrow(Schema arrowSchema) {
List<Field> fields = arrowSchema.getFields();
List<TypeMapping> parquetFields = fromArrow(fields);
MessageType parquetType = addToBuilder(parquetFields, Types.buildMessage()).named("root");
return new SchemaMapping(arrowSchema, parquetType, parquetFields);
} | java | {
"resource": ""
} |
q172153 | SchemaConverter.fromParquet | test | public SchemaMapping fromParquet(MessageType parquetSchema) {
List<Type> fields = parquetSchema.getFields();
List<TypeMapping> mappings = fromParquet(fields);
List<Field> arrowFields = fields(mappings);
return new SchemaMapping(new Schema(arrowFields), parquetSchema, mappings);
} | java | {
"resource": ""
} |
q172154 | SchemaConverter.map | test | public SchemaMapping map(Schema arrowSchema, MessageType parquetSchema) {
List<TypeMapping> children = map(arrowSchema.getFields(), parquetSchema.getFields());
return new SchemaMapping(arrowSchema, parquetSchema, children);
} | java | {
"resource": ""
} |
q172155 | ParquetMetadataConverter.writeDataPageV2Header | test | @Deprecated
public void writeDataPageV2Header(
int uncompressedSize, int compressedSize,
int valueCount, int nullCount, int rowCount,
org.apache.parquet.column.statistics.Statistics statistics,
org.apache.parquet.column.Encoding dataEncoding,
int rlByteLength, int dlByteLength,
OutputStream to) throws IOException {
writePageHeader(
newDataPageV2Header(
uncompressedSize, compressedSize,
valueCount, nullCount, rowCount,
dataEncoding,
rlByteLength, dlByteLength), to);
} | java | {
"resource": ""
} |
q172156 | FilteredRecordReader.skipToMatch | test | private void skipToMatch() {
while (recordsRead < recordCount && !recordFilter.isMatch()) {
State currentState = getState(0);
do {
ColumnReader columnReader = currentState.column;
// currentLevel = depth + 1 at this point
// set the current value
if (columnReader.getCurrentDefinitionLevel() >= currentState.maxDefinitionLevel) {
columnReader.skip();
}
columnReader.consume();
// Based on repetition level work out next state to go to
int nextR = currentState.maxRepetitionLevel == 0 ? 0 : columnReader.getCurrentRepetitionLevel();
currentState = currentState.getNextState(nextR);
} while (currentState != null);
++ recordsRead;
}
} | java | {
"resource": ""
} |
q172157 | SerializationUtil.writeObjectToConfAsBase64 | test | /**
 * Java-serializes {@code obj}, gzips the bytes, and stores them under
 * {@code key} in the configuration as a Base64 string.
 *
 * @throws IOException if serialization or compression fails
 */
public static void writeObjectToConfAsBase64(String key, Object obj, Configuration conf) throws IOException {
  try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
    try (GZIPOutputStream gos = new GZIPOutputStream(baos);
        ObjectOutputStream oos = new ObjectOutputStream(gos)) {
      oos.writeObject(obj);
    }
    // Use the JDK's Base64 codec instead of commons-codec: drops a third-party
    // dependency and avoids the intermediate byte[] -> String charset round-trip.
    // Output is standard (non-chunked) Base64, identical to commons encodeBase64.
    conf.set(key, java.util.Base64.getEncoder().encodeToString(baos.toByteArray()));
  }
} | java | {
"resource": ""
} |
q172158 | LruCache.remove | test | public V remove(final K key) {
V oldValue = cacheMap.remove(key);
if (oldValue != null) {
LOG.debug("Removed cache entry for '{}'", key);
}
return oldValue;
} | java | {
"resource": ""
} |
q172159 | LruCache.put | test | public void put(final K key, final V newValue) {
// Drop candidates that are null or already stale for this key -- caching
// them would only serve bad data later. NOTE(review): staleness semantics
// come from the V implementation's isCurrent; assumed cheap to call.
if (newValue == null || !newValue.isCurrent(key)) {
if (LOG.isWarnEnabled()) {
LOG.warn("Ignoring new cache entry for '{}' because it is {}", key,
(newValue == null ? "null" : "not current"));
}
return;
}
// Keep the existing entry when it is strictly newer than the candidate.
V oldValue = cacheMap.get(key);
if (oldValue != null && oldValue.isNewerThan(newValue)) {
if (LOG.isWarnEnabled()) {
LOG.warn("Ignoring new cache entry for '{}' because "
+ "existing cache entry is newer", key);
}
return;
}
// no existing value or new value is newer than old value
oldValue = cacheMap.put(key, newValue);
if (LOG.isDebugEnabled()) {
if (oldValue == null) {
LOG.debug("Added new cache entry for '{}'", key);
} else {
LOG.debug("Overwrote existing cache entry for '{}'", key);
}
}
} | java | {
"resource": ""
} |
q172160 | LruCache.getCurrentValue | test | public V getCurrentValue(final K key) {
V value = cacheMap.get(key);
LOG.debug("Value for '{}' {} in cache", key, (value == null ? "not " : ""));
// Evict-on-read: a cached value that is no longer current is removed so the
// next lookup misses cleanly instead of returning stale data.
if (value != null && !value.isCurrent(key)) {
// value is not current; remove it and return null
remove(key);
return null;
}
return value;
} | java | {
"resource": ""
} |
q172161 | AvroWriteSupport.writeValue | test | private void writeValue(Type type, Schema avroSchema, Object value) {
Schema nonNullAvroSchema = AvroSchemaConverter.getNonNull(avroSchema);
LogicalType logicalType = nonNullAvroSchema.getLogicalType();
if (logicalType != null) {
Conversion<?> conversion = model.getConversionByClass(
value.getClass(), logicalType);
writeValueWithoutConversion(type, nonNullAvroSchema,
convert(nonNullAvroSchema, logicalType, conversion, value));
} else {
writeValueWithoutConversion(type, nonNullAvroSchema, value);
}
} | java | {
"resource": ""
} |
q172162 | AvroWriteSupport.writeValueWithoutConversion | test | @SuppressWarnings("unchecked")
private void writeValueWithoutConversion(Type type, Schema avroSchema, Object value) {
switch (avroSchema.getType()) {
case BOOLEAN:
recordConsumer.addBoolean((Boolean) value);
break;
case INT:
if (value instanceof Character) {
recordConsumer.addInteger((Character) value);
} else {
recordConsumer.addInteger(((Number) value).intValue());
}
break;
case LONG:
recordConsumer.addLong(((Number) value).longValue());
break;
case FLOAT:
recordConsumer.addFloat(((Number) value).floatValue());
break;
case DOUBLE:
recordConsumer.addDouble(((Number) value).doubleValue());
break;
case FIXED:
recordConsumer.addBinary(Binary.fromReusedByteArray(((GenericFixed) value).bytes()));
break;
case BYTES:
if (value instanceof byte[]) {
recordConsumer.addBinary(Binary.fromReusedByteArray((byte[]) value));
} else {
recordConsumer.addBinary(Binary.fromReusedByteBuffer((ByteBuffer) value));
}
break;
case STRING:
recordConsumer.addBinary(fromAvroString(value));
break;
case RECORD:
writeRecord(type.asGroupType(), avroSchema, value);
break;
case ENUM:
recordConsumer.addBinary(Binary.fromString(value.toString()));
break;
case ARRAY:
listWriter.writeList(type.asGroupType(), avroSchema, value);
break;
case MAP:
writeMap(type.asGroupType(), avroSchema, (Map<CharSequence, ?>) value);
break;
case UNION:
writeUnion(type.asGroupType(), avroSchema, value);
break;
} | java | {
"resource": ""
} |
q172163 | PathGlobPattern.set | test | public void set(String glob) {
StringBuilder regex = new StringBuilder();
int setOpen = 0;
int curlyOpen = 0;
int len = glob.length();
hasWildcard = false;
for (int i = 0; i < len; i++) {
char c = glob.charAt(i);
switch (c) {
case BACKSLASH:
if (++i >= len) {
error("Missing escaped character", glob, i);
}
regex.append(c).append(glob.charAt(i));
continue;
case '.':
case '$':
case '(':
case ')':
case '|':
case '+':
// escape regex special chars that are not glob special chars
regex.append(BACKSLASH);
break;
case '*':
if (i + 1 < len && glob.charAt(i + 1) == '*') {
regex.append('.');
i++;
break;
}
regex.append("[^" + PATH_SEPARATOR + "]");
hasWildcard = true;
break;
case '?':
regex.append('.');
hasWildcard = true;
continue;
case '{': // start of a group
regex.append("(?:"); // non-capturing
curlyOpen++;
hasWildcard = true;
continue;
case ',':
regex.append(curlyOpen > 0 ? '|' : c);
continue;
case '}':
if (curlyOpen > 0) {
// end of a group
curlyOpen--;
regex.append(")");
continue;
}
break;
case '[':
if (setOpen > 0) {
error("Unclosed character class", glob, i);
}
setOpen++;
hasWildcard = true;
break;
case '^': // ^ inside [...] can be unescaped
if (setOpen == 0) {
regex.append(BACKSLASH);
}
break;
case '!': // [! needs to be translated to [^
regex.append(setOpen > 0 && '[' == glob.charAt(i - 1) ? '^' : '!');
continue;
case ']':
// Many set errors like [][] could not be easily detected here,
// as []], []-] and [-] are all valid POSIX glob and java regex.
// We'll just let the regex compiler do the real work.
setOpen = 0;
break;
default:
}
regex.append(c);
}
if (setOpen > 0) {
error("Unclosed character class", glob, len);
}
if (curlyOpen > 0) {
error("Unclosed group", glob, len);
}
compiled = Pattern.compile(regex.toString());
} | java | {
"resource": ""
} |
q172164 | BaseCommand.output | test | public void output(String content, Logger console, String filename)
throws IOException {
if (filename == null || "-".equals(filename)) {
console.info(content);
} else {
FSDataOutputStream outgoing = create(filename);
try {
outgoing.write(content.getBytes(StandardCharsets.UTF_8));
} finally {
outgoing.close();
}
}
} | java | {
"resource": ""
} |
q172165 | BaseCommand.open | test | public InputStream open(String filename) throws IOException {
if (STDIN_AS_SOURCE.equals(filename)) {
return System.in;
}
URI uri = qualifiedURI(filename);
if (RESOURCE_URI_SCHEME.equals(uri.getScheme())) {
return Resources.getResource(uri.getRawSchemeSpecificPart()).openStream();
} else {
Path filePath = new Path(uri);
// even though it was qualified using the default FS, it may not be in it
FileSystem fs = filePath.getFileSystem(getConf());
return fs.open(filePath);
}
} | java | {
"resource": ""
} |
q172166 | ColumnRecordFilter.column | test | public static final UnboundRecordFilter column(final String columnPath,
final ColumnPredicates.Predicate predicate) {
checkNotNull(columnPath, "columnPath");
checkNotNull(predicate, "predicate");
return new UnboundRecordFilter() {
final String[] filterPath = columnPath.split("\\.");
@Override
public RecordFilter bind(Iterable<ColumnReader> readers) {
for (ColumnReader reader : readers) {
if ( Arrays.equals( reader.getDescriptor().getPath(), filterPath)) {
return new ColumnRecordFilter(reader, predicate);
}
}
throw new IllegalArgumentException( "Column " + columnPath + " does not exist.");
}
};
} | java | {
"resource": ""
} |
q172167 | ThriftSchemaConverter.convert | test | public MessageType convert(StructType struct) {
MessageType messageType = ThriftSchemaConvertVisitor.convert(struct, fieldProjectionFilter, true);
fieldProjectionFilter.assertNoUnmatchedPatterns();
return messageType;
} | java | {
"resource": ""
} |
q172168 | ConversionPatterns.listWrapper | test | private static GroupType listWrapper(Repetition repetition, String alias, LogicalTypeAnnotation logicalTypeAnnotation, Type nested) {
if (!nested.isRepetition(Repetition.REPEATED)) {
throw new IllegalArgumentException("Nested type should be repeated: " + nested);
}
return new GroupType(repetition, alias, logicalTypeAnnotation, nested);
} | java | {
"resource": ""
} |
q172169 | ConversionPatterns.listOfElements | test | public static GroupType listOfElements(Repetition listRepetition, String name, Type elementType) {
Preconditions.checkArgument(elementType.getName().equals(ELEMENT_NAME),
"List element type must be named 'element'");
return listWrapper(
listRepetition,
name,
LogicalTypeAnnotation.listType(),
new GroupType(Repetition.REPEATED, "list", elementType)
);
} | java | {
"resource": ""
} |
q172170 | InitContext.getMergedKeyValueMetaData | test | @Deprecated
public Map<String, String> getMergedKeyValueMetaData() {
if (mergedKeyValueMetadata == null) {
Map<String, String> mergedKeyValues = new HashMap<String, String>();
for (Entry<String, Set<String>> entry : keyValueMetadata.entrySet()) {
if (entry.getValue().size() > 1) {
throw new RuntimeException("could not merge metadata: key " + entry.getKey() + " has conflicting values: " + entry.getValue());
}
mergedKeyValues.put(entry.getKey(), entry.getValue().iterator().next());
}
mergedKeyValueMetadata = mergedKeyValues;
}
return mergedKeyValueMetadata;
} | java | {
"resource": ""
} |
q172171 | ParquetRecordReaderWrapper.getSplit | test | protected ParquetInputSplit getSplit(
final InputSplit oldSplit,
final JobConf conf
) throws IOException {
if (oldSplit instanceof FileSplit) {
FileSplit fileSplit = (FileSplit) oldSplit;
final long splitStart = fileSplit.getStart();
final long splitLength = fileSplit.getLength();
final Path finalPath = fileSplit.getPath();
final JobConf cloneJob = hiveBinding.pushProjectionsAndFilters(conf, finalPath.getParent());
final ParquetMetadata parquetMetadata = ParquetFileReader.readFooter(cloneJob, finalPath, SKIP_ROW_GROUPS);
final FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
final ReadContext readContext =
new DataWritableReadSupport()
.init(cloneJob, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());
schemaSize = MessageTypeParser.parseMessageType(
readContext.getReadSupportMetadata().get(DataWritableReadSupport.HIVE_SCHEMA_KEY)
).getFieldCount();
return new ParquetInputSplit(
finalPath,
splitStart,
splitStart + splitLength,
splitLength,
fileSplit.getLocations(),
null);
} else {
throw new IllegalArgumentException("Unknown split type: " + oldSplit);
}
} | java | {
"resource": ""
} |
q172172 | AvroRecordConverter.getFieldsByName | test | private static Map<String, Class<?>> getFieldsByName(Class<?> recordClass,
boolean excludeJava) {
Map<String, Class<?>> fields = new LinkedHashMap<String, Class<?>>();
if (recordClass != null) {
Class<?> current = recordClass;
do {
if (excludeJava && current.getPackage() != null
&& current.getPackage().getName().startsWith("java.")) {
break; // skip java built-in classes
}
for (Field field : current.getDeclaredFields()) {
if (field.isAnnotationPresent(AvroIgnore.class) ||
isTransientOrStatic(field)) {
continue;
}
AvroName altName = field.getAnnotation(AvroName.class);
Class<?> existing = fields.put(
altName != null ? altName.value() : field.getName(),
field.getType());
if (existing != null) {
throw new AvroTypeException(
current + " contains two fields named: " + field.getName());
}
}
current = current.getSuperclass();
} while (current != null);
}
return fields;
} | java | {
"resource": ""
} |
q172173 | DataWritableReadSupport.resolveSchemaAccess | test | private MessageType resolveSchemaAccess(MessageType requestedSchema, MessageType fileSchema,
Configuration configuration) {
if(configuration.getBoolean(PARQUET_COLUMN_INDEX_ACCESS, false)) {
final List<String> listColumns = getColumns(configuration.get(IOConstants.COLUMNS));
List<Type> requestedTypes = new ArrayList<Type>();
for(Type t : requestedSchema.getFields()) {
int index = listColumns.indexOf(t.getName());
requestedTypes.add(fileSchema.getType(index));
}
requestedSchema = new MessageType(requestedSchema.getName(), requestedTypes);
}
return requestedSchema;
} | java | {
"resource": ""
} |
q172174 | MergeCommand.getInputFiles | test | private List<Path> getInputFiles(List<String> input) throws IOException {
List<Path> inputFiles = null;
if (input.size() == 1) {
Path p = new Path(input.get(0));
FileSystem fs = p.getFileSystem(conf);
FileStatus status = fs.getFileStatus(p);
if (status.isDir()) {
inputFiles = getInputFilesFromDirectory(status);
}
} else {
inputFiles = parseInputFiles(input);
}
checkParquetFiles(inputFiles);
return inputFiles;
} | java | {
"resource": ""
} |
q172175 | MergeCommand.checkParquetFiles | test | private void checkParquetFiles(List<Path> inputFiles) throws IOException {
if (inputFiles == null || inputFiles.size() <= 1) {
throw new IllegalArgumentException("Not enough files to merge");
}
for (Path inputFile: inputFiles) {
FileSystem fs = inputFile.getFileSystem(conf);
FileStatus status = fs.getFileStatus(inputFile);
if (status.isDir()) {
throw new IllegalArgumentException("Illegal parquet file: " + inputFile.toUri());
}
}
} | java | {
"resource": ""
} |
q172176 | MergeCommand.getInputFilesFromDirectory | test | private List<Path> getInputFilesFromDirectory(FileStatus partitionDir) throws IOException {
FileSystem fs = partitionDir.getPath().getFileSystem(conf);
FileStatus[] inputFiles = fs.listStatus(partitionDir.getPath(), HiddenFileFilter.INSTANCE);
List<Path> input = new ArrayList<Path>();
for (FileStatus f: inputFiles) {
input.add(f.getPath());
}
return input;
} | java | {
"resource": ""
} |
q172177 | PagedRecordFilter.page | test | public static final UnboundRecordFilter page( final long startPos, final long pageSize ) {
return new UnboundRecordFilter() {
@Override
public RecordFilter bind(Iterable<ColumnReader> readers) {
return new PagedRecordFilter( startPos, pageSize );
}
};
} | java | {
"resource": ""
} |
q172178 | Consumers.listOf | test | public static <T extends TBase<T,? extends TFieldIdEnum>> ListConsumer listOf(Class<T> c, final Consumer<List<T>> consumer) {
class ListConsumer implements Consumer<T> {
List<T> list;
@Override
public void consume(T t) {
list.add(t);
}
}
final ListConsumer co = new ListConsumer();
return new DelegatingListElementsConsumer(struct(c, co)) {
@Override
public void consumeList(TProtocol protocol,
EventBasedThriftReader reader, TList tList) throws TException {
co.list = new ArrayList<T>();
super.consumeList(protocol, reader, tList);
consumer.consume(co.list);
}
};
} | java | {
"resource": ""
} |
q172179 | Hive010Binding.init | test | private void init(final JobConf job) {
final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
if (mrwork == null && plan != null && plan.length() > 0) {
mrwork = Utilities.getMapRedWork(job);
pathToPartitionInfo.clear();
for (final Map.Entry<String, PartitionDesc> entry : mrwork.getPathToPartitionInfo().entrySet()) {
pathToPartitionInfo.put(new Path(entry.getKey()).toUri().getPath().toString(), entry.getValue());
}
}
} | java | {
"resource": ""
} |
q172180 | Summary.merge | test | private static TupleSummaryData merge(Tuple t) throws IOException {
TupleSummaryData summaryData = new TupleSummaryData();
DataBag bag = (DataBag) t.get(0);
for (Tuple tuple : bag) {
summaryData.merge(getData(tuple));
}
return summaryData;
} | java | {
"resource": ""
} |
q172181 | Summary.sumUp | test | private static TupleSummaryData sumUp(Schema schema, Tuple t) throws ExecException {
TupleSummaryData summaryData = new TupleSummaryData();
DataBag bag = (DataBag) t.get(0);
for (Tuple tuple : bag) {
summaryData.addTuple(schema, tuple);
}
return summaryData;
} | java | {
"resource": ""
} |
q172182 | EventBasedThriftReader.readStruct | test | public void readStruct(FieldConsumer c) throws TException {
protocol.readStructBegin();
readStructContent(c);
protocol.readStructEnd();
} | java | {
"resource": ""
} |
q172183 | EventBasedThriftReader.readMapEntry | test | public void readMapEntry(byte keyType, TypedConsumer keyConsumer, byte valueType, TypedConsumer valueConsumer)
throws TException {
keyConsumer.read(protocol, this, keyType);
valueConsumer.read(protocol, this, valueType);
} | java | {
"resource": ""
} |
q172184 | ByteBasedBitPackingEncoder.writeInt | test | public void writeInt(int value) throws IOException {
input[inputSize] = value;
++ inputSize;
if (inputSize == VALUES_WRITTEN_AT_A_TIME) {
pack();
if (packedPosition == slabSize) {
slabs.add(BytesInput.from(packed));
totalFullSlabSize += slabSize;
if (slabSize < bitWidth * MAX_SLAB_SIZE_MULT) {
slabSize *= 2;
}
initPackedSlab();
}
}
} | java | {
"resource": ""
} |
q172185 | BytesUtils.readIntLittleEndian | test | public static int readIntLittleEndian(ByteBuffer in, int offset) throws IOException {
int ch4 = in.get(offset) & 0xff;
int ch3 = in.get(offset + 1) & 0xff;
int ch2 = in.get(offset + 2) & 0xff;
int ch1 = in.get(offset + 3) & 0xff;
return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0));
} | java | {
"resource": ""
} |
q172186 | AvroParquetOutputFormat.setSchema | test | public static void setSchema(Job job, Schema schema) {
AvroWriteSupport.setSchema(ContextUtil.getConfiguration(job), schema);
} | java | {
"resource": ""
} |
q172187 | MapR52StreamsValidationUtil09.createTopicIfNotExists | test | @Override
public void createTopicIfNotExists(String topic, Map<String, Object> kafkaClientConfigs, String metadataBrokerList)
throws StageException {
// check stream path and topic
if (topic.startsWith("/") && topic.contains(":")) {
String[] path = topic.split(":");
if (path.length != 2) {
// Stream topic has invalid format. Record will be sent to error
throw new StageException(MapRStreamsErrors.MAPRSTREAMS_21, topic);
}
String streamPath = path[0];
if (!streamCache.contains(streamPath)) {
// This pipeline sees this stream path for the 1st time
Configuration conf = new Configuration();
kafkaClientConfigs.forEach((k, v) -> {
conf.set(k, v.toString());
});
Admin streamAdmin = null;
try {
streamAdmin = Streams.newAdmin(conf);
// Check if the stream path exists already
streamAdmin.countTopics(streamPath);
streamCache.add(streamPath);
} catch (TableNotFoundException e) {
LOG.debug("Stream not found. Creating a new stream: " + streamPath);
try {
streamAdmin.createStream(streamPath, Streams.newStreamDescriptor());
streamCache.add(streamPath);
} catch (IOException ioex) {
throw new StageException(MapRStreamsErrors.MAPRSTREAMS_22, streamPath, e.getMessage(), e);
}
} catch (IOException | IllegalArgumentException e) {
throw new StageException(MapRStreamsErrors.MAPRSTREAMS_23, e.getMessage(), e);
} finally {
if (streamAdmin != null) {
streamAdmin.close();
}
}
}
}
// Stream topic can be created through KafkaProducer if Stream Path exists already
KafkaProducer<String, String> kafkaProducer = createProducerTopicMetadataClient(kafkaClientConfigs);
kafkaProducer.partitionsFor(topic);
} | java | {
"resource": ""
} |
q172188 | PipelineBeanCreator.duplicatePipelineStageBeans | test | public PipelineStageBeans duplicatePipelineStageBeans(
StageLibraryTask stageLib,
PipelineStageBeans pipelineStageBeans,
InterceptorCreatorContextBuilder interceptorCreatorContextBuilder,
Map<String, Object> constants,
List<Issue> errors
) {
List<StageBean> stageBeans = new ArrayList<>(pipelineStageBeans.size());
for(StageBean original: pipelineStageBeans.getStages()) {
// Create StageDefinition map for this stage
Map<Class, ServiceDefinition> services = original.getServices().stream()
.collect(Collectors.toMap(c -> c.getDefinition().getProvides(), ServiceBean::getDefinition));
StageBean stageBean = createStage(
stageLib,
original.getDefinition(),
ClassLoaderReleaser.NOOP_RELEASER,
original.getConfiguration(),
services::get,
interceptorCreatorContextBuilder,
constants,
errors
);
if (stageBean != null) {
stageBeans.add(stageBean);
}
}
return new PipelineStageBeans(stageBeans);
} | java | {
"resource": ""
} |
q172189 | PipelineBeanCreator.createStageBean | test | public StageBean createStageBean(
boolean forExecution,
StageLibraryTask library,
StageConfiguration stageConf,
boolean validateAnnotations,
boolean errorStage,
boolean pipelineLifecycleStage,
Map<String, Object> constants,
InterceptorCreatorContextBuilder interceptorContextBuilder,
List<Issue> errors
) {
IssueCreator issueCreator = IssueCreator.getStage(stageConf.getInstanceName());
StageBean bean = null;
StageDefinition stageDef = library.getStage(stageConf.getLibrary(), stageConf.getStageName(),
forExecution);
if (stageDef != null) {
// Pipeline lifecycle events validation must match, whether it's also marked as error stage does not matter
if(validateAnnotations) {
if (pipelineLifecycleStage) {
if (!stageDef.isPipelineLifecycleStage()) {
errors.add(issueCreator.create(
CreationError.CREATION_018,
stageDef.getLibraryLabel(),
stageDef.getLabel(),
stageConf.getStageVersion())
);
}
// For non pipeline lifecycle stages, the error stage annotation must match
} else if (stageDef.isErrorStage() != errorStage) {
if (stageDef.isErrorStage()) {
errors.add(issueCreator.create(CreationError.CREATION_007, stageDef.getLibraryLabel(), stageDef.getLabel(),
stageConf.getStageVersion()));
} else {
errors.add(issueCreator.create(CreationError.CREATION_008, stageDef.getLibraryLabel(), stageDef.getLabel(),
stageConf.getStageVersion()));
}
}
}
bean = createStage(
library,
stageDef,
library,
stageConf,
serviceClass -> library.getServiceDefinition(serviceClass, true),
interceptorContextBuilder,
constants,
errors
);
} else {
errors.add(issueCreator.create(CreationError.CREATION_006, stageConf.getLibrary(), stageConf.getStageName(),
stageConf.getStageVersion()));
}
return bean;
} | java | {
"resource": ""
} |
q172190 | PipelineBeanCreator.createInterceptors | test | public List<InterceptorBean> createInterceptors(
StageLibraryTask stageLib,
StageConfiguration stageConfiguration,
StageDefinition stageDefinition,
InterceptorCreatorContextBuilder contextBuilder,
InterceptorCreator.InterceptorType interceptorType,
List<Issue> issues
) {
List<InterceptorBean> beans = new ArrayList<>();
if(contextBuilder == null) {
return beans;
}
for(InterceptorDefinition definition : stageLib.getInterceptorDefinitions()) {
InterceptorBean bean = createInterceptor(stageLib, definition, stageConfiguration, stageDefinition, contextBuilder, interceptorType, issues);
if (bean != null) {
beans.add(bean);
}
}
return beans;
} | java | {
"resource": ""
} |
q172191 | PipelineBeanCreator.createInterceptor | test | public InterceptorBean createInterceptor(
StageLibraryTask stageLib,
InterceptorDefinition definition,
StageConfiguration stageConfiguration,
StageDefinition stageDefinition,
InterceptorCreatorContextBuilder contextBuilder,
InterceptorCreator.InterceptorType interceptorType,
List<Issue> issues
) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
InterceptorCreator.Context context = contextBuilder.buildFor(
definition.getLibraryDefinition().getName(),
definition.getKlass().getName(),
stageConfiguration,
stageDefinition,
interceptorType
);
try {
Thread.currentThread().setContextClassLoader(definition.getStageClassLoader());
InterceptorCreator creator = definition.getDefaultCreator().newInstance();
Interceptor interceptor = creator.create(context);
if(interceptor == null) {
return null;
}
return new InterceptorBean(
definition,
interceptor,
stageLib
);
} catch (IllegalAccessException|InstantiationException e) {
LOG.debug("Can't instantiate interceptor: {}", e.toString(), e);
IssueCreator issueCreator = IssueCreator.getStage(stageDefinition.getName());
issues.add(issueCreator.create(
CreationError.CREATION_000, "interceptor", definition.getKlass().getName(), e.toString()
));
} finally {
Thread.currentThread().setContextClassLoader(classLoader);
}
return null;
} | java | {
"resource": ""
} |
q172192 | AvroTypeUtil.parseSchema | test | public static Schema parseSchema(String schema) {
Schema.Parser parser = new Schema.Parser();
parser.setValidate(true);
// We sadly can't use this method directly because it was added after 1.7.3 and we have to stay
// compatible with 1.7.3 as this version ships with mapr (and thus we end up using it). This code is
// however compiled against 1.7.7 and hence we don't have to do reflection here.
try {
parser.setValidateDefaults(true);
} catch (NoSuchMethodError e) {
LOG.debug("Running old Avro version that doesn't have 'setValidateDefaults' method", e);
}
return parser.parse(schema);
} | java | {
"resource": ""
} |
q172193 | AvroTypeUtil.millisToDays | test | private static int millisToDays(long millisLocal) {
// We assume millisLocal is midnight of some date. What we are basically trying to do
// here is go from local-midnight to UTC-midnight (or whatever time that happens to be).
long millisUtc = millisLocal + localTimeZone.getOffset(millisLocal);
int days;
if (millisUtc >= 0L) {
days = (int) (millisUtc / MILLIS_PER_DAY);
} else {
days = (int) ((millisUtc - 86399999 /*(MILLIS_PER_DAY - 1)*/) / MILLIS_PER_DAY);
}
return days;
} | java | {
"resource": ""
} |
q172194 | AvroTypeUtil.getAvroSchemaFromHeader | test | public static String getAvroSchemaFromHeader(Record record, String headerName) throws DataGeneratorException {
String jsonSchema = record.getHeader().getAttribute(headerName);
if(jsonSchema == null || jsonSchema.isEmpty()) {
throw new DataGeneratorException(Errors.AVRO_GENERATOR_03, record.getHeader().getSourceId());
}
return jsonSchema;
} | java | {
"resource": ""
} |
q172195 | JmsTargetUpgrader.upgradeV1ToV2 | test | private void upgradeV1ToV2(List<Config> configs, Context context) {
List<Config> dataFormatConfigs = configs.stream()
.filter(c -> c.getName().startsWith("dataFormat"))
.collect(Collectors.toList());
// Remove those configs
configs.removeAll(dataFormatConfigs);
// Provide proper prefix
dataFormatConfigs = dataFormatConfigs.stream()
.map(c -> new Config(c.getName().replace("dataFormatConfig.", "dataGeneratorFormatConfig."), c.getValue()))
.collect(Collectors.toList());
// And finally register new service
context.registerService(DataFormatGeneratorService.class, dataFormatConfigs);
} | java | {
"resource": ""
} |
q172196 | RecordWriter.getLength | test | public long getLength() throws IOException {
long length = -1;
if (generator != null) {
length = textOutputStream.getByteCount();
} else if (seqWriter != null) {
length = seqWriter.getLength();
}
return length;
} | java | {
"resource": ""
} |
q172197 | BaseClusterProvider.copyBlobstore | test | private void copyBlobstore(List<String> blobStoreResources, File rootDataDir, File pipelineDir) throws IOException {
if (blobStoreResources == null) {
return;
}
File blobstoreDir = new File(runtimeInfo.getDataDir(), BLOBSTORE_BASE_DIR);
File stagingBlobstoreDir = new File(rootDataDir, BLOBSTORE_BASE_DIR);
if (!stagingBlobstoreDir.exists()) {
if (!stagingBlobstoreDir.mkdirs()) {
throw new RuntimeException("Failed to create blobstore directory: " + pipelineDir.getPath());
}
}
for (String blobstoreFile: blobStoreResources) {
File srcFile = new File(blobstoreDir, blobstoreFile);
if (srcFile.exists()){
final File dstFile = new File(stagingBlobstoreDir, srcFile.getName());
if (srcFile.canRead()) { // ignore files which cannot be read
try (InputStream in = new FileInputStream((srcFile))) {
try (OutputStream out = new FileOutputStream((dstFile))) {
IOUtils.copy(in, out);
}
}
}
}
}
} | java | {
"resource": ""
} |
q172198 | SQLListener.reset | test | public void reset(){
columns.clear();
this.columnsExpected = null;
columnNames = null;
table = null;
schema = null;
insideStatement = false;
} | java | {
"resource": ""
} |
q172199 | ThreadHealthReporter.reportHealth | test | public boolean reportHealth(String threadName, int scheduledDelay, long timestamp) {
ThreadHealthReport threadHealthReport = new ThreadHealthReport(threadName, scheduledDelay, timestamp);
if(threadToGaugeMap.containsKey(threadName)) {
threadToGaugeMap.get(threadName).setThreadHealthReport(threadHealthReport);
return true;
}
return false;
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.