code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Builds a {@link BulkWriter} that writes records in Parquet format to the given stream.
 *
 * @param stream the Flink output stream to write to
 * @return a bulk writer backed by a freshly created ParquetWriter
 * @throws IOException if the Parquet writer cannot be created
 */
@Override
public BulkWriter<T> create(FSDataOutputStream stream) throws IOException {
    // Adapt the Flink stream to Parquet's OutputFile abstraction.
    final OutputFile outputFile = new StreamOutputFile(stream);
    return new ParquetBulkWriter<>(writerBuilder.createWriter(outputFile));
}
Creates a new ParquetWriterFactory using the given builder to assemble the ParquetWriter. @param writerBuilder The builder to construct the ParquetWriter.
create
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetWriterFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetWriterFactory.java
Apache-2.0
@Override public Reader<E> createReader( Configuration config, FSDataInputStream stream, long fileLen, long splitEnd) throws IOException { // current version does not support splitting. checkNotSplit(fileLen, splitEnd); return new AvroParquetRecordReader<E>( AvroParquetReader.<E>builder(new ParquetInputFile(stream, fileLen)) .withDataModel(getDataModel()) .withConf(HadoopUtils.getHadoopConfiguration(config)) .build()); }
Creates a new reader to read avro {@link GenericRecord} from a Parquet input stream. <p>Several wrapper classes have been created to make the Flink abstraction compatible with the parquet abstraction. Please refer to the inner classes {@link AvroParquetRecordReader}, {@link ParquetInputFile}, {@code FSDataInputStreamAdapter} for details.
createReader
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetRecordFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetRecordFormat.java
Apache-2.0
/** Splitting is not supported by the current version, so this always reports false. */
@Override
public boolean isSplittable() {
    return false;
}
Current version does not support splitting.
isSplittable
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetRecordFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetRecordFormat.java
Apache-2.0
/**
 * Creates a ParquetWriterFactory for an Avro specific record type. The Parquet writers
 * derive their columnar schema from the Avro schema of that type.
 *
 * @param type the specific-record class to write
 * @param <T> the record type
 * @return a factory producing Parquet bulk writers for {@code type}
 */
public static <T extends SpecificRecordBase> ParquetWriterFactory<T> forSpecificRecord(
        Class<T> type) {
    // The schema is captured as a String rather than a Schema object — presumably to keep
    // the builder lambda serializable; confirm before changing.
    final String schemaString = SpecificData.get().getSchema(type).toString();
    final ParquetBuilder<T> parquetBuilder =
            (out) -> createAvroParquetWriter(schemaString, SpecificData.get(), out);
    return new ParquetWriterFactory<>(parquetBuilder);
}
Creates a ParquetWriterFactory for an Avro specific type. The Parquet writers will use the schema of that specific type to build and write the columnar data. @param type The class of the type to write.
forSpecificRecord
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetWriters.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/avro/AvroParquetWriters.java
Apache-2.0
/**
 * Creates a {@link ParquetWriterFactory} for the given type. The type should represent a
 * Protobuf message.
 *
 * @param type the protobuf message class to write
 * @param <T> the message type
 * @return a factory producing Parquet bulk writers for {@code type}
 */
public static <T extends Message> ParquetWriterFactory<T> forType(Class<T> type) {
    final ParquetBuilder<T> parquetBuilder =
            (out) -> new ParquetProtoWriterBuilder<>(out, type).build();
    return new ParquetWriterFactory<>(parquetBuilder);
}
Creates a {@link ParquetWriterFactory} for the given type. The type should represent a Protobuf message. @param type The class of the type to write.
forType
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/protobuf/ParquetProtoWriters.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/protobuf/ParquetProtoWriters.java
Apache-2.0
/**
 * Creates a parquet {@link BulkWriter.Factory} for {@link RowData}.
 *
 * @param rowType row type of the parquet table
 * @param conf hadoop configuration
 * @param utcTimestamp whether to use the UTC timezone (Hive 3.x) or the local timezone
 *     (Hive 0.x/1.x/2.x) for the conversion between epoch time and LocalDateTime
 */
public static ParquetWriterFactory<RowData> createWriterFactory(
        RowType rowType, Configuration conf, boolean utcTimestamp) {
    final FlinkParquetBuilder parquetBuilder =
            new FlinkParquetBuilder(rowType, conf, utcTimestamp);
    return new ParquetWriterFactory<>(parquetBuilder);
}
Create a parquet {@link BulkWriter.Factory}. @param rowType row type of parquet table. @param conf hadoop configuration. @param utcTimestamp Use UTC timezone or local timezone to the conversion between epoch time and LocalDateTime. Hive 0.x/1.x/2.x use local timezone. But Hive 3.x use UTC timezone.
createWriterFactory
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/row/ParquetRowDataBuilder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/row/ParquetRowDataBuilder.java
Apache-2.0
/**
 * Computes per-row null flags for a column from its definition and repetition levels.
 *
 * <p>Values whose repetition level exceeds the row's repetition level belong to repeated
 * items nested inside the row (e.g. array elements) and are skipped. Each remaining value
 * is classified as a defined row (definition level >= the row's level) or a null row.
 *
 * <p>NOTE(review): on the no-null fast path the position count returned is
 * {@code fieldDefinitionLevels.length}, while the null-flag path returns
 * {@code nullRowFlags.size()}, which excludes the skipped repeated values — confirm this
 * asymmetry is intended when values with higher repetition levels are present.
 *
 * @param field field carrying the row's max repetition and definition levels
 * @param fieldDefinitionLevels each value's definition level
 * @param fieldRepetitionLevels each value's repetition level
 * @return a {@link RowPosition} with the row count and an isNull array (null when no rows
 *     are null)
 */
public static RowPosition calculateRowOffsets( ParquetField field, int[] fieldDefinitionLevels, int[] fieldRepetitionLevels) { int rowDefinitionLevel = field.getDefinitionLevel(); int rowRepetitionLevel = field.getRepetitionLevel(); int nullValuesCount = 0; BooleanArrayList nullRowFlags = new BooleanArrayList(0); for (int i = 0; i < fieldDefinitionLevels.length; i++) { // If a row's last field is an array, the repetition levels for the array's items will // be larger than the parent row's repetition level, so we need to skip those values. if (fieldRepetitionLevels[i] > rowRepetitionLevel) { continue; } if (fieldDefinitionLevels[i] >= rowDefinitionLevel) { // current row is defined and not empty nullRowFlags.add(false); } else { // current row is null nullRowFlags.add(true); nullValuesCount++; } } if (nullValuesCount == 0) { return new RowPosition(null, fieldDefinitionLevels.length); } return new RowPosition(nullRowFlags.toArray(), nullRowFlags.size()); }
Calculates row offsets according to the column's max repetition level, definition level, and each value's repetition level and definition level. Each row is in one of three situations: <li>Row is not defined, because its optional parent field is null; this is decided by its parent's repetition level <li>Row is null <li>Row is defined and not empty. @param field field that contains the row column message, including the max repetition level and definition level. @param fieldRepetitionLevels int array with each value's repetition level. @param fieldDefinitionLevels int array with each value's definition level. @return {@link RowPosition} containing the collection's row count and isNull array.
calculateRowOffsets
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/utils/NestedPositionUtil.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/utils/NestedPositionUtil.java
Apache-2.0
/**
 * Verifies that the column descriptor's primitive type matches the expected one.
 *
 * @param expectedName the primitive type name this reader requires
 */
protected void checkTypeName(PrimitiveType.PrimitiveTypeName expectedName) {
    final PrimitiveType.PrimitiveTypeName actual =
            descriptor.getPrimitiveType().getPrimitiveTypeName();
    checkArgument(
            actual == expectedName,
            "Expected type name: %s, actual type name: %s",
            expectedName,
            actual);
}
Dictionary decoder to wrap dictionary ids input stream.
checkTypeName
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/AbstractColumnReader.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/AbstractColumnReader.java
Apache-2.0
/**
 * Whether lazy dictionary-id decoding is supported; see {@link ParquetDictionary}. When
 * this returns false, all the data is decoded eagerly first.
 */
protected boolean supportLazyDecode() {
    return true;
}
Supports lazy decoding of dictionary ids. See more in {@link ParquetDictionary}. If this returns false, all the data is decoded first.
supportLazyDecode
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/AbstractColumnReader.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/AbstractColumnReader.java
Apache-2.0
/**
 * Decodes {@code total} dictionary ids: ids are written into {@code values} and the
 * nullability into {@code nulls}.
 *
 * <p>This decoder supplies the definition levels; a position is non-null when its level
 * equals {@code level}. In RLE mode a whole run is either defined (ids read from
 * {@code data}) or null; in PACKED mode each position is checked individually.
 *
 * @param total number of values to decode
 * @param values target vector for the dictionary ids
 * @param nulls target vector receiving null flags
 * @param rowId starting offset in the target vectors
 * @param level definition level that marks a non-null value
 * @param data decoder holding the actual dictionary-id stream
 */
void readDictionaryIds( int total, WritableIntVector values, WritableColumnVector nulls, int rowId, int level, RunLengthDecoder data) { int left = total; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int n = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == level) { data.readDictionaryIdData(n, values, rowId); } else { nulls.setNulls(rowId, n); } break; case PACKED: for (int i = 0; i < n; ++i) { if (currentBuffer[currentBufferIdx++] == level) { values.setInt(rowId + i, data.readInteger()); } else { nulls.setNullAt(rowId + i); } } break; } rowId += n; left -= n; currentCount -= n; } }
Decoding for dictionary ids. The IDs are populated into `values` and the nullability is populated into `nulls`.
readDictionaryIds
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
Apache-2.0
/**
 * Decodes {@code total} dictionary ids from this decoder's own stream into {@code c}.
 *
 * <p>RLE runs are expanded to the repeated {@code currentValue}; PACKED groups are copied
 * directly out of {@code currentBuffer}.
 *
 * @param total number of ids to decode
 * @param c target int vector
 * @param rowId starting offset in the target vector
 */
private void readDictionaryIdData(int total, WritableIntVector c, int rowId) { int left = total; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int n = Math.min(left, this.currentCount); switch (mode) { case RLE: c.setInts(rowId, n, currentValue); break; case PACKED: c.setInts(rowId, n, currentBuffer, currentBufferIdx); currentBufferIdx += n; break; } rowId += n; left -= n; currentCount -= n; } }
It is used to decode dictionary IDs.
readDictionaryIdData
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
Apache-2.0
/**
 * Reads the next unsigned varint-encoded int from the input stream.
 *
 * <p>Each byte contributes its low 7 bits; the high bit signals continuation.
 *
 * @return the decoded value
 * @throws IOException if the stream ends before the varint is complete
 */
private int readUnsignedVarInt() throws IOException {
    int value = 0;
    int shift = 0;
    int b;
    do {
        b = in.read();
        if (b < 0) {
            // Fix: a truncated stream used to loop forever, because read() == -1 has the
            // continuation bit (0x80) set and the loop never terminated.
            throw new java.io.EOFException(
                    "Unexpected end of stream while reading an unsigned varint.");
        }
        value |= (b & 0x7F) << shift;
        shift += 7;
    } while ((b & 0x80) != 0);
    return value;
}
Reads the next varint encoded int.
readUnsignedVarInt
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
Apache-2.0
/** Reads the next 4-byte little-endian int from the input stream. */
private int readIntLittleEndian() throws IOException {
    // Bytes arrive lowest-order first.
    final int b0 = in.read();
    final int b1 = in.read();
    final int b2 = in.read();
    final int b3 = in.read();
    return (b3 << 24) + (b2 << 16) + (b1 << 8) + b0;
}
Reads the next 4 byte little endian int.
readIntLittleEndian
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
Apache-2.0
/**
 * Reads the next little-endian int stored in {@code bytesWidth} bytes (0 to 4).
 *
 * @return the decoded value; 0 when the width is 0
 */
private int readIntLittleEndianPaddedOnBitWidth() throws IOException {
    if (bytesWidth < 0 || bytesWidth > 4) {
        throw new RuntimeException("Unreachable");
    }
    int value = 0;
    // Accumulate bytes lowest-order first; '+' keeps the same arithmetic as the
    // width-specific shift-and-add expansion for every width 0..4.
    for (int i = 0; i < bytesWidth; i++) {
        value += in.read() << (8 * i);
    }
    return value;
}
Reads the next byteWidth little endian int.
readIntLittleEndianPaddedOnBitWidth
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/RunLengthDecoder.java
Apache-2.0
/** Returns the child fields of this parquet group field. */
public List<ParquetField> getChildren() {
    return children;
}
Field that represent parquet's Group Field.
getChildren
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/type/ParquetGroupField.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/type/ParquetGroupField.java
Apache-2.0
/** The file format under test: parquet. */
@Override
protected String format() {
    return "parquet";
}
Compaction IT case for {@link ParquetFileFormatFactory}.
format
java
apache/flink
flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/ParquetFileCompactionITCase.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/ParquetFileCompactionITCase.java
Apache-2.0
/**
 * Converts an underscore-separated name to a camel case name.
 *
 * @param name the underscore-separated name
 * @param capNext whether the first character should be capitalized
 */
public static String underScoreToCamelCase(String name, boolean capNext) {
    // Delegates to protobuf's internal SchemaUtil conversion.
    return SchemaUtil.toCamelCase(name, capNext);
}
Converts an underscore-separated name to a camel case name.
underScoreToCamelCase
java
apache/flink
flink-formats/flink-protobuf/src/main/java/com/google/protobuf/ProtobufInternalUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/com/google/protobuf/ProtobufInternalUtils.java
Apache-2.0
/**
 * Generates Java code that converts a protobuf {@code List} object ({@code pbObjectCode})
 * into a Flink {@code GenericArrayData} assigned to {@code resultVar}.
 *
 * <p>The emitted code copies each list element into an {@code Object[]} via the element
 * deserializer obtained from {@link PbCodegenDeserializeFactory}. Variable names are made
 * unique with a global counter ({@link PbCodegenVarId}) so nested codegen does not clash.
 *
 * @param resultVar name of the variable receiving the converted ArrayData
 * @param pbObjectCode code phrase evaluating to the source protobuf List
 * @param indent current indentation of the generated code
 * @return the generated code segment
 */
@Override public String codegen(String resultVar, String pbObjectCode, int indent) throws PbCodegenException { // The type of pbObjectCode represents a general List object, // it should be converted to ArrayData of flink internal type as resultVariable. PbCodegenAppender appender = new PbCodegenAppender(indent); PbCodegenVarId varUid = PbCodegenVarId.getInstance(); int uid = varUid.getAndIncrement(); String protoTypeStr = PbCodegenUtils.getTypeStrFromProto(fd, false); String listPbVar = "list" + uid; String flinkArrVar = "newArr" + uid; String flinkArrEleVar = "subReturnVar" + uid; String iVar = "i" + uid; String subPbObjVar = "subObj" + uid; appender.appendLine("List<" + protoTypeStr + "> " + listPbVar + "=" + pbObjectCode); appender.appendLine( "Object[] " + flinkArrVar + "= new " + "Object[" + listPbVar + ".size()]"); appender.begin( "for(int " + iVar + "=0;" + iVar + " < " + listPbVar + ".size(); " + iVar + "++){"); appender.appendLine("Object " + flinkArrEleVar + " = null"); appender.appendLine( protoTypeStr + " " + subPbObjVar + " = (" + protoTypeStr + ")" + listPbVar + ".get(" + iVar + ")"); PbCodegenDeserializer codegenDes = PbCodegenDeserializeFactory.getPbCodegenDes(fd, elementType, formatContext); String code = codegenDes.codegen(flinkArrEleVar, subPbObjVar, appender.currentIndent()); appender.appendSegment(code); appender.appendLine(flinkArrVar + "[" + iVar + "]=" + flinkArrEleVar + ""); appender.end("}"); appender.appendLine(resultVar + " = new GenericArrayData(" + flinkArrVar + ")"); return appender.code(); }
Deserializer to convert proto array type object to flink array type data.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenArrayDeserializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenArrayDeserializer.java
Apache-2.0
/**
 * Generates Java code that converts a protobuf {@code Map} object ({@code pbObjectCode})
 * into a Flink {@code GenericMapData} assigned to {@code resultVar}.
 *
 * <p>Key and value field descriptors are looked up from the synthetic map-entry message
 * ({@code PB_MAP_KEY_NAME} / {@code PB_MAP_VALUE_NAME}); each entry's key and value are
 * converted with their own deserializers and collected into a {@code HashMap}.
 *
 * @param resultVar name of the variable receiving the converted MapData
 * @param pbObjectCode code phrase evaluating to the source protobuf Map
 * @param indent current indentation of the generated code
 * @return the generated code segment
 */
@Override public String codegen(String resultVar, String pbObjectCode, int indent) throws PbCodegenException { // The type of pbObjectCode is a general Map object, // it should be converted to MapData of flink internal type as resultVariable PbCodegenVarId varUid = PbCodegenVarId.getInstance(); int uid = varUid.getAndIncrement(); LogicalType keyType = mapType.getKeyType(); LogicalType valueType = mapType.getValueType(); Descriptors.FieldDescriptor keyFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME); Descriptors.FieldDescriptor valueFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME); PbCodegenAppender appender = new PbCodegenAppender(indent); String pbKeyTypeStr = PbCodegenUtils.getTypeStrFromProto(keyFd, false); String pbValueTypeStr = PbCodegenUtils.getTypeStrFromProto(valueFd, false); String pbMapVar = "pbMap" + uid; String pbMapEntryVar = "pbEntry" + uid; String resultDataMapVar = "resultDataMap" + uid; String flinkKeyVar = "keyDataVar" + uid; String flinkValueVar = "valueDataVar" + uid; appender.appendLine( "Map<" + pbKeyTypeStr + "," + pbValueTypeStr + "> " + pbMapVar + " = " + pbObjectCode + ";"); appender.appendLine("Map " + resultDataMapVar + " = new HashMap()"); appender.begin( "for(Map.Entry<" + pbKeyTypeStr + "," + pbValueTypeStr + "> " + pbMapEntryVar + ": " + pbMapVar + ".entrySet()){"); appender.appendLine("Object " + flinkKeyVar + "= null"); appender.appendLine("Object " + flinkValueVar + "= null"); PbCodegenDeserializer keyDes = PbCodegenDeserializeFactory.getPbCodegenDes(keyFd, keyType, formatContext); PbCodegenDeserializer valueDes = PbCodegenDeserializeFactory.getPbCodegenDes(valueFd, valueType, formatContext); String keyGenCode = keyDes.codegen( flinkKeyVar, "((" + pbKeyTypeStr + ")" + pbMapEntryVar + ".getKey())", appender.currentIndent()); appender.appendSegment(keyGenCode); String valueGenCode = valueDes.codegen( flinkValueVar, "((" + pbValueTypeStr + ")" + pbMapEntryVar + ".getValue())", 
appender.currentIndent()); appender.appendSegment(valueGenCode); appender.appendLine(resultDataMapVar + ".put(" + flinkKeyVar + ", " + flinkValueVar + ")"); appender.end("}"); appender.appendLine(resultVar + " = new GenericMapData(" + resultDataMapVar + ")"); return appender.code(); }
Deserializer to convert proto map type object to flink map type data.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenMapDeserializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenMapDeserializer.java
Apache-2.0
@Override public String codegen(String resultVar, String pbObjectCode, int indent) throws PbCodegenException { // the type of pbObjectCode must not be primitive type, // it should convert to internal flink row type like StringData. PbCodegenAppender appender = new PbCodegenAppender(indent); switch (fd.getJavaType()) { case INT: case LONG: case FLOAT: case DOUBLE: case BOOLEAN: appender.appendLine(resultVar + " = " + pbObjectCode); break; case BYTE_STRING: appender.appendLine(resultVar + " = " + pbObjectCode + ".toByteArray()"); break; case STRING: appender.appendLine( resultVar + " = BinaryStringData.fromString(" + pbObjectCode + ".toString())"); break; case ENUM: if (logicalType.getTypeRoot() == LogicalTypeRoot.CHAR || logicalType.getTypeRoot() == LogicalTypeRoot.VARCHAR) { appender.appendLine( resultVar + " = BinaryStringData.fromString(" + pbObjectCode + ".toString())"); } else if (logicalType.getTypeRoot() == LogicalTypeRoot.TINYINT || logicalType.getTypeRoot() == LogicalTypeRoot.SMALLINT || logicalType.getTypeRoot() == LogicalTypeRoot.INTEGER || logicalType.getTypeRoot() == LogicalTypeRoot.BIGINT) { appender.appendLine(resultVar + " = " + pbObjectCode + ".getNumber()"); } else { throw new PbCodegenException( "Illegal type for protobuf enum, only char/vachar/int/bigint is supported"); } break; default: throw new PbCodegenException( "Unsupported protobuf simple type: " + fd.getJavaType()); } return appender.code(); }
Deserializer to convert proto simple type object to flink simple type data.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenSimpleDeserializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenSimpleDeserializer.java
Apache-2.0
/**
 * Generates Java code that converts a Flink {@code ArrayData} ({@code flinkObjectCode})
 * into a {@code List} of protobuf values assigned to {@code resultVar}.
 *
 * <p>Each element is converted through
 * {@code PbCodegenUtils.convertFlinkArrayElementToPbWithDefaultValueCode}, which also
 * substitutes a protobuf default value for null elements.
 *
 * @param resultVar name of the variable receiving the protobuf list
 * @param flinkObjectCode code phrase evaluating to the source ArrayData
 * @param indent current indentation of the generated code
 * @return the generated code segment
 */
@Override public String codegen(String resultVar, String flinkObjectCode, int indent) throws PbCodegenException { // The type of flinkObjectCode is a ArrayData of flink, // it should be converted to array of protobuf as resultVariable. PbCodegenVarId varUid = PbCodegenVarId.getInstance(); int uid = varUid.getAndIncrement(); PbCodegenAppender appender = new PbCodegenAppender(indent); String protoTypeStr = PbCodegenUtils.getTypeStrFromProto(fd, false); String pbListVar = "pbList" + uid; String flinkArrayDataVar = "arrData" + uid; String pbElementVar = "elementPbVar" + uid; String iVar = "i" + uid; appender.appendLine("ArrayData " + flinkArrayDataVar + " = " + flinkObjectCode); appender.appendLine("List<" + protoTypeStr + "> " + pbListVar + "= new ArrayList()"); appender.begin( "for(int " + iVar + "=0;" + iVar + " < " + flinkArrayDataVar + ".size(); " + iVar + "++){"); String convertFlinkArrayElementToPbCode = PbCodegenUtils.convertFlinkArrayElementToPbWithDefaultValueCode( flinkArrayDataVar, iVar, pbElementVar, fd, elementType, formatContext, appender.currentIndent()); appender.appendSegment(convertFlinkArrayElementToPbCode); // add pb element to result list appender.appendLine(pbListVar + ".add( " + pbElementVar + ")"); // end for appender.end("}"); appender.appendLine(resultVar + " = " + pbListVar); return appender.code(); }
Serializer to convert flink array type data to proto array type object.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenArraySerializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenArraySerializer.java
Apache-2.0
/**
 * Generates Java code that converts a Flink {@code MapData} ({@code flinkObjectCode}) into
 * a protobuf {@code Map} assigned to {@code resultVar}.
 *
 * <p>The MapData is traversed via its parallel key/value arrays; each key and value is
 * converted through {@code convertFlinkArrayElementToPbWithDefaultValueCode}, which also
 * substitutes protobuf default values for nulls, and put into a {@code HashMap}.
 *
 * @param resultVar name of the variable receiving the protobuf map
 * @param flinkObjectCode code phrase evaluating to the source MapData
 * @param indent current indentation of the generated code
 * @return the generated code segment
 */
@Override public String codegen(String resultVar, String flinkObjectCode, int indent) throws PbCodegenException { // The type of flinkObjectCode is a MapData of flink, // it should be converted to map of protobuf as resultVariable. PbCodegenVarId varUid = PbCodegenVarId.getInstance(); int uid = varUid.getAndIncrement(); LogicalType keyType = mapType.getKeyType(); LogicalType valueType = mapType.getValueType(); Descriptors.FieldDescriptor keyFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME); Descriptors.FieldDescriptor valueFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME); PbCodegenAppender appender = new PbCodegenAppender(indent); String keyProtoTypeStr = PbCodegenUtils.getTypeStrFromProto(keyFd, false); String valueProtoTypeStr = PbCodegenUtils.getTypeStrFromProto(valueFd, false); String flinkKeyArrDataVar = "keyArrData" + uid; String flinkValueArrDataVar = "valueArrData" + uid; String iVar = "i" + uid; String pbMapVar = "resultPbMap" + uid; String keyPbVar = "keyPbVar" + uid; String valuePbVar = "valuePbVar" + uid; appender.appendLine( "ArrayData " + flinkKeyArrDataVar + " = " + flinkObjectCode + ".keyArray()"); appender.appendLine( "ArrayData " + flinkValueArrDataVar + " = " + flinkObjectCode + ".valueArray()"); appender.appendLine( "Map<" + keyProtoTypeStr + ", " + valueProtoTypeStr + "> " + pbMapVar + " = new HashMap()"); appender.begin( "for(int " + iVar + " = 0; " + iVar + " < " + flinkKeyArrDataVar + ".size(); " + iVar + "++){"); // process key String convertFlinkKeyArrayElementToPbCode = PbCodegenUtils.convertFlinkArrayElementToPbWithDefaultValueCode( flinkKeyArrDataVar, iVar, keyPbVar, keyFd, keyType, formatContext, appender.currentIndent()); appender.appendSegment(convertFlinkKeyArrayElementToPbCode); // process value String convertFlinkValueArrayElementToPbCode = PbCodegenUtils.convertFlinkArrayElementToPbWithDefaultValueCode( flinkValueArrDataVar, iVar, valuePbVar, valueFd, valueType, formatContext, 
appender.currentIndent()); appender.appendSegment(convertFlinkValueArrayElementToPbCode); appender.appendLine(pbMapVar + ".put(" + keyPbVar + ", " + valuePbVar + ")"); appender.end("}"); appender.appendLine(resultVar + " = " + pbMapVar); return appender.code(); }
Serializer to convert flink map type data to proto map type object.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenMapSerializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenMapSerializer.java
Apache-2.0
/**
 * Generates Java code that converts a Flink {@code RowData} ({@code flinkObjectCode}) into
 * a protobuf message assigned to {@code resultVar}.
 *
 * <p>For each row field a null check is emitted; only non-null fields are applied to the
 * message builder ({@code addAll}/{@code putAll}/{@code set} for array/map/scalar
 * respectively). Accumulated code is periodically moved into a split method via
 * {@code splitSerializerRowTypeMethod} when {@code PbCodegenUtils.needToSplit} reports the
 * segment has grown too large — presumably to stay under the JVM method-size limit; confirm
 * against PbCodegenUtils.
 *
 * @param resultVar name of the variable receiving the built protobuf message
 * @param flinkObjectCode code phrase evaluating to the source RowData
 * @param indent current indentation of the generated code
 * @return the generated code segment
 */
@Override public String codegen(String resultVar, String flinkObjectCode, int indent) throws PbCodegenException { // The type of flinkObjectCode is a RowData of flink, // it should be converted to object of protobuf as resultVariable. PbCodegenVarId varUid = PbCodegenVarId.getInstance(); int uid = varUid.getAndIncrement(); PbCodegenAppender appender = new PbCodegenAppender(indent); String flinkRowDataVar = "rowData" + uid; String pbMessageTypeStr = PbFormatUtils.getFullJavaName(descriptor); String messageBuilderVar = "messageBuilder" + uid; appender.appendLine("RowData " + flinkRowDataVar + " = " + flinkObjectCode); appender.appendLine( pbMessageTypeStr + ".Builder " + messageBuilderVar + " = " + pbMessageTypeStr + ".newBuilder()"); int index = 0; PbCodegenAppender splitAppender = new PbCodegenAppender(indent); for (String fieldName : rowType.getFieldNames()) { Descriptors.FieldDescriptor elementFd = descriptor.findFieldByName(fieldName); LogicalType subType = rowType.getTypeAt(rowType.getFieldIndex(fieldName)); int subUid = varUid.getAndIncrement(); String elementPbVar = "elementPbVar" + subUid; String elementPbTypeStr; if (elementFd.isMapField()) { elementPbTypeStr = PbCodegenUtils.getTypeStrFromProto(elementFd, false); } else { elementPbTypeStr = PbCodegenUtils.getTypeStrFromProto( elementFd, PbFormatUtils.isArrayType(subType)); } String strongCamelFieldName = PbFormatUtils.getStrongCamelCaseJsonName(fieldName); // Only set non-null element of flink row to proto object. The real value in proto // result depends on protobuf implementation. splitAppender.begin("if(!" 
+ flinkRowDataVar + ".isNullAt(" + index + ")){"); splitAppender.appendLine(elementPbTypeStr + " " + elementPbVar); String flinkRowElementCode = PbCodegenUtils.flinkContainerElementCode(flinkRowDataVar, index + "", subType); PbCodegenSerializer codegen = PbCodegenSerializeFactory.getPbCodegenSer(elementFd, subType, formatContext); String code = codegen.codegen( elementPbVar, flinkRowElementCode, splitAppender.currentIndent()); splitAppender.appendSegment(code); if (subType.getTypeRoot() == LogicalTypeRoot.ARRAY) { splitAppender.appendLine( messageBuilderVar + ".addAll" + strongCamelFieldName + "(" + elementPbVar + ")"); } else if (subType.getTypeRoot() == LogicalTypeRoot.MAP) { splitAppender.appendLine( messageBuilderVar + ".putAll" + strongCamelFieldName + "(" + elementPbVar + ")"); } else { splitAppender.appendLine( messageBuilderVar + ".set" + strongCamelFieldName + "(" + elementPbVar + ")"); } splitAppender.end("}"); if (PbCodegenUtils.needToSplit(splitAppender.code().length())) { String splitMethod = formatContext.splitSerializerRowTypeMethod( flinkRowDataVar, pbMessageTypeStr + ".Builder", messageBuilderVar, splitAppender.code()); appender.appendSegment(splitMethod); splitAppender = new PbCodegenAppender(); } index += 1; } if (!splitAppender.code().isEmpty()) { appender.appendSegment(splitAppender.code()); } appender.appendLine(resultVar + " = " + messageBuilderVar + ".build()"); return appender.code(); }
Serializer to convert flink row type data to proto row type object.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenRowSerializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenRowSerializer.java
Apache-2.0
/**
 * Generates code converting a non-null Flink simple value (String, Integer, Long, Double,
 * Float, Boolean or byte[]) into its protobuf counterpart assigned to {@code resultVar}.
 *
 * <p>For enum targets: a numeric source is mapped via {@code forNumber}, a string source
 * via {@code findValueByName}; an unknown value falls back to the first enum constant.
 * Binary values are wrapped in {@code ByteString.copyFrom}.
 *
 * @param resultVar name of the variable receiving the protobuf value
 * @param flinkObjectCode code phrase evaluating to the flink simple value (never null)
 * @param indent current indentation of the generated code
 * @return the generated code segment
 * @throws PbCodegenException if the flink type is not a supported simple type
 */
@Override public String codegen(String resultVar, String flinkObjectCode, int indent) throws PbCodegenException { // the real value of flinkObjectCode may be String, Integer, // Long, Double, Float, Boolean, byte[]. // The type of flinkObject is simple data type of flink, and flinkObject must not be null. // it should be converted to protobuf simple data as resultVariable. PbCodegenAppender appender = new PbCodegenAppender(indent); switch (type.getTypeRoot()) { case FLOAT: case DOUBLE: case BOOLEAN: appender.appendLine(resultVar + " = " + flinkObjectCode); return appender.code(); case BIGINT: case INTEGER: case SMALLINT: case TINYINT: if (fd.getJavaType() == JavaType.ENUM) { String enumTypeStr = PbFormatUtils.getFullJavaName(fd.getEnumType()); appender.appendLine( resultVar + " = " + enumTypeStr + ".forNumber((int)" + flinkObjectCode + ")"); // choose the first enum element as default value if such value is invalid enum appender.begin("if(null == " + resultVar + "){"); appender.appendLine(resultVar + " = " + enumTypeStr + ".values()[0]"); appender.end("}"); } else { appender.appendLine(resultVar + " = " + flinkObjectCode); } return appender.code(); case VARCHAR: case CHAR: int uid = PbCodegenVarId.getInstance().getAndIncrement(); String fromVar = "fromVar" + uid; appender.appendLine("String " + fromVar); appender.appendLine(fromVar + " = " + flinkObjectCode + ".toString()"); if (fd.getJavaType() == JavaType.ENUM) { String enumValueDescVar = "enumValueDesc" + uid; String enumTypeStr = PbFormatUtils.getFullJavaName(fd.getEnumType()); appender.appendLine( "Descriptors.EnumValueDescriptor " + enumValueDescVar + "=" + enumTypeStr + ".getDescriptor().findValueByName(" + fromVar + ")"); appender.begin("if(null == " + enumValueDescVar + "){"); // choose the first enum element as default value if such value is invalid enum appender.appendLine(resultVar + " = " + enumTypeStr + ".values()[0]"); appender.end("}"); appender.begin("else{"); // choose the exact enum value 
appender.appendLine( resultVar + " = " + enumTypeStr + ".valueOf(" + enumValueDescVar + ")"); appender.end("}"); } else { appender.appendLine(resultVar + " = " + fromVar); } return appender.code(); case VARBINARY: case BINARY: appender.appendLine(resultVar + " = ByteString.copyFrom(" + flinkObjectCode + ")"); return appender.code(); default: throw new PbCodegenException("Unsupported data type in schema: " + type); } }
Serializer to convert flink simple type data to proto simple type object.
codegen
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenSimpleSerializer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/serialize/PbCodegenSimpleSerializer.java
Apache-2.0
/**
 * Builds the code phrase that reads one element out of a Flink container (row/array).
 *
 * @param flinkContainerCode code phrase representing the flink container (RowData/ArrayData)
 * @param index index expression within the container
 * @param eleType flink logical type of the element
 * @return code phrase extracting the element with the type-specific getter
 */
public static String flinkContainerElementCode(
        String flinkContainerCode, String index, LogicalType eleType) {
    final String accessor;
    switch (eleType.getTypeRoot()) {
        case INTEGER:
            accessor = ".getInt(" + index + ")";
            break;
        case BIGINT:
            accessor = ".getLong(" + index + ")";
            break;
        case FLOAT:
            accessor = ".getFloat(" + index + ")";
            break;
        case DOUBLE:
            accessor = ".getDouble(" + index + ")";
            break;
        case BOOLEAN:
            accessor = ".getBoolean(" + index + ")";
            break;
        case VARCHAR:
        case CHAR:
            accessor = ".getString(" + index + ")";
            break;
        case VARBINARY:
        case BINARY:
            accessor = ".getBinary(" + index + ")";
            break;
        case ROW:
            // getRow needs the field count of the nested row.
            accessor = ".getRow(" + index + ", " + eleType.getChildren().size() + ")";
            break;
        case MAP:
            accessor = ".getMap(" + index + ")";
            break;
        case ARRAY:
            accessor = ".getArray(" + index + ")";
            break;
        default:
            throw new IllegalArgumentException("Unsupported data type in schema: " + eleType);
    }
    return flinkContainerCode + accessor;
}
@param flinkContainerCode code phrase which represent flink container type like row/array in codegen sections @param index the index number in flink container type @param eleType the element type
flinkContainerElementCode
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
Apache-2.0
public static String getTypeStrFromProto(FieldDescriptor fd, boolean isList) throws PbCodegenException { String typeStr; switch (fd.getJavaType()) { case MESSAGE: if (fd.isMapField()) { // map FieldDescriptor keyFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME); FieldDescriptor valueFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME); // key and value cannot be repeated String keyTypeStr = getTypeStrFromProto(keyFd, false); String valueTypeStr = getTypeStrFromProto(valueFd, false); typeStr = "Map<" + keyTypeStr + "," + valueTypeStr + ">"; } else { // simple message typeStr = PbFormatUtils.getFullJavaName(fd.getMessageType()); } break; case INT: typeStr = "Integer"; break; case LONG: typeStr = "Long"; break; case STRING: typeStr = "String"; break; case ENUM: typeStr = PbFormatUtils.getFullJavaName(fd.getEnumType()); break; case FLOAT: typeStr = "Float"; break; case DOUBLE: typeStr = "Double"; break; case BYTE_STRING: typeStr = "ByteString"; break; case BOOLEAN: typeStr = "Boolean"; break; default: throw new PbCodegenException("do not support field type: " + fd.getJavaType()); } if (isList) { return "List<" + typeStr + ">"; } else { return typeStr; } }
Get java type str from {@link FieldDescriptor} which directly fetched from protobuf object. @return The returned code phrase will be used as java type str in codegen sections. @throws PbCodegenException
getTypeStrFromProto
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
Apache-2.0
public static String getTypeStrFromLogicType(LogicalType type) { switch (type.getTypeRoot()) { case INTEGER: return "int"; case BIGINT: return "long"; case FLOAT: return "float"; case DOUBLE: return "double"; case BOOLEAN: return "boolean"; case VARCHAR: case CHAR: return "StringData"; case VARBINARY: case BINARY: return "byte[]"; case ROW: return "RowData"; case MAP: return "MapData"; case ARRAY: return "ArrayData"; default: throw new IllegalArgumentException("Unsupported data type in schema: " + type); } }
Get java type str from {@link LogicalType} which directly fetched from flink type. @return The returned code phrase will be used as java type str in codegen sections.
getTypeStrFromLogicType
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
Apache-2.0
public static String pbDefaultValueCode( FieldDescriptor fieldDescriptor, PbFormatContext pbFormatContext) throws PbCodegenException { String nullLiteral = pbFormatContext.getPbFormatConfig().getWriteNullStringLiterals(); switch (fieldDescriptor.getJavaType()) { case MESSAGE: return PbFormatUtils.getFullJavaName(fieldDescriptor.getMessageType()) + ".getDefaultInstance()"; case INT: return "0"; case LONG: return "0L"; case STRING: return "\"" + nullLiteral + "\""; case ENUM: return PbFormatUtils.getFullJavaName(fieldDescriptor.getEnumType()) + ".values()[0]"; case FLOAT: return "0.0f"; case DOUBLE: return "0.0d"; case BYTE_STRING: return "ByteString.EMPTY"; case BOOLEAN: return "false"; default: throw new PbCodegenException( "do not support field type: " + fieldDescriptor.getJavaType()); } }
Get protobuf default value from {@link FieldDescriptor}. @return The java code phrase which represents default value calculation.
pbDefaultValueCode
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbCodegenUtils.java
Apache-2.0
public static void validate(Descriptors.Descriptor descriptor, RowType rowType) { validateTypeMatch(descriptor, rowType); }
Validation class to verify protobuf definition and flink schema.
validate
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
Apache-2.0
private static void validateTypeMatch(Descriptors.Descriptor descriptor, RowType rowType) { rowType.getFields() .forEach( rowField -> { FieldDescriptor fieldDescriptor = descriptor.findFieldByName(rowField.getName()); if (null != fieldDescriptor) { validateTypeMatch(fieldDescriptor, rowField.getType()); } else { throw new ValidationException( "Column " + rowField.getName() + " does not exists in definition of proto class."); } }); }
Validate type match of row type. @param descriptor the {@link Descriptors.Descriptor} of the protobuf object. @param rowType the corresponding {@link RowType} to the {@link Descriptors.Descriptor}
validateTypeMatch
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
Apache-2.0
private static void validateTypeMatch(FieldDescriptor fd, LogicalType logicalType) { if (!fd.isRepeated()) { if (fd.getJavaType() != JavaType.MESSAGE) { // simple type validateSimpleType(fd, logicalType.getTypeRoot()); } else { // message type if (!(logicalType instanceof RowType)) { throw new ValidationException( "Unexpected LogicalType: " + logicalType + ". It should be RowType"); } validateTypeMatch(fd.getMessageType(), (RowType) logicalType); } } else { if (fd.isMapField()) { // map type if (!(logicalType instanceof MapType)) { throw new ValidationException( "Unexpected LogicalType: " + logicalType + ". It should be MapType"); } MapType mapType = (MapType) logicalType; validateSimpleType( fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME), mapType.getKeyType().getTypeRoot()); validateTypeMatch( fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME), mapType.getValueType()); } else { // array type if (!(logicalType instanceof ArrayType)) { throw new ValidationException( "Unexpected LogicalType: " + logicalType + ". It should be ArrayType"); } ArrayType arrayType = (ArrayType) logicalType; if (fd.getJavaType() == JavaType.MESSAGE) { // array message type LogicalType elementType = arrayType.getElementType(); if (!(elementType instanceof RowType)) { throw new ValidationException( "Unexpected logicalType: " + elementType + ". It should be RowType"); } validateTypeMatch(fd.getMessageType(), (RowType) elementType); } else { // array simple type validateSimpleType(fd, arrayType.getElementType().getTypeRoot()); } } } }
Validate type match of general type. @param fd the {@link Descriptors.Descriptor} of the protobuf object. @param logicalType the corresponding {@link LogicalType} to the {@link FieldDescriptor}
validateTypeMatch
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
Apache-2.0
private static void validateSimpleType(FieldDescriptor fd, LogicalTypeRoot logicalTypeRoot) { if (!TYPE_MATCH_MAP.containsKey(fd.getJavaType())) { throw new ValidationException("Unsupported protobuf java type: " + fd.getJavaType()); } if (TYPE_MATCH_MAP.get(fd.getJavaType()).stream().noneMatch(x -> x == logicalTypeRoot)) { throw new ValidationException( "Protobuf field type does not match column type, " + fd.getJavaType() + "(protobuf) is not compatible of " + logicalTypeRoot); } }
Only validate type match for simple type like int, long, string, boolean. @param fd {@link FieldDescriptor} in proto descriptor @param logicalTypeRoot {@link LogicalTypeRoot} of row element
validateSimpleType
java
apache/flink
flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/util/PbSchemaValidationUtils.java
Apache-2.0
@Test public void testMessage() throws Exception { MapTest.InnerMessageTest innerMessageTest = MapTest.InnerMessageTest.newBuilder().setA(1).setB(2).build(); MapTest mapTest = MapTest.newBuilder() .setA(1) .putMap1("a", "b") .putMap1("c", "d") .putMap2("f", innerMessageTest) .putMap3("e", ByteString.copyFrom(new byte[] {1, 2, 3})) .build(); RowData row = ProtobufTestHelper.pbBytesToRow(MapTest.class, mapTest.toByteArray()); MapData map1 = row.getMap(1); assertEquals("a", map1.keyArray().getString(0).toString()); assertEquals("b", map1.valueArray().getString(0).toString()); assertEquals("c", map1.keyArray().getString(1).toString()); assertEquals("d", map1.valueArray().getString(1).toString()); MapData map2 = row.getMap(2); assertEquals("f", map2.keyArray().getString(0).toString()); RowData rowData2 = map2.valueArray().getRow(0, 2); assertEquals(1, rowData2.getInt(0)); assertEquals(2L, rowData2.getLong(1)); MapData map3 = row.getMap(3); assertEquals("e", map3.keyArray().getString(0).toString()); assertArrayEquals(new byte[] {1, 2, 3}, map3.valueArray().getBinary(0)); }
Test conversion of proto map data to flink internal data.
testMessage
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/MapProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/MapProtoToRowTest.java
Apache-2.0
@Test public void testMessage() throws Exception { MultipleLevelMessageTest.InnerMessageTest1.InnerMessageTest2 innerMessageTest2 = MultipleLevelMessageTest.InnerMessageTest1.InnerMessageTest2.newBuilder() .setA(1) .setB(2L) .build(); MultipleLevelMessageTest.InnerMessageTest1 innerMessageTest = MultipleLevelMessageTest.InnerMessageTest1.newBuilder() .setC(false) .setA(innerMessageTest2) .build(); MultipleLevelMessageTest multipleLevelMessageTest = MultipleLevelMessageTest.newBuilder().setD(innerMessageTest).setA(1).build(); RowData row = ProtobufTestHelper.pbBytesToRow( MultipleLevelMessageTest.class, multipleLevelMessageTest.toByteArray()); assertEquals(4, row.getArity()); RowData subRow = (RowData) row.getRow(3, 2); assertFalse(subRow.getBoolean(1)); RowData subSubRow = (RowData) subRow.getRow(0, 2); assertEquals(1, subSubRow.getInt(0)); assertEquals(2L, subSubRow.getLong(1)); }
Test conversion of multiple level of proto nested message data to flink internal data.
testMessage
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/MultiLevelMessageProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/MultiLevelMessageProtoToRowTest.java
Apache-2.0
@Test public void testSimple() throws Exception { OneofTest oneofTest = OneofTest.newBuilder().setA(1).setB(2).build(); RowData row = ProtobufTestHelper.pbBytesToRow(OneofTest.class, oneofTest.toByteArray()); assertTrue(row.isNullAt(0)); assertEquals(2, row.getInt(1)); }
Test conversion of proto one_of data to flink internal data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/OneofProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/OneofProtoToRowTest.java
Apache-2.0
@Test public void testSimple() throws Exception { RowData row = GenericRowData.of(1, 2); byte[] bytes = ProtobufTestHelper.rowToPbBytes(row, OneofTest.class); OneofTest oneofTest = OneofTest.parseFrom(bytes); assertFalse(oneofTest.hasA()); assertEquals(2, oneofTest.getB()); }
Test conversion of flink internal map data to one_of proto data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/OneofRowToProtoTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/OneofRowToProtoTest.java
Apache-2.0
@Test public void testRepeatedMessage() throws Exception { RepeatedMessageTest.InnerMessageTest innerMessageTest = RepeatedMessageTest.InnerMessageTest.newBuilder().setA(1).setB(2L).build(); RepeatedMessageTest.InnerMessageTest innerMessageTest1 = RepeatedMessageTest.InnerMessageTest.newBuilder().setA(3).setB(4L).build(); RepeatedMessageTest repeatedMessageTest = RepeatedMessageTest.newBuilder() .addD(innerMessageTest) .addD(innerMessageTest1) .build(); RowData row = ProtobufTestHelper.pbBytesToRow( RepeatedMessageTest.class, repeatedMessageTest.toByteArray()); ArrayData objs = row.getArray(0); RowData subRow = objs.getRow(0, 2); assertEquals(1, subRow.getInt(0)); assertEquals(2L, subRow.getLong(1)); subRow = objs.getRow(1, 2); assertEquals(3, subRow.getInt(0)); assertEquals(4L, subRow.getLong(1)); }
Test conversion of proto repeated message data to flink internal data.
testRepeatedMessage
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/RepeatedMessageProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/RepeatedMessageProtoToRowTest.java
Apache-2.0
@Test public void testRepeatedMessage() throws Exception { RowData subRow = GenericRowData.of(1, 2L); RowData subRow2 = GenericRowData.of(3, 4L); ArrayData tmp = new GenericArrayData(new Object[] {subRow, subRow2}); RowData row = GenericRowData.of(tmp); byte[] bytes = ProtobufTestHelper.rowToPbBytes(row, RepeatedMessageTest.class); RepeatedMessageTest repeatedMessageTest = RepeatedMessageTest.parseFrom(bytes); assertEquals(2, repeatedMessageTest.getDCount()); assertEquals(1, repeatedMessageTest.getD(0).getA()); assertEquals(2L, repeatedMessageTest.getD(0).getB()); assertEquals(3, repeatedMessageTest.getD(1).getA()); assertEquals(4L, repeatedMessageTest.getD(1).getB()); }
Test conversion of flink internal array of row to proto data.
testRepeatedMessage
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/RepeatedMessageRowToProtoTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/RepeatedMessageRowToProtoTest.java
Apache-2.0
@Test public void testSimple() throws Exception { TimestampTestMulti timestampTestMulti = TimestampTestMulti.newBuilder() .setTs(Timestamp.newBuilder().setSeconds(1672498800).setNanos(123)) .build(); RowData row = ProtobufTestHelper.pbBytesToRow( TimestampTestMulti.class, timestampTestMulti.toByteArray()); RowData rowData = row.getRow(0, 2); assertEquals(1672498800, rowData.getLong(0)); assertEquals(123, rowData.getInt(1)); }
Test conversion of proto timestamp data with multiple_files options to flink internal data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampMultiProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampMultiProtoToRowTest.java
Apache-2.0
@Test public void testSimple() throws Exception { TestTimestampNomulti.TimestampTestNoMulti timestampTestNoMulti = TestTimestampNomulti.TimestampTestNoMulti.newBuilder() .setTs(Timestamp.newBuilder().setSeconds(1672498800).setNanos(123)) .build(); RowData row = ProtobufTestHelper.pbBytesToRow( TestTimestampNomulti.TimestampTestNoMulti.class, timestampTestNoMulti.toByteArray()); RowData rowData = row.getRow(0, 2); assertEquals(1672498800, rowData.getLong(0)); assertEquals(123, rowData.getInt(1)); }
Test conversion of proto timestamp data to flink internal data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampNoMultiProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampNoMultiProtoToRowTest.java
Apache-2.0
@Test public void testSimple() throws Exception { TimestampTestOuterMulti timestampTestOuterMulti = TimestampTestOuterMulti.newBuilder() .setTs(Timestamp.newBuilder().setSeconds(1672498800).setNanos(123)) .build(); RowData row = ProtobufTestHelper.pbBytesToRow( TimestampTestOuterMulti.class, timestampTestOuterMulti.toByteArray()); RowData rowData = row.getRow(0, 2); assertEquals(1672498800, rowData.getLong(0)); assertEquals(123, rowData.getInt(1)); }
Test conversion of proto timestamp data with multiple_files and outer_classname options to flink internal data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampOuterMultiProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampOuterMultiProtoToRowTest.java
Apache-2.0
@Test public void testSimple() throws Exception { TimestampTestOuterNomultiProto.TimestampTestOuterNoMulti timestampTestOuterNoMulti = TimestampTestOuterNomultiProto.TimestampTestOuterNoMulti.newBuilder() .setTs(Timestamp.newBuilder().setSeconds(1672498800).setNanos(123)) .build(); RowData row = ProtobufTestHelper.pbBytesToRow( TimestampTestOuterNomultiProto.TimestampTestOuterNoMulti.class, timestampTestOuterNoMulti.toByteArray()); RowData rowData = row.getRow(0, 2); assertEquals(1672498800, rowData.getLong(0)); assertEquals(123, rowData.getInt(1)); }
Test conversion of proto timestamp data with outer_classname options to flink internal data.
testSimple
java
apache/flink
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampOuterNoMultiProtoToRowTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/TimestampOuterNoMultiProtoToRowTest.java
Apache-2.0
private Tuple2<org.apache.hadoop.fs.Path, String> fillWithData( String base, String fileName, int fileIdx, String sampleLine) throws IOException, InterruptedException { assert (hdfs != null); org.apache.hadoop.fs.Path tmp = new org.apache.hadoop.fs.Path(base + "/." + fileName + fileIdx); FSDataOutputStream stream = hdfs.create(tmp); StringBuilder str = new StringBuilder(); for (int i = 0; i < LINES_PER_FILE; i++) { String line = fileIdx + ": " + sampleLine + " " + i + "\n"; str.append(line); stream.write(line.getBytes(ConfigConstants.DEFAULT_CHARSET)); } stream.close(); return new Tuple2<>(tmp, str.toString()); }
Create a file and fill it with content.
fillWithData
java
apache/flink
flink-fs-tests/src/test/java/org/apache/flink/hdfstests/ContinuousFileProcessingITCase.java
https://github.com/apache/flink/blob/master/flink-fs-tests/src/test/java/org/apache/flink/hdfstests/ContinuousFileProcessingITCase.java
Apache-2.0
private <OUT> ContinuousFileMonitoringFunction<OUT> createTestContinuousFileMonitoringFunction( FileInputFormat<OUT> format, FileProcessingMode fileProcessingMode) { ContinuousFileMonitoringFunction<OUT> monitoringFunction = new ContinuousFileMonitoringFunction<>(format, fileProcessingMode, 1, INTERVAL); monitoringFunction.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0)); return monitoringFunction; }
Create continuous monitoring function with 1 reader-parallelism and interval: {@link #INTERVAL}.
createTestContinuousFileMonitoringFunction
java
apache/flink
flink-fs-tests/src/test/java/org/apache/flink/hdfstests/ContinuousFileProcessingTest.java
https://github.com/apache/flink/blob/master/flink-fs-tests/src/test/java/org/apache/flink/hdfstests/ContinuousFileProcessingTest.java
Apache-2.0
@Test public void testBlobCacheRecovery() throws Exception { org.apache.flink.configuration.Configuration config = new org.apache.flink.configuration.Configuration(); config.set(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER"); config.set( BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath()); config.set(HighAvailabilityOptions.HA_STORAGE_PATH, hdfsURI); BlobStoreService blobStoreService = BlobUtils.createBlobStoreFromConfig(config); try { TestingBlobHelpers.testBlobCacheRecovery( config, blobStoreService, temporaryFolder.newFolder()); } finally { blobStoreService.cleanupAllData(); blobStoreService.close(); } }
Tests that with {@link HighAvailabilityMode#ZOOKEEPER} distributed JARs are recoverable from any participating BlobServer when uploaded via a BLOB cache.
testBlobCacheRecovery
java
apache/flink
flink-fs-tests/src/test/java/org/apache/flink/hdfstests/HDFSTest.java
https://github.com/apache/flink/blob/master/flink-fs-tests/src/test/java/org/apache/flink/hdfstests/HDFSTest.java
Apache-2.0
private static String getDefaultFlinkImage() { // The default container image that ties to the exact needed versions of both Flink and // Scala. boolean snapshot = EnvironmentInformation.getVersion() .toLowerCase(Locale.ENGLISH) .contains("snapshot"); String tag = snapshot ? "latest" : EnvironmentInformation.getVersion() + "-scala_" + EnvironmentInformation.getScalaVersion(); return "apache/flink:" + tag; }
This will only be used to support blocklist mechanism, which is experimental currently, so we do not want to expose this option in the documentation.
getDefaultFlinkImage
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
Apache-2.0
public ServiceType serviceType() { return serviceType; }
The flink rest service exposed type.
serviceType
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
Apache-2.0
public boolean isClusterIP() { return this == ClusterIP || this == Headless_ClusterIP; }
Check whether it is ClusterIP type.
isClusterIP
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
Apache-2.0
static Configuration loadConfiguration(Configuration dynamicParameters) { final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR); Preconditions.checkNotNull( configDir, "Flink configuration directory (%s) in environment should not be null!", ConfigConstants.ENV_FLINK_CONF_DIR); final Configuration configuration = GlobalConfiguration.loadConfiguration(configDir, dynamicParameters); if (KubernetesUtils.isHostNetwork(configuration)) { configuration.set(RestOptions.BIND_PORT, "0"); configuration.set(JobManagerOptions.PORT, 0); configuration.set(BlobServerOptions.PORT, "0"); configuration.set(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, "0"); configuration.set(TaskManagerOptions.RPC_PORT, "0"); } if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) { final String ipAddress = System.getenv().get(Constants.ENV_FLINK_POD_IP_ADDRESS); Preconditions.checkState( ipAddress != null, "JobManager ip address environment variable %s not set", Constants.ENV_FLINK_POD_IP_ADDRESS); configuration.set(JobManagerOptions.ADDRESS, ipAddress); configuration.set(RestOptions.ADDRESS, ipAddress); } return configuration; }
For non-HA cluster, {@link JobManagerOptions#ADDRESS} has be set to Kubernetes service name on client side. See {@link KubernetesClusterDescriptor#deployClusterInternal}. So the TaskManager will use service address to contact with JobManager. For HA cluster, {@link JobManagerOptions#ADDRESS} will be set to the pod ip address. The TaskManager use Zookeeper or other high-availability service to find the address of JobManager. @return Updated configuration
loadConfiguration
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/entrypoint/KubernetesEntrypointUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/entrypoint/KubernetesEntrypointUtils.java
Apache-2.0
@Override public String checkpointIDToName(long checkpointId) { return CHECKPOINT_ID_KEY_PREFIX + String.format("%019d", checkpointId); }
Convert a checkpoint id into a ConfigMap key. @param checkpointId to convert to the key @return key created from the given checkpoint id
checkpointIDToName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesCheckpointStoreUtil.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesCheckpointStoreUtil.java
Apache-2.0
@Override public long nameToCheckpointID(String key) { try { return Long.parseLong(key.substring(CHECKPOINT_ID_KEY_PREFIX.length())); } catch (NumberFormatException e) { LOG.warn( "Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", key); return INVALID_CHECKPOINT_ID; } }
Converts a key in ConfigMap to the checkpoint id. @param key in ConfigMap @return Checkpoint id parsed from the key
nameToCheckpointID
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesCheckpointStoreUtil.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesCheckpointStoreUtil.java
Apache-2.0
@Override public JobID nameToJobID(String key) { return JobID.fromHexString(key.substring(EXECUTION_PLAN_STORE_KEY_PREFIX.length())); }
Convert a key in ConfigMap to {@link JobID}. The key is stored with prefix {@link Constants#EXECUTION_PLAN_STORE_KEY_PREFIX}. @param key execution plan key in ConfigMap. @return the parsed {@link JobID}.
nameToJobID
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesExecutionPlanStoreUtil.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesExecutionPlanStoreUtil.java
Apache-2.0
@Override public String jobIDToName(JobID jobID) { return EXECUTION_PLAN_STORE_KEY_PREFIX + jobID; }
Convert a {@link JobID} to config map key. We will add prefix {@link Constants#EXECUTION_PLAN_STORE_KEY_PREFIX}. @param jobID job id @return a key to store execution plan in the ConfigMap
jobIDToName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesExecutionPlanStoreUtil.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesExecutionPlanStoreUtil.java
Apache-2.0
@Override public HighAvailabilityServices createHAServices(Configuration configuration, Executor executor) throws Exception { return new KubernetesLeaderElectionHaServices( FlinkKubeClientFactory.getInstance() .fromConfiguration(configuration, "kubernetes-ha-services"), executor, configuration, BlobUtils.createBlobStoreFromConfig(configuration)); }
Factory for creating Kubernetes high availability services.
createHAServices
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesHaServicesFactory.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesHaServicesFactory.java
Apache-2.0
@Override public void replace(String key, StringResourceVersion resourceVersion, T state) throws Exception { checkNotNull(key, "Key in ConfigMap."); checkNotNull(state, "State."); final RetrievableStateHandle<T> newStateHandle = storage.store(state); final byte[] serializedStateHandle = serializeOrDiscard(new StateHandleWithDeleteMarker<>(newStateHandle)); // initialize flags to serve the failure case boolean discardOldState = false; boolean discardNewState = true; // We don't want to greedily pull the old state handle as we have to do that anyway in // replaceEntry method for check of delete markers. final AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef = new AtomicReference<>(); try { final boolean success = updateConfigMap( cm -> { try { return replaceEntry( cm, key, serializedStateHandle, oldStateHandleRef); } catch (NotExistException e) { throw new CompletionException(e); } }) .get(); // swap subject for deletion in case of success discardOldState = success; discardNewState = !success; } catch (Exception ex) { final Optional<PossibleInconsistentStateException> possibleInconsistentStateException = ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class); if (possibleInconsistentStateException.isPresent()) { // it's unclear whether the state handle metadata was written to the ConfigMap - // hence, we don't discard any data discardNewState = false; throw possibleInconsistentStateException.get(); } throw ExceptionUtils.findThrowable(ex, NotExistException.class).orElseThrow(() -> ex); } finally { if (discardNewState) { newStateHandle.discardState(); } if (discardOldState) { Objects.requireNonNull( oldStateHandleRef.get(), "state handle should have been set on success") .discardState(); } } }
Replaces a state handle in ConfigMap and discards the old state handle. Wo do not lock resource version and then replace in Kubernetes. Since the ConfigMap is periodically updated by leader, the resource version changes very fast. We use a "check-existence and update" transactional operation instead. @param key Key in ConfigMap @param resourceVersion resource version when checking existence via {@link #exists}. @param state State to be added @throws NotExistException if the name does not exist @throws PossibleInconsistentStateException if a failure occurred during the update operation. It's unclear whether the operation actually succeeded or not. No state was discarded. The method's caller should handle this case properly. @throws Exception if persisting state or writing state handle failed
replace
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override public StringResourceVersion exists(String key) throws Exception { checkNotNull(key, "Key in ConfigMap."); return kubeClient .getConfigMap(configMapName) .map( configMap -> { final String content = configMap.getData().get(key); if (content != null) { try { final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(content); if (stateHandle.isMarkedForDeletion()) { return StringResourceVersion.notExisting(); } } catch (IOException e) { // Any calls to add or replace will try to remove this resource, // so we can simply treat it as non-existent. return StringResourceVersion.notExisting(); } return StringResourceVersion.valueOf( configMap.getResourceVersion()); } return StringResourceVersion.notExisting(); }) .orElseThrow(this::getConfigMapNotExistException); }
Returns the resource version of the ConfigMap. @param key Key in ConfigMap @return resource version in {@link StringResourceVersion} format. @throws Exception if the check existence operation failed
exists
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override
public RetrievableStateHandle<T> getAndLock(String key) throws Exception {
    checkNotNull(key, "Key in ConfigMap.");
    // Resolve the ConfigMap first; its absence is an error, not an empty result.
    final KubernetesConfigMap configMap =
            kubeClient.getConfigMap(configMapName).orElseThrow(this::getConfigMapNotExistException);
    if (!configMap.getData().containsKey(key)) {
        throw getKeyNotExistException(key);
    }
    final StateHandleWithDeleteMarker<T> entry =
            deserializeStateHandle(configMap.getData().get(key));
    // Entries marked for deletion are part of an unfinished removal and must not
    // be handed out to callers.
    if (entry.isMarkedForDeletion()) {
        throw getKeyMarkedAsDeletedException(key);
    }
    return entry.getInner();
}
Gets the {@link RetrievableStateHandle} stored in the given ConfigMap. @param key Key in ConfigMap @return The retrieved state handle from the specified ConfigMap and key @throws IOException if the method failed to deserialize the stored state handle @throws NotExistException when the name does not exist @throws Exception if get state handle from ConfigMap failed
getAndLock
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override
public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() {
    // Collects every valid entry (passes the key filter, deserializes cleanly,
    // not marked for deletion) as (handle, key) pairs. A missing ConfigMap yields
    // an empty list rather than an exception.
    return kubeClient
            .getConfigMap(configMapName)
            .map(
                    configMap -> {
                        final List<Tuple2<RetrievableStateHandle<T>, String>> stateHandles =
                                new ArrayList<>();
                        configMap.getData().entrySet().stream()
                                .filter(entry -> configMapKeyFilter.test(entry.getKey()))
                                .forEach(
                                        entry -> {
                                            try {
                                                final StateHandleWithDeleteMarker<T> result =
                                                        deserializeStateHandle(
                                                                entry.getValue());
                                                // Delete-marked entries belong to an
                                                // unfinished removal; skip them.
                                                if (!result.isMarkedForDeletion()) {
                                                    stateHandles.add(
                                                            new Tuple2<>(
                                                                    result.getInner(),
                                                                    entry.getKey()));
                                                }
                                            } catch (IOException e) {
                                                // Corrupted entries are skipped with a
                                                // warning instead of failing the whole
                                                // listing.
                                                LOG.warn(
                                                        "ConfigMap {} contained corrupted data. Ignoring the key {}.",
                                                        configMapName,
                                                        entry.getKey());
                                            }
                                        });
                        return stateHandles;
                    })
            .orElse(Collections.emptyList());
}
Gets all available state handles from Kubernetes. @return All state handles from ConfigMap.
getAllAndLock
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override
public Collection<String> getAllHandles() throws Exception {
    final KubernetesConfigMap configMap =
            kubeClient.getConfigMap(configMapName).orElseThrow(this::getConfigMapNotExistException);
    final List<String> validKeys = new ArrayList<>();
    for (String key : configMap.getData().keySet()) {
        if (!configMapKeyFilter.test(key)) {
            continue;
        }
        try {
            final String content = Objects.requireNonNull(configMap.getData().get(key));
            // Keys whose entries are marked for deletion are not considered valid.
            if (!deserializeStateHandle(content).isMarkedForDeletion()) {
                validKeys.add(key);
            }
        } catch (IOException e) {
            // Corrupted entries are silently skipped.
        }
    }
    return validKeys;
}
Return a list of all valid keys for state handles. @return List of valid state handle keys in Kubernetes ConfigMap @throws Exception if get state handle names from ConfigMap failed.
getAllHandles
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override
public boolean releaseAndTryRemove(String key) throws Exception {
    checkNotNull(key, "Key in ConfigMap.");
    // Carries the handle whose external state must be discarded between the two
    // update phases below.
    final AtomicReference<RetrievableStateHandle<T>> stateHandleRefer = new AtomicReference<>();
    final AtomicBoolean stateHandleDoesNotExist = new AtomicBoolean(false);
    // Phase 1: mark the entry as deleting. Phase 2 (thenCompose): discard the
    // external state and then remove the entry for good. Marking first makes the
    // removal retryable after a fail-over.
    return updateConfigMap(
                    configMap -> {
                        final String content = configMap.getData().get(key);
                        if (content != null) {
                            try {
                                final StateHandleWithDeleteMarker<T> result =
                                        deserializeStateHandle(content);
                                if (!result.isMarkedForDeletion()) {
                                    // Mark the ConfigMap entry as deleting. This basically
                                    // starts a "removal transaction" that allows us to retry
                                    // the removal if needed.
                                    configMap
                                            .getData()
                                            .put(
                                                    key,
                                                    serializeStateHandle(result.toDeleting()));
                                }
                                stateHandleRefer.set(result.getInner());
                            } catch (IOException e) {
                                logInvalidEntry(key, configMapName, e);
                                // Remove entry from the config map as we can't recover from
                                // this (the serialization would fail on the retry as well).
                                Objects.requireNonNull(configMap.getData().remove(key));
                            }
                            return Optional.of(configMap);
                        } else {
                            // Nothing to remove; treat the removal as already complete.
                            stateHandleDoesNotExist.set(true);
                        }
                        return Optional.empty();
                    })
            .thenCompose(
                    updated -> {
                        if (updated && stateHandleRefer.get() != null) {
                            try {
                                // Discard the external state BEFORE removing the entry so
                                // a crash in between can still be cleaned up on retry.
                                stateHandleRefer.get().discardState();
                                return updateConfigMap(
                                        configMap -> {
                                            // Now we can safely commit the "removal
                                            // transaction" by removing the entry from the
                                            // ConfigMap.
                                            configMap.getData().remove(key);
                                            return Optional.of(configMap);
                                        });
                            } catch (Exception e) {
                                throw new CompletionException(e);
                            }
                        }
                        return CompletableFuture.completedFuture(
                                stateHandleDoesNotExist.get() || updated);
                    })
            .get();
}
Remove the key in the state config map. The state on the external storage will be removed as well. The state handle stored under the given key, if any, is discarded. @param key Key to be removed from ConfigMap @return True if the state handle isn't listed anymore. @throws Exception if removing the key or discarding the state failed
releaseAndTryRemove
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
// Adds an entry into the given ConfigMap. If the key already holds an entry
// marked for deletion (a left-over of an interrupted removal), the removal is
// finished first; an identical re-add (client retry) is accepted as a no-op.
private Optional<KubernetesConfigMap> addEntry(
        KubernetesConfigMap configMap, String key, byte[] serializedStateHandle)
        throws Exception {
    final String oldBase64Content = configMap.getData().get(key);
    final String newBase64Content = toBase64(serializedStateHandle);
    if (oldBase64Content != null) {
        try {
            final StateHandleWithDeleteMarker<T> stateHandle =
                    deserializeStateHandle(oldBase64Content);
            if (stateHandle.isMarkedForDeletion()) {
                // This might be a left-over after the fail-over. As the remove operation is
                // idempotent let's try to finish it.
                if (!releaseAndTryRemove(key)) {
                    throw new IllegalStateException(
                            "Unable to remove the marked as deleting entry.");
                }
            } else {
                // It could happen that the kubernetes client retries a transaction that has
                // already succeeded due to network issues. So we simply ignore when the
                // new content is same as the existing one.
                if (oldBase64Content.equals(newBase64Content)) {
                    return Optional.of(configMap);
                }
                throw getKeyAlreadyExistException(key);
            }
        } catch (IOException e) {
            // Just log the invalid entry, it will be overridden
            // by the update code path below.
            logInvalidEntry(key, configMapName, e);
        }
    }
    configMap.getData().put(key, newBase64Content);
    return Optional.of(configMap);
}
Adds entry into the ConfigMap. If the entry already exists and contains delete marker, we try to finish the removal before the actual update.
addEntry
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesStateHandleStore.java
Apache-2.0
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
    // No-op base implementation: subclasses override this to apply their
    // feature-specific transformations while preserving the incoming pod state.
    return flinkPod;
}
Apply transformations on the given FlinkPod in accordance to this feature. Note that we should return a FlinkPod that keeps all of the properties of the passed FlinkPod object. <p>So this is correct: <pre>{@code Pod decoratedPod = new PodBuilder(pod) // Keeps the original state ... .build() Container decoratedContainer = new ContainerBuilder(container) // Keeps the original state ... .build() FlinkPod decoratedFlinkPod = new FlinkPodBuilder(flinkPod) // Keeps the original state ... .build() }</pre> <p>And this is incorrect: <pre>{@code Pod decoratedPod = new PodBuilder() // Loses the original state ... .build() Container decoratedContainer = new ContainerBuilder() // Loses the original state ... .build() FlinkPod decoratedFlinkPod = new FlinkPodBuilder() // Loses the original state ... .build() }</pre>
decorateFlinkPod
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/AbstractKubernetesStepDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/AbstractKubernetesStepDecorator.java
Apache-2.0
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
    // No-op base implementation: by default a decorator contributes no extra
    // Kubernetes resources.
    return Collections.emptyList();
}
Note that the method could have a side effect of modifying the Flink Configuration object, such as update the JobManager address.
buildAccompanyingKubernetesResources
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/AbstractKubernetesStepDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/AbstractKubernetesStepDecorator.java
Apache-2.0
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
    // Build the external rest Service according to the configured exposed type
    // (e.g. ClusterIP, NodePort, LoadBalancer).
    final Service externalRestService =
            kubernetesJobManagerParameters
                    .getRestServiceExposedType()
                    .serviceType()
                    .buildUpExternalRestService(kubernetesJobManagerParameters);
    return Collections.singletonList(externalRestService);
}
Creates an external Service to expose the rest port of the Flink JobManager(s).
buildAccompanyingKubernetesResources
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
Apache-2.0
/** Generates the name of the external rest Service: cluster id plus a fixed suffix. */
public static String getExternalServiceName(String clusterId) {
    return String.join("", clusterId, Constants.FLINK_REST_SERVICE_SUFFIX);
}
Generate name of the external rest Service.
getExternalServiceName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
Apache-2.0
/**
 * Generates the namespaced name ("name.namespace") of the external rest Service.
 * Used by other projects; do not delete.
 */
public static String getNamespacedExternalServiceName(String clusterId, String namespace) {
    return String.join(".", getExternalServiceName(clusterId), namespace);
}
Generate the namespaced name of the external rest Service by cluster id. This is used by other projects, so do not delete it.
getNamespacedExternalServiceName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/ExternalServiceDecorator.java
Apache-2.0
@Override public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException { if (!kubernetesJobManagerParameters.isInternalServiceEnabled()) { return Collections.emptyList(); } final String serviceName = getInternalServiceName(kubernetesJobManagerParameters.getClusterId()); final Service headlessService = HeadlessClusterIPService.INSTANCE.buildUpInternalService( kubernetesJobManagerParameters); // Set job manager address to namespaced service name final String namespace = kubernetesJobManagerParameters.getNamespace(); kubernetesJobManagerParameters .getFlinkConfiguration() .set( JobManagerOptions.ADDRESS, getNamespacedInternalServiceName(serviceName, namespace)); return Collections.singletonList(headlessService); }
Creates an internal Service which forwards the requests from the TaskManager(s) to the active JobManager. Note that only the non-HA scenario relies on this Service for internal communication, since in the HA mode, the TaskManager(s) directly connects to the JobManager via IP address.
buildAccompanyingKubernetesResources
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
Apache-2.0
/** Generates the name of the internal Service, which is simply the cluster id. */
public static String getInternalServiceName(String clusterId) {
    final String internalServiceName = clusterId;
    return internalServiceName;
}
Generate name of the internal Service.
getInternalServiceName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
Apache-2.0
/** Generates the namespaced name ("name.namespace") of the internal Service. */
public static String getNamespacedInternalServiceName(String clusterId, String namespace) {
    return String.join(".", getInternalServiceName(clusterId), namespace);
}
Generate namespaced name of the internal Service.
getNamespacedInternalServiceName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/InternalServiceDecorator.java
Apache-2.0
/**
 * Builds the TaskManager pod by running the pod template through the decorator chain.
 *
 * @param podTemplate the template pod to start from (copied, not mutated)
 * @param kubernetesTaskManagerParameters parameters describing the TaskManager
 * @return the fully decorated TaskManager pod
 */
public static KubernetesPod buildTaskManagerKubernetesPod(
        FlinkPod podTemplate, KubernetesTaskManagerParameters kubernetesTaskManagerParameters) {
    FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();

    final Configuration configuration = kubernetesTaskManagerParameters.getFlinkConfiguration();

    // Assemble the decorator chain; FlinkConfMountDecorator must come last.
    final List<KubernetesStepDecorator> stepDecorators = new ArrayList<>();
    stepDecorators.add(new InitTaskManagerDecorator(kubernetesTaskManagerParameters));
    stepDecorators.add(new EnvSecretsDecorator(kubernetesTaskManagerParameters));
    stepDecorators.add(new MountSecretsDecorator(kubernetesTaskManagerParameters));
    stepDecorators.add(new CmdTaskManagerDecorator(kubernetesTaskManagerParameters));
    if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new HadoopConfMountDecorator(kubernetesTaskManagerParameters));
    }
    if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new KerberosMountDecorator(kubernetesTaskManagerParameters));
    }
    stepDecorators.add(new FlinkConfMountDecorator(kubernetesTaskManagerParameters));

    for (KubernetesStepDecorator stepDecorator : stepDecorators) {
        flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
    }

    // Re-attach the decorated main container to the pod spec.
    final Pod resolvedPod =
            new PodBuilder(flinkPod.getPodWithoutMainContainer())
                    .editOrNewSpec()
                    .addToContainers(flinkPod.getMainContainer())
                    .endSpec()
                    .build();
    return new KubernetesPod(resolvedPod);
}
Utility class for constructing the TaskManager Pod on the JobManager.
buildTaskManagerKubernetesPod
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesTaskManagerFactory.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesTaskManagerFactory.java
Apache-2.0
// Narrows the client down to the single ConfigMap with the given name, so the
// result can be used to create a shared informer/watcher for just that resource.
// NOTE(review): the method name contains a typo ("Informabale"); kept as-is to
// avoid breaking callers elsewhere in this file.
private static Informable<ConfigMap> getInformabaleConfigMaps(
        NamespacedKubernetesClient client, String name) {
    Preconditions.checkArgument(
            !StringUtils.isNullOrWhitespaceOnly(name), "Name must not be null or empty");
    return client.configMaps().withName(name);
}
The shared informer for {@link ConfigMap}; it can be used as a shared watcher.
getInformabaleConfigMaps
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesConfigMapSharedInformer.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesConfigMapSharedInformer.java
Apache-2.0
/**
 * Builds a {@link KubernetesOwnerReference} from a key/value map. Keys are matched
 * case-insensitively; unrecognized keys are logged and ignored.
 *
 * @param stringMap map of owner-reference attributes (api version, kind, name, uid, ...)
 * @return the constructed owner reference
 */
public static KubernetesOwnerReference fromMap(Map<String, String> stringMap) {
    final OwnerReferenceBuilder ownerReferenceBuilder = new OwnerReferenceBuilder();
    stringMap.forEach(
            (k, v) -> {
                switch (k.toLowerCase()) {
                    case API_VERSION:
                        ownerReferenceBuilder.withApiVersion(v);
                        break;
                    case DELETION:
                        ownerReferenceBuilder.withBlockOwnerDeletion(Boolean.valueOf(v));
                        break;
                    case CONTROLLER:
                        ownerReferenceBuilder.withController(Boolean.valueOf(v));
                        break;
                    case KIND:
                        ownerReferenceBuilder.withKind(v);
                        break;
                    case NAME:
                        ownerReferenceBuilder.withName(v);
                        break;
                    case UUID:
                        ownerReferenceBuilder.withUid(v);
                        break;
                    default:
                        // Fix: the message previously said "toleration" (copy-paste from
                        // the toleration parser); this method parses owner references.
                        LOG.warn("Unrecognized key({}) of owner reference, will ignore.", k);
                        break;
                }
            });
    return new KubernetesOwnerReference(ownerReferenceBuilder.build());
}
Represent Owner reference resource in kubernetes.
fromMap
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesOwnerReference.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesOwnerReference.java
Apache-2.0
// Builds the external rest Service for the JobManager: the Service carries the
// cluster's common labels plus rest-service annotations, selects the JobManager
// pods, and maps the configured rest port to the rest bind port.
public Service buildUpExternalRestService(
        KubernetesJobManagerParameters kubernetesJobManagerParameters) {
    final String serviceName =
            ExternalServiceDecorator.getExternalServiceName(
                    kubernetesJobManagerParameters.getClusterId());
    return new ServiceBuilder()
            .withApiVersion(Constants.API_VERSION)
            .withNewMetadata()
            .withName(serviceName)
            .withLabels(kubernetesJobManagerParameters.getCommonLabels())
            .withAnnotations(kubernetesJobManagerParameters.getRestServiceAnnotations())
            .endMetadata()
            .withNewSpec()
            // Service type (ClusterIP/NodePort/LoadBalancer) follows the configured
            // rest service exposed type.
            .withType(
                    kubernetesJobManagerParameters
                            .getRestServiceExposedType()
                            .serviceType()
                            .getType())
            .withSelector(kubernetesJobManagerParameters.getSelectors())
            .addNewPort()
            .withName(Constants.REST_PORT_NAME)
            .withPort(kubernetesJobManagerParameters.getRestPort())
            .withNewTargetPort(kubernetesJobManagerParameters.getRestBindPort())
            .endPort()
            .endSpec()
            .build();
}
Build up the external rest service template, according to the jobManager parameters. @param kubernetesJobManagerParameters the parameters of jobManager. @return the external rest service
buildUpExternalRestService
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/services/ServiceType.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/services/ServiceType.java
Apache-2.0
/**
 * Ensures the given port option holds a fixed port. A value of 0 ("random port")
 * is replaced with the fallback port, since Kubernetes deployments need a fixed one.
 *
 * @param flinkConfig flink configuration (mutated when the fallback is applied)
 * @param port the port option to check
 * @param fallbackPort the fixed port to fall back to
 */
public static void checkAndUpdatePortConfigOption(
        Configuration flinkConfig, ConfigOption<String> port, int fallbackPort) {
    final boolean isRandomPort = KubernetesUtils.parsePort(flinkConfig, port) == 0;
    if (isRandomPort) {
        flinkConfig.set(port, String.valueOf(fallbackPort));
        LOG.info(
                "Kubernetes deployment requires a fixed port. Configuration {} will be set to {}",
                port.key(),
                fallbackPort);
    }
}
Check whether the port config option is a fixed port. If not, the fallback port will be set to configuration. @param flinkConfig flink configuration @param port config option need to be checked @param fallbackPort the fallback port that will be set to the configuration
checkAndUpdatePortConfigOption
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Parses a fixed port from the given config option; port ranges are not supported.
 *
 * @param flinkConfig flink configuration
 * @param port port config option
 * @return the parsed port
 */
public static Integer parsePort(Configuration flinkConfig, ConfigOption<String> port) {
    final String portValue = flinkConfig.get(port);
    checkNotNull(portValue, port.key() + " should not be null.");
    try {
        return Integer.parseInt(portValue);
    } catch (NumberFormatException ex) {
        throw new FlinkRuntimeException(
                port.key()
                        + " should be specified to a fixed port. Do not support a range of ports.",
                ex);
    }
}
Parse a valid port for the config option. A fixed port is expected, and do not support a range of ports. @param flinkConfig flink config @param port port config option @return valid port
parsePort
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/** Returns the selectors (common labels + TaskManager component label) for TaskManager pods. */
public static Map<String, String> getTaskManagerSelectors(String clusterId) {
    final Map<String, String> selectors = getCommonLabels(clusterId);
    selectors.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_TASK_MANAGER);
    return Collections.unmodifiableMap(selectors);
}
Get task manager selectors for the current Flink cluster. They could be used to watch the pods status. @return Task manager labels.
getTaskManagerSelectors
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/** Returns the selectors (common labels + JobManager component label) for JobManager pods. */
public static Map<String, String> getJobManagerSelectors(String clusterId) {
    final Map<String, String> selectors = getCommonLabels(clusterId);
    selectors.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
    return Collections.unmodifiableMap(selectors);
}
Get job manager selectors for the current Flink cluster. @return JobManager selectors.
getJobManagerSelectors
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Returns the common labels shared by all Kubernetes resources of a Flink cluster.
 * The returned map is intentionally mutable: callers add component-specific labels.
 */
public static Map<String, String> getCommonLabels(String clusterId) {
    final Map<String, String> commonLabels = new HashMap<>();
    commonLabels.put(Constants.LABEL_APP_KEY, clusterId);
    commonLabels.put(Constants.LABEL_TYPE_KEY, Constants.LABEL_TYPE_NATIVE_TYPE);
    return commonLabels;
}
Get the common labels for Flink native clusters. All the Kubernetes resources will be set with these labels. @param clusterId cluster id @return Return common labels map
getCommonLabels
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/** Returns an unmodifiable copy of the common labels, used to filter/clean up ConfigMaps. */
public static Map<String, String> getConfigMapLabels(String clusterId) {
    return Collections.unmodifiableMap(new HashMap<>(getCommonLabels(clusterId)));
}
Get ConfigMap labels for the current Flink cluster. They could be used to filter and clean-up the resources. @param clusterId cluster id @return Return ConfigMap labels.
getConfigMapLabels
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Checks that the list contains exactly one ConfigMap with the expected name and returns it.
 *
 * @param configMaps ConfigMap list to check
 * @param expectedConfigMapName expected ConfigMap name
 * @return the single expected ConfigMap
 */
public static KubernetesConfigMap getOnlyConfigMap(
        List<KubernetesConfigMap> configMaps, String expectedConfigMapName) {
    final boolean isSingleExpected =
            configMaps.size() == 1 && expectedConfigMapName.equals(configMaps.get(0).getName());
    if (!isSingleExpected) {
        throw new IllegalStateException(
                String.format(
                        "ConfigMap list should only contain a single ConfigMap [%s].",
                        expectedConfigMapName));
    }
    return configMaps.get(0);
}
Check the ConfigMap list should only contain the expected one. @param configMaps ConfigMap list to check @param expectedConfigMapName expected ConfigMap Name @return Return the expected ConfigMap
getOnlyConfigMap
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Extracts the {@link LeaderInformation} from the ConfigMap. Returns
 * {@link LeaderInformation#empty()} when neither the address nor the session id is present.
 */
public static LeaderInformation getLeaderInformationFromConfigMap(
        KubernetesConfigMap configMap) {
    final Map<String, String> data = configMap.getData();
    final String leaderAddress = data.get(LEADER_ADDRESS_KEY);
    final String sessionIdString = data.get(LEADER_SESSION_ID_KEY);
    if (leaderAddress == null && sessionIdString == null) {
        return LeaderInformation.empty();
    }
    // Session id may be absent while an address is present (or vice versa).
    final UUID sessionId = sessionIdString == null ? null : UUID.fromString(sessionIdString);
    return LeaderInformation.known(sessionId, leaderAddress);
}
Get the {@link LeaderInformation} from ConfigMap. @param configMap ConfigMap contains the leader information @return Parsed leader information. It could be {@link LeaderInformation#empty()} if there is no corresponding data in the ConfigMap.
getLeaderInformationFromConfigMap
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Creates a {@link DefaultExecutionPlanStore} backed by a Kubernetes state handle store,
 * with watching disabled ({@link NoOpExecutionPlanStoreWatcher}).
 *
 * @param configuration configuration used to build the state storage helper
 * @param flinkKubeClient flink kubernetes client
 * @param configMapName ConfigMap name
 * @param lockIdentity lock identity to check the leadership
 * @return the execution plan store
 * @throws Exception if creating the storage helper fails
 */
public static ExecutionPlanStore createExecutionPlanStore(
        Configuration configuration,
        FlinkKubeClient flinkKubeClient,
        String configMapName,
        String lockIdentity)
        throws Exception {
    return new DefaultExecutionPlanStore<>(
            createExecutionPlanStateHandleStore(
                    configuration, flinkKubeClient, configMapName, lockIdentity),
            NoOpExecutionPlanStoreWatcher.INSTANCE,
            KubernetesExecutionPlanStoreUtil.INSTANCE);
}
Create a {@link DefaultExecutionPlanStore} with {@link NoOpExecutionPlanStoreWatcher}. @param configuration configuration to build a RetrievableStateStorageHelper @param flinkKubeClient flink kubernetes client @param configMapName ConfigMap name @param lockIdentity lock identity to check the leadership @return a {@link DefaultExecutionPlanStore} with {@link NoOpExecutionPlanStoreWatcher} @throws Exception when create the storage helper
createExecutionPlanStore
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Creates a {@link KubernetesStateHandleStore} for {@link ExecutionPlan}s. The plans
 * themselves are persisted on the HA storage path; the ConfigMap holds only state handles.
 *
 * @param configuration configuration used to build the state storage helper
 * @param flinkKubeClient flink kubernetes client
 * @param configMapName ConfigMap name
 * @param lockIdentity lock identity to check the leadership
 * @return the state handle store
 * @throws Exception if creating the storage helper fails
 */
public static KubernetesStateHandleStore<ExecutionPlan> createExecutionPlanStateHandleStore(
        Configuration configuration,
        FlinkKubeClient flinkKubeClient,
        String configMapName,
        String lockIdentity)
        throws Exception {
    final String haStoragePath =
            HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration);
    final RetrievableStateStorageHelper<ExecutionPlan> stateStorage =
            new FileSystemStateStorageHelper<>(
                    haStoragePath, SUBMITTED_EXECUTION_PLAN_FILE_PREFIX);
    return new KubernetesStateHandleStore<>(
            flinkKubeClient,
            configMapName,
            stateStorage,
            key -> key.startsWith(EXECUTION_PLAN_STORE_KEY_PREFIX),
            lockIdentity);
}
Create a {@link KubernetesStateHandleStore} which storing {@link ExecutionPlan}. @param configuration configuration to build a RetrievableStateStorageHelper @param flinkKubeClient flink kubernetes client @param configMapName ConfigMap name @param lockIdentity lock identity to check the leadership @return a {@link KubernetesStateHandleStore} which storing {@link ExecutionPlan}. @throws Exception when create the storage helper
createExecutionPlanStateHandleStore
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
// Creates a DefaultCompletedCheckpointStore backed by a Kubernetes state handle
// store: checkpoint data lives on the HA storage path, while the ConfigMap holds
// only the (small) state handles under CHECKPOINT_ID_KEY_PREFIX keys. Previously
// completed checkpoints are recovered from the store before construction.
public static CompletedCheckpointStore createCompletedCheckpointStore(
        Configuration configuration,
        FlinkKubeClient kubeClient,
        Executor executor,
        String configMapName,
        @Nullable String lockIdentity,
        int maxNumberOfCheckpointsToRetain,
        SharedStateRegistryFactory sharedStateRegistryFactory,
        Executor ioExecutor,
        RecoveryClaimMode recoveryClaimMode)
        throws Exception {
    final RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage =
            new FileSystemStateStorageHelper<>(
                    HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(
                            configuration),
                    COMPLETED_CHECKPOINT_FILE_SUFFIX);
    final KubernetesStateHandleStore<CompletedCheckpoint> stateHandleStore =
            new KubernetesStateHandleStore<>(
                    kubeClient,
                    configMapName,
                    stateStorage,
                    k -> k.startsWith(CHECKPOINT_ID_KEY_PREFIX),
                    lockIdentity);
    // Recover previously completed checkpoints so the new store starts from them.
    Collection<CompletedCheckpoint> checkpoints =
            DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(
                    stateHandleStore, KubernetesCheckpointStoreUtil.INSTANCE);
    return new DefaultCompletedCheckpointStore<>(
            maxNumberOfCheckpointsToRetain,
            stateHandleStore,
            KubernetesCheckpointStoreUtil.INSTANCE,
            checkpoints,
            sharedStateRegistryFactory.create(ioExecutor, checkpoints, recoveryClaimMode),
            executor);
}
Create a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}. @param configuration configuration to build a RetrievableStateStorageHelper @param kubeClient flink kubernetes client @param executor executor to run blocking calls @param configMapName ConfigMap name @param lockIdentity lock identity to check the leadership @param maxNumberOfCheckpointsToRetain max number of checkpoints to retain on state store handle @param sharedStateRegistryFactory factory to create the shared state registry @param ioExecutor executor for I/O-bound operations @param recoveryClaimMode the mode in which the job is restoring @return a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}. @throws Exception when create the storage helper failed
createCompletedCheckpointStore
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
public static <T> String resolveUserDefinedValue( Configuration flinkConfig, ConfigOption<T> configOption, String valueOfConfigOptionOrDefault, @Nullable String valueOfPodTemplate, String fieldDescription) { final String resolvedValue; if (valueOfPodTemplate != null) { // The config option is explicitly set. if (flinkConfig.contains(configOption)) { resolvedValue = valueOfConfigOptionOrDefault; LOG.info( "The {} configured in pod template will be overwritten to '{}' " + "because of explicitly configured options.", fieldDescription, resolvedValue); } else { resolvedValue = valueOfPodTemplate; } } else { resolvedValue = valueOfConfigOptionOrDefault; } return resolvedValue; }
Resolve the user defined value with the precedence. First an explicit config option value is taken, then the value in pod template and at last the default value of a config option if nothing is specified. @param flinkConfig flink configuration @param configOption the config option to define the Kubernetes fields @param valueOfConfigOptionOrDefault the value defined by explicit config option or default @param valueOfPodTemplate the value defined in the pod template @param fieldDescription Kubernetes fields description @param <T> The type of value associated with the configuration option. @return the resolved value
resolveUserDefinedValue
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Resolves the DNS policy: host networking forces the host-network policy; otherwise
 * a non-blank pod-template policy is kept, falling back to the default policy.
 *
 * @param dnsPolicy DNS policy defined in the pod template spec
 * @param hostNetworkEnabled whether host networking is enabled
 * @return the resolved DNS policy
 */
public static String resolveDNSPolicy(String dnsPolicy, boolean hostNetworkEnabled) {
    if (hostNetworkEnabled) {
        return DNS_POLICY_HOSTNETWORK;
    }
    return StringUtils.isNullOrWhitespaceOnly(dnsPolicy) ? DNS_POLICY_DEFAULT : dnsPolicy;
}
Resolve the DNS policy defined value. Return DNS_POLICY_HOSTNETWORK if host network enabled. If not, check whether there is a DNS policy overridden in pod template. @param dnsPolicy DNS policy defined in pod template spec @param hostNetworkEnabled Host network enabled or not @return the resolved value
resolveDNSPolicy
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Extracts the service account from the given pod. If the {@code serviceAccount} field is not
 * set, the {@code serviceAccountName} field is used instead.
 *
 * @param flinkPod the Flink pod to inspect
 * @return the service account, or null if neither field is set
 */
@Nullable
public static String getServiceAccount(FlinkPod flinkPod) {
    final String serviceAccount =
            flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccount();
    return serviceAccount != null
            ? serviceAccount
            : flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccountName();
}
Get the service account from the input pod first, if not specified, the service account name will be used. @param flinkPod the Flink pod to parse the service account @return the parsed service account
getServiceAccount
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Renders a Kubernetes resource as pretty-printed YAML, falling back to {@link
 * KubernetesResource#toString()} if serialization fails.
 *
 * @param kubernetesResource the resource to render
 * @return the pretty-printed YAML, or the resource's toString() on failure
 */
public static String tryToGetPrettyPrintYaml(KubernetesResource kubernetesResource) {
    try {
        return yamlMapper
                .writerWithDefaultPrettyPrinter()
                .writeValueAsString(kubernetesResource);
    } catch (Exception e) {
        // Serialization may fail for exotic resources; degrade to the default representation.
        LOG.debug("Failed to get the pretty print yaml, fallback to {}", kubernetesResource, e);
        return kubernetesResource.toString();
    }
}
Try to get the pretty print yaml for Kubernetes resource. @param kubernetesResource kubernetes resource @return the pretty print yaml, or the {@link KubernetesResource#toString()} if parse failed.
tryToGetPrettyPrintYaml
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/**
 * Builds the namespaced name of a service in the form {@code <name>.<namespace>}, which is
 * resolvable via cluster DNS.
 *
 * @param service the service whose namespaced name is requested
 * @return the namespaced service name
 */
public static String getNamespacedServiceName(Service service) {
    return String.join(
            ".", service.getMetadata().getName(), service.getMetadata().getNamespace());
}
Generate namespaced name of the service.
getNamespacedServiceName
java
apache/flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/KubernetesUtils.java
Apache-2.0
/** Tests specifying the JM/TM total process memory with an explicit MB unit suffix. */
@Test
void testHeapMemoryPropertyWithUnitMB() throws Exception {
    // Both memory options are passed with an explicit "m" (MB) unit.
    final String[] args = {
        "-e",
        KubernetesSessionClusterExecutor.NAME,
        "-D" + JobManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=1024m",
        "-D" + TaskManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=2048m"
    };

    final KubernetesSessionCli cli = createFlinkKubernetesCustomCliWithJmAndTmTotalMemory(1024);
    final Configuration effectiveConfig = cli.getEffectiveConfiguration(args);
    final ClusterSpecification clusterSpec =
            getClusterClientFactory(effectiveConfig).getClusterSpecification(effectiveConfig);

    assertThat(clusterSpec.getMasterMemoryMB()).isEqualTo(1024);
    assertThat(clusterSpec.getTaskManagerMemoryMB()).isEqualTo(2048);
}
Tests the specifying heap memory with unit (MB) for job manager and task manager.
testHeapMemoryPropertyWithUnitMB
java
apache/flink
flink-kubernetes/src/test/java/org/apache/flink/kubernetes/cli/KubernetesSessionCliTest.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/test/java/org/apache/flink/kubernetes/cli/KubernetesSessionCliTest.java
Apache-2.0
/** Tests specifying the JM/TM total process memory with a non-MB unit (GB). */
@Test
void testHeapMemoryPropertyWithArbitraryUnit() throws Exception {
    // Memory options are passed with a "g" (GB) unit and must be normalized to MB.
    final String[] args = {
        "-e",
        KubernetesSessionClusterExecutor.NAME,
        "-D" + JobManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=1g",
        "-D" + TaskManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=3g"
    };

    final KubernetesSessionCli cli = createFlinkKubernetesCustomCliWithJmAndTmTotalMemory(1024);
    final Configuration effectiveConfig = cli.getEffectiveConfiguration(args);
    final ClusterSpecification clusterSpec =
            getClusterClientFactory(effectiveConfig).getClusterSpecification(effectiveConfig);

    assertThat(clusterSpec.getMasterMemoryMB()).isEqualTo(1024);
    assertThat(clusterSpec.getTaskManagerMemoryMB()).isEqualTo(3072);
}
Tests the specifying heap memory with arbitrary unit for job manager and task manager.
testHeapMemoryPropertyWithArbitraryUnit
java
apache/flink
flink-kubernetes/src/test/java/org/apache/flink/kubernetes/cli/KubernetesSessionCliTest.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/test/java/org/apache/flink/kubernetes/cli/KubernetesSessionCliTest.java
Apache-2.0
/** Builds the JobManager deployment from the pod template and extracts the resulting pod. */
@Override
protected Pod getResultPod(FlinkPod podTemplate) throws Exception {
    final KubernetesJobManagerParameters jmParameters =
            new KubernetesJobManagerParameters(
                    flinkConfig,
                    new KubernetesClusterClientFactory().getClusterSpecification(flinkConfig));
    final KubernetesJobManagerSpecification jmSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    podTemplate, jmParameters);

    final PodTemplateSpec resultTemplate =
            jmSpecification.getDeployment().getSpec().getTemplate();
    // Re-wrap the deployment's pod template spec as a standalone Pod for comparison.
    return new PodBuilder()
            .withMetadata(resultTemplate.getMetadata())
            .withSpec(resultTemplate.getSpec())
            .build();
}
General tests for the {@link KubernetesJobManagerFactory} with pod template.
getResultPod
java
apache/flink
flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesJobManagerFactoryWithPodTemplateTest.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesJobManagerFactoryWithPodTemplateTest.java
Apache-2.0
/** Builds the TaskManager pod from the pod template via the factory under test. */
@Override
protected Pod getResultPod(FlinkPod podTemplate) {
    // Random suffix keeps pod names unique across test invocations.
    final String podName = "taskmanager-" + UUID.randomUUID();
    return KubernetesTaskManagerFactory.buildTaskManagerKubernetesPod(
                    podTemplate,
                    KubernetesTestUtils.createTaskManagerParameters(flinkConfig, podName))
            .getInternalResource();
}
General tests for the {@link KubernetesTaskManagerFactory} with pod template.
getResultPod
java
apache/flink
flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesTaskManagerFactoryWithPodTemplateTest.java
https://github.com/apache/flink/blob/master/flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/factory/KubernetesTaskManagerFactoryWithPodTemplateTest.java
Apache-2.0
/**
 * Creates a {@link PatternStream} from an input data stream and a pattern.
 *
 * @param input DataStream containing the input events
 * @param pattern Pattern specification which shall be detected
 * @param <T> Type of the input events
 * @return Resulting pattern stream
 */
public static <T> PatternStream<T> pattern(DataStream<T> input, Pattern<T, ?> pattern) {
    final PatternStream<T> patternStream = new PatternStream<>(input, pattern);
    return patternStream;
}
Creates a {@link PatternStream} from an input data stream and a pattern. @param input DataStream containing the input events @param pattern Pattern specification which shall be detected @param <T> Type of the input events @return Resulting pattern stream
pattern
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/CEP.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/CEP.java
Apache-2.0
/**
 * Creates a {@link PatternStream} from an input data stream and a pattern, sorting events with
 * equal timestamps by the given comparator.
 *
 * @param input DataStream containing the input events
 * @param pattern Pattern specification which shall be detected
 * @param comparator Comparator to sort events with equal timestamps
 * @param <T> Type of the input events
 * @return Resulting pattern stream
 */
public static <T> PatternStream<T> pattern(
        DataStream<T> input, Pattern<T, ?> pattern, EventComparator<T> comparator) {
    return new PatternStream<>(input, pattern).withComparator(comparator);
}
Creates a {@link PatternStream} from an input data stream and a pattern. @param input DataStream containing the input events @param pattern Pattern specification which shall be detected @param comparator Comparator to sort events with equal timestamps @param <T> Type of the input events @return Resulting pattern stream
pattern
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/CEP.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/CEP.java
Apache-2.0
/**
 * Builds a {@link PatternProcessFunction} adapter that delegates matches to the flat select
 * function and routes timed-out partial matches to the timeout handler / side output.
 */
PatternProcessFunction<IN, OUT> build() {
    return new PatternTimeoutFlatSelectAdapter<>(flatSelectFunction, timeoutHandler, outputTag);
}
Wraps {@link PatternFlatSelectFunction} and {@link PatternFlatTimeoutFunction} in a builder. The builder will create a {@link PatternProcessFunction} adapter that handles timed out partial matches as well.
build
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternProcessFunctionBuilder.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternProcessFunctionBuilder.java
Apache-2.0
/**
 * Builds a {@link PatternProcessFunction} adapter that delegates matches to the select function
 * and routes timed-out partial matches to the timeout handler / side output.
 */
PatternProcessFunction<IN, OUT> build() {
    final PatternProcessFunction<IN, OUT> adapter =
            new PatternTimeoutSelectAdapter<>(selectFunction, timeoutHandler, outputTag);
    return adapter;
}
Wraps {@link PatternSelectFunction} and {@link PatternTimeoutFunction} in a builder. The builder will create a {@link PatternProcessFunction} adapter that handles timed out partial matches as well.
build
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternProcessFunctionBuilder.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternProcessFunctionBuilder.java
Apache-2.0
/**
 * Sends late arriving data to the side output identified by the given {@link OutputTag}. A
 * record is considered late after the watermark has passed its timestamp.
 *
 * <p>The late-data stream can be obtained via {@link
 * SingleOutputStreamOperator#getSideOutput(OutputTag)} on the operator produced by the pattern
 * processing operations.
 */
public PatternStream<T> sideOutputLateData(OutputTag<T> lateDataOutputTag) {
    // PatternStream is immutable: return a new instance over an updated builder.
    return new PatternStream<>(builder.withLateDataOutputTag(lateDataOutputTag));
}
Send late arriving data to the side output identified by the given {@link OutputTag}. A record is considered late after the watermark has passed its timestamp. <p>You can get the stream of late data using {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the pattern processing operations.
sideOutputLateData
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
Apache-2.0
/**
 * Applies a process function to each detected pattern sequence, inferring the output type from
 * the function's signature. Implement {@link TimedOutPartialMatchHandler} on the function to
 * also receive timed-out partial matches.
 *
 * @param patternProcessFunction The pattern process function called for each detected pattern
 *     sequence.
 * @param <R> Type of the resulting elements
 * @return {@link DataStream} containing the elements produced by the process function.
 */
public <R> SingleOutputStreamOperator<R> process(
        final PatternProcessFunction<T, R> patternProcessFunction) {
    // Infer the output type R from the user function's type parameters.
    final TypeInformation<R> outTypeInfo =
            TypeExtractor.getUnaryOperatorReturnType(
                    patternProcessFunction,
                    PatternProcessFunction.class,
                    0,
                    1,
                    TypeExtractor.NO_INDEX,
                    builder.getInputType(),
                    null,
                    false);
    return process(patternProcessFunction, outTypeInfo);
}
Applies a process function to the detected pattern sequence. For each pattern sequence the provided {@link PatternProcessFunction} is called. In order to process timed out partial matches as well one can use {@link TimedOutPartialMatchHandler} as additional interface. @param patternProcessFunction The pattern process function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements from the pattern process function.
process
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
Apache-2.0
/**
 * Applies a process function to each detected pattern sequence, with an explicitly specified
 * output type. Implement {@link TimedOutPartialMatchHandler} on the function to also receive
 * timed-out partial matches.
 *
 * @param patternProcessFunction The pattern process function called for each detected pattern
 *     sequence.
 * @param outTypeInfo Explicit specification of the output type.
 * @param <R> Type of the resulting elements
 * @return {@link DataStream} containing the elements produced by the process function.
 */
public <R> SingleOutputStreamOperator<R> process(
        final PatternProcessFunction<T, R> patternProcessFunction,
        final TypeInformation<R> outTypeInfo) {
    // Clean the closure before handing the function to the builder.
    final PatternProcessFunction<T, R> cleanedFunction = builder.clean(patternProcessFunction);
    return builder.build(outTypeInfo, cleanedFunction);
}
Applies a process function to the detected pattern sequence. For each pattern sequence the provided {@link PatternProcessFunction} is called. In order to process timed out partial matches as well one can use {@link TimedOutPartialMatchHandler} as additional interface. @param patternProcessFunction The pattern process function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @param outTypeInfo Explicit specification of output type. @return {@link DataStream} which contains the resulting elements from the pattern process function.
process
java
apache/flink
flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
https://github.com/apache/flink/blob/master/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternStream.java
Apache-2.0