Dataset columns:
code: string, lengths 25 to 201k
docstring: string, lengths 19 to 96.2k
func_name: string, lengths 0 to 235
language: 1 distinct value
repo: string, lengths 8 to 51
path: string, lengths 11 to 314
url: string, lengths 62 to 377
license: 7 distinct values
public String getDefaultBucketName() { return defaultBucketName; }
Returns the name of the internally used default bucket.
getDefaultBucketName
java
apache/flink
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/MinioTestContainer.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/MinioTestContainer.java
Apache-2.0
@Test void testEntropyInjectionConfig() throws Exception { final Configuration conf = new Configuration(); conf.setString("s3.entropy.key", "__entropy__"); conf.set(getIntConfigOption("s3.entropy.length"), 7); TestS3FileSystemFactory factory = new TestS3FileSystemFactory(); factory.configure(conf); FlinkS3FileSystem fs = (FlinkS3FileSystem) factory.create(new URI("s3://test")); assertThat(fs.getEntropyInjectionKey()).isEqualTo("__entropy__"); assertThat(fs.generateEntropy().length()).isEqualTo(7); }
Tests that the file system factory picks up the entropy configuration properly.
testEntropyInjectionConfig
java
apache/flink
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/S3EntropyFsFactoryTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/S3EntropyFsFactoryTest.java
Apache-2.0
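As a usage sketch of the entropy settings exercised by the test above (programmatic configuration; the marker value and bucket path are illustrative):

import org.apache.flink.configuration.Configuration;

public class EntropyConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Marker substring that entropy injection replaces in paths.
        conf.setString("s3.entropy.key", "__entropy__");
        // Number of random characters substituted for the marker.
        conf.setString("s3.entropy.length", "7");
        // A checkpoint path such as s3://bucket/__entropy__/checkpoints
        // would then get the marker replaced by 7 random characters,
        // spreading checkpoint writes across the S3 key space.
    }
}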
@Test void testMultipleTempDirsConfig() throws Exception { final Configuration conf = new Configuration(); String dir1 = "/tmp/dir1"; String dir2 = "/tmp/dir2"; conf.setString("io.tmp.dirs", dir1 + "," + dir2); TestS3FileSystemFactory factory = new TestS3FileSystemFactory(); factory.configure(conf); FlinkS3FileSystem fs = (FlinkS3FileSystem) factory.create(new URI("s3://test")); assertThat(fs.getLocalTmpDir()).isEqualTo(dir1); }
Tests that the object produced by AbstractS3FileSystemFactory contains only the first path when multiple paths are given in the config.
testMultipleTempDirsConfig
java
apache/flink
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/S3EntropyFsFactoryTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/S3EntropyFsFactoryTest.java
Apache-2.0
@Override public void execute(Runnable command) { command.run(); }
A simple executor that executes the runnable on the main thread.
execute
java
apache/flink
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImplTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImplTest.java
Apache-2.0
@Override public String getScheme() { return "s3a"; }
Simple factory for the S3 file system, registered for the <tt>s3a://</tt> scheme.
getScheme
java
apache/flink
flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/flink/fs/s3hadoop/S3AFileSystemFactory.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/flink/fs/s3hadoop/S3AFileSystemFactory.java
Apache-2.0
@Override public String serviceName() { return "s3-hadoop"; }
Delegation token provider for S3 Hadoop filesystems.
serviceName
java
apache/flink
flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/flink/fs/s3hadoop/token/S3HadoopDelegationTokenProvider.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/flink/fs/s3hadoop/token/S3HadoopDelegationTokenProvider.java
Apache-2.0
@Test void testConfigKeysForwardingHadoopStyle() { Configuration conf = new Configuration(); conf.setString("fs.s3a.access.key", "test_access_key"); conf.setString("fs.s3a.secret.key", "test_secret_key"); checkHadoopAccessKeys(conf, "test_access_key", "test_secret_key"); }
Test forwarding of standard Hadoop-style credential keys.
testConfigKeysForwardingHadoopStyle
java
apache/flink
flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
Apache-2.0
@Test void testConfigKeysForwardingShortHadoopStyle() { Configuration conf = new Configuration(); conf.setString("s3.access.key", "my_key_a"); conf.setString("s3.secret.key", "my_key_b"); checkHadoopAccessKeys(conf, "my_key_a", "my_key_b"); }
Test forwarding of shortened Hadoop-style credential keys.
testConfigKeysForwardingShortHadoopStyle
java
apache/flink
flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
Apache-2.0
private static void checkHadoopAccessKeys( Configuration flinkConf, String accessKey, String secretKey) { HadoopConfigLoader configLoader = S3FileSystemFactory.createHadoopConfigLoader(); configLoader.setFlinkConfig(flinkConf); org.apache.hadoop.conf.Configuration hadoopConf = configLoader.getOrLoadHadoopConfig(); assertThat(hadoopConf.get("fs.s3a.access.key", null)).isEqualTo(accessKey); assertThat(hadoopConf.get("fs.s3a.secret.key", null)).isEqualTo(secretKey); }
Helper that verifies that the access and secret keys from the given Flink configuration are forwarded to the Hadoop configuration under the fs.s3a.* keys.
checkHadoopAccessKeys
java
apache/flink
flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemTest.java
Apache-2.0
private void deleteObject(Path path) throws IOException { boolean success = true; IOException actualException = null; try { // empty directories will cause this method to fail as well - checking for their // existence afterwards is a workaround to cover this use-case success = super.delete(path, false); } catch (IOException e) { actualException = e; } if (!success || actualException != null) { if (exists(path)) { throw Optional.ofNullable(actualException) .orElse( new IOException( path.getPath() + " could not be deleted for unknown reasons.")); } } }
Deletes the object referenced by the passed {@code path}. This method is used to work around the fact that Presto doesn't allow us to differentiate between deleting a non-existing object and some other errors. Therefore, a final check for existence is necessary in case of an error or false return value. @param path The path referring to the object that shall be deleted. @throws IOException if an error occurred while deleting the file other than the {@code path} referring to a non-empty directory.
deleteObject
java
apache/flink
flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/FlinkS3PrestoFileSystem.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/FlinkS3PrestoFileSystem.java
Apache-2.0
@Override public String getScheme() { return "s3p"; }
Simple factory for the S3 file system, registered for the <tt>s3p://</tt> scheme.
getScheme
java
apache/flink
flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/S3PFileSystemFactory.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/S3PFileSystemFactory.java
Apache-2.0
@Override public String serviceName() { return "s3-presto"; }
Delegation token provider for S3 Presto filesystems.
serviceName
java
apache/flink
flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/token/S3PrestoDelegationTokenProvider.java
https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/token/S3PrestoDelegationTokenProvider.java
Apache-2.0
public static AvroDeserializationSchema<GenericRecord> forGeneric(Schema schema) { return forGeneric(schema, AvroEncoding.BINARY); }
Creates {@link AvroDeserializationSchema} that produces {@link GenericRecord} using the provided schema. @param schema schema of produced records @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
Apache-2.0
public static AvroDeserializationSchema<GenericRecord> forGeneric( Schema schema, AvroEncoding encoding) { return new AvroDeserializationSchema<>(GenericRecord.class, schema, encoding); }
Creates {@link AvroDeserializationSchema} that produces {@link GenericRecord} using the provided schema. @param schema schema of produced records @param encoding Avro serialization approach to use for decoding @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> AvroDeserializationSchema<T> forSpecific( Class<T> tClass) { return forSpecific(tClass, AvroEncoding.BINARY); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema. @param tClass class of record to be produced @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> AvroDeserializationSchema<T> forSpecific( Class<T> tClass, AvroEncoding encoding) { return new AvroDeserializationSchema<>(tClass, null, encoding); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema. @param tClass class of record to be produced @param encoding Avro serialization approach to use for decoding @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java
Apache-2.0
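A minimal usage sketch for the factory methods above (the inline schema string is made up; the specific-record variant is shown commented out because it needs an Avro-generated class):

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.AvroDeserializationSchema;

public class AvroDeserSketch {
    public static void main(String[] args) {
        Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":"
                        + "[{\"name\":\"name\",\"type\":\"string\"}]}");
        // Generic: records come back as untyped GenericRecord instances.
        AvroDeserializationSchema<GenericRecord> generic =
                AvroDeserializationSchema.forGeneric(schema);
        // Specific: records come back as generated POJOs; the reader schema
        // is derived from the generated class itself.
        // AvroDeserializationSchema<User> specific =
        //         AvroDeserializationSchema.forSpecific(User.class);
        System.out.println(generic.getProducedType());
    }
}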
public void setReuseAvroValue(boolean reuseAvroValue) { this.reuseAvroValue = reuseAvroValue; }
Sets the flag whether to reuse the Avro value instance for all records. By default, the input format reuses the Avro value. @param reuseAvroValue True, if the input format should reuse the Avro value instance, false otherwise.
setReuseAvroValue
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroInputFormat.java
Apache-2.0
@Override public BulkWriter<T> create(FSDataOutputStream out) throws IOException { return new AvroBulkWriter<>(avroBuilder.createWriter(new CloseShieldOutputStream(out))); }
Creates a new AvroBulkWriter, using the given builder to assemble the underlying Avro writer.
create
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriterFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriterFactory.java
Apache-2.0
public static <T extends SpecificRecordBase> AvroWriterFactory<T> forSpecificRecord( Class<T> type) { String schemaString = SpecificData.get().getSchema(type).toString(); AvroBuilder<T> builder = (out) -> createAvroDataFileWriter(schemaString, SpecificDatumWriter::new, out); return new AvroWriterFactory<>(builder); }
Creates an {@link AvroWriterFactory} for an Avro specific type. The Avro writers will use the schema of that specific type to build and write the records. @param type The class of the type to write.
forSpecificRecord
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriters.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriters.java
Apache-2.0
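As a usage sketch (assuming flink-connector-files on the classpath and the forGenericRecord sibling of the factory method above), the resulting AvroWriterFactory is typically handed to a bulk-format file sink:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.avro.AvroWriters;

public class AvroSinkSketch {
    public static FileSink<GenericRecord> buildSink(Schema schema) {
        // The writer factory carries the schema; the sink writes rolling
        // Avro container files, finalizing parts on checkpoints.
        return FileSink.forBulkFormat(
                        new Path("file:///tmp/avro-out"), // hypothetical output dir
                        AvroWriters.forGenericRecord(schema))
                .build();
    }
}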
public static <T> Schema extractAvroSpecificSchema(Class<T> type, SpecificData specificData) { Optional<Schema> newSchemaOptional = tryExtractAvroSchemaViaInstance(type); return newSchemaOptional.orElseGet(() -> specificData.getSchema(type)); }
Extracts an Avro {@link Schema} from a {@link SpecificRecord}. We do this either via {@link SpecificData} or by instantiating a record and extracting the schema from the instance.
extractAvroSpecificSchema
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java
Apache-2.0
public static <T extends SpecificData> SpecificData getSpecificDataForClass( Class<T> type, ClassLoader cl) { try { Field specificDataField = type.getDeclaredField("MODEL$"); specificDataField.setAccessible(true); return (SpecificData) specificDataField.get((Object) null); } catch (IllegalAccessException e) { throw new FlinkRuntimeException("Could not access the MODEL$ field of avro record", e); } catch (NoSuchFieldException e) { return new SpecificData(cl); } }
Creates a {@link SpecificData} object for a given class. Possibly uses the specific data from the generated class with logical conversions applied (Avro >= 1.9.x). <p>Copied over from {@code SpecificData#getForClass(Class<T> c)}; we do not use that method directly because we want to stay API backwards compatible with older Avro versions which did not have it.
getSpecificDataForClass
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java
Apache-2.0
@SuppressWarnings("unchecked") public static <T extends SpecificRecord> TypeInformation<Row> convertToTypeInfo( Class<T> avroClass) { return convertToTypeInfo(avroClass, true); }
Converts an Avro class into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API. @param avroClass Avro specific record that contains schema information @return type information matching the schema
convertToTypeInfo
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
Apache-2.0
@SuppressWarnings("unchecked") public static <T extends SpecificRecord> TypeInformation<Row> convertToTypeInfo( Class<T> avroClass, boolean legacyTimestampMapping) { Preconditions.checkNotNull(avroClass, "Avro specific record class must not be null."); // determine schema to retrieve deterministic field order final Schema schema = SpecificData.get().getSchema(avroClass); return (TypeInformation<Row>) convertToTypeInfo(schema, true); }
Converts an Avro class into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API. @param avroClass Avro specific record that contains schema information @param legacyTimestampMapping legacy mapping of timestamp types @return type information matching the schema
convertToTypeInfo
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
Apache-2.0
@SuppressWarnings("unchecked") public static <T> TypeInformation<T> convertToTypeInfo(String avroSchemaString) { return convertToTypeInfo(avroSchemaString, true); }
Converts an Avro schema string into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API. @param avroSchemaString Avro schema definition string @return type information matching the schema
convertToTypeInfo
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
Apache-2.0
public static DataType convertToDataType(String avroSchemaString) { return convertToDataType(avroSchemaString, true); }
Converts an Avro schema string into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API. @param avroSchemaString Avro schema definition string @return data type matching the schema
convertToDataType
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
Apache-2.0
public static Schema convertToSchema(LogicalType logicalType, String rowName) { return convertToSchema(logicalType, rowName, true); }
Converts a Flink SQL {@link LogicalType} (possibly nested) into an Avro schema. <p>"{rowName}_" is used as the prefix for nested row type names in order to generate the right schema. Nested record types that differ only in type name are still compatible. @param logicalType logical type @param rowName the record name @return Avro's {@link Schema} matching this logical type.
convertToSchema
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverter.java
Apache-2.0
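A small sketch of the round trip these converter methods support, mirroring the tests shown further below (field names are made up):

import org.apache.avro.Schema;
import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

public class SchemaRoundTripSketch {
    public static void main(String[] args) {
        DataType rowType = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.BIGINT()),
                DataTypes.FIELD("name", DataTypes.STRING()));
        // Flink logical type -> Avro schema (nullable fields become unions).
        Schema schema = AvroSchemaConverter.convertToSchema(rowType.getLogicalType());
        // ... and back to a Flink data type.
        DataType back = AvroSchemaConverter.convertToDataType(schema.toString());
        System.out.println(schema);
        System.out.println(back);
    }
}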
public void setBuffer(byte[] buf) { this.buf = buf; this.pos = 0; this.count = buf.length; }
Set buffer that can be read via the InputStream interface and reset the input stream. This has the same effect as creating a new ByteArrayInputStream with a new buffer. @param buf the new buffer to read.
setBuffer
java
apache/flink
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/utils/MutableByteArrayInputStream.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/utils/MutableByteArrayInputStream.java
Apache-2.0
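A sketch of the intended reuse pattern (assuming the class also exposes a no-argument constructor, which is not shown above):

import org.apache.flink.formats.avro.utils.MutableByteArrayInputStream;

public class ReuseStreamSketch {
    public static void main(String[] args) {
        MutableByteArrayInputStream in = new MutableByteArrayInputStream();
        in.setBuffer(new byte[] {1, 2, 3});
        System.out.println(in.read()); // 1 -- reading from position 0
        // Swap in the next record's bytes without allocating a new stream.
        in.setBuffer(new byte[] {4, 5});
        System.out.println(in.read()); // 4 -- position was reset by setBuffer
    }
}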
@Override public String[] additionalProperties() { List<String> ret = new ArrayList<>(); ret.add("'format'='avro'"); return ret.toArray(new String[0]); }
ITCase to test the Avro format for {@link AvroFileSystemFormatFactory} in stream mode.
additionalProperties
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroFilesystemStreamITCase.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroFilesystemStreamITCase.java
Apache-2.0
@Test void testTypeExtraction() { try { InputFormat<MyAvroType, ?> format = new AvroInputFormat<MyAvroType>( new Path("file:///ignore/this/file"), MyAvroType.class); TypeInformation<?> typeInfoDirect = TypeExtractor.getInputFormatTypes(format); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<MyAvroType> input = env.createInput(format); TypeInformation<?> typeInfoDataSet = input.getType(); assertThat(typeInfoDirect).isInstanceOf(PojoTypeInfo.class); assertThat(typeInfoDataSet).isInstanceOf(PojoTypeInfo.class); assertThat(typeInfoDirect.getTypeClass()).isEqualTo(MyAvroType.class); assertThat(typeInfoDataSet.getTypeClass()).isEqualTo(MyAvroType.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); } }
Tests for the type extraction of the {@link AvroInputFormat}.
testTypeExtraction
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroInputFormatTypeExtractionTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroInputFormatTypeExtractionTest.java
Apache-2.0
private void writeDefaultKryoRegistrations(String filePath) throws IOException { final File file = new File(filePath); if (file.exists()) { assertThat(file.delete()).isTrue(); } final Kryo kryo = new KryoSerializer<>(Integer.class, new SerializerConfigImpl()).getKryo(); final int nextId = kryo.getNextRegistrationId(); try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) { for (int i = 0; i < nextId; i++) { Registration registration = kryo.getRegistration(i); String str = registration.getId() + "," + registration.getType().getName(); writer.write(str, 0, str.length()); writer.newLine(); } System.out.println("Created file with registrations at " + file.getAbsolutePath()); } }
Creates a Kryo serializer and writes the default registrations out to a comma separated file with one entry per line: <pre> id,class </pre> <p>The produced file is used to check that the registered IDs don't change in future Flink versions. <p>This method is not used in the tests, but documents how the test file has been created and can be used to re-create it if needed. @param filePath File path to write registrations to
writeDefaultKryoRegistrations
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroKryoSerializerRegistrationsTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroKryoSerializerRegistrationsTest.java
Apache-2.0
@Test void testDeserializeToGenericType() throws IOException { DatumReader<GenericData.Record> datumReader = new GenericDatumReader<>(userSchema); try (FileReader<GenericData.Record> dataFileReader = DataFileReader.openReader(testFile, datumReader)) { // initialize Record by reading it from disk (that's easier than creating it by hand) GenericData.Record rec = new GenericData.Record(userSchema); dataFileReader.next(rec); // check if record has been read correctly assertThat(rec).isNotNull(); assertThat(rec.get("name").toString()).isEqualTo(TEST_NAME); assertThat(rec.get("type_enum").toString()).isEqualTo(TEST_ENUM_COLOR.toString()); assertThat(rec.get("type_long_test")).isNull(); // it is null for the first record. // now serialize it with our framework: TypeInformation<GenericData.Record> te = TypeExtractor.createTypeInfo(GenericData.Record.class); ExecutionConfig ec = new ExecutionConfig(); assertThat(te).isExactlyInstanceOf(GenericTypeInfo.class); Serializers.recursivelyRegisterType( te.getTypeClass(), ec.getSerializerConfig(), new HashSet<>()); TypeSerializer<GenericData.Record> tser = te.createSerializer(ec.getSerializerConfig()); assertThat(ec.getSerializerConfig().getDefaultKryoSerializerClasses()) .hasSize(1) .containsEntry( Schema.class, AvroKryoSerializerUtils.AvroSchemaSerializer.class); ByteArrayOutputStream out = new ByteArrayOutputStream(); try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) { tser.serialize(rec, outView); } GenericData.Record newRec; try (DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) { newRec = tser.deserialize(inView); } // check if it is still the same assertThat(newRec).isNotNull(); assertThat(newRec.get("name").toString()).isEqualTo(TEST_NAME); assertThat(newRec.get("type_enum").toString()).isEqualTo(TEST_ENUM_COLOR.toString()); assertThat(newRec.get("type_long_test")).isNull(); } }
Tests whether the Flink serialization is able to properly process GenericData.Record types. Usually users of Avro generate classes (POJOs) from Avro schemas. However, if generated classes are not available, one can also use GenericData.Record. It is an untyped key-value record which uses a schema to validate the correctness of the data. <p>It is not recommended to use GenericData.Record with Flink. Use generated POJOs instead.
testDeserializeToGenericType
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
Apache-2.0
@Test void testDeserializeToSpecificType() throws IOException { DatumReader<User> datumReader = new SpecificDatumReader<>(userSchema); try (FileReader<User> dataFileReader = DataFileReader.openReader(testFile, datumReader)) { User rec = dataFileReader.next(); // check if record has been read correctly assertThat(rec).isNotNull(); assertThat(rec.get("name").toString()).isEqualTo(TEST_NAME); assertThat(rec.get("type_enum").toString()).isEqualTo(TEST_ENUM_COLOR.toString()); // now serialize it with our framework: ExecutionConfig ec = new ExecutionConfig(); TypeInformation<User> te = TypeExtractor.createTypeInfo(User.class); assertThat(te).isExactlyInstanceOf(AvroTypeInfo.class); TypeSerializer<User> tser = te.createSerializer(ec.getSerializerConfig()); ByteArrayOutputStream out = new ByteArrayOutputStream(); try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) { tser.serialize(rec, outView); } User newRec; try (DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) { newRec = tser.deserialize(inView); } // check if it is still the same assertThat(newRec).isNotNull(); assertThat(newRec.get("name").toString()).isEqualTo(TEST_NAME); assertThat(newRec.get("type_enum").toString()).isEqualTo(TEST_ENUM_COLOR.toString()); } }
This test validates proper serialization with specific (generated POJO) types.
testDeserializeToSpecificType
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
Apache-2.0
@Test void testDeserializationGenericRecord() throws IOException { Configuration parameters = new Configuration(); AvroInputFormat<GenericRecord> format = new AvroInputFormat<>(new Path(testFile.getAbsolutePath()), GenericRecord.class); doTestDeserializationGenericRecord(format, parameters); }
Test if the AvroInputFormat is able to properly read data from an Avro file as a GenericRecord.
testDeserializationGenericRecord
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
Apache-2.0
@SuppressWarnings("unchecked") private void doTestDeserializationGenericRecord( final AvroInputFormat<GenericRecord> format, final Configuration parameters) throws IOException { try { format.configure(parameters); FileInputSplit[] splits = format.createInputSplits(1); assertThat(splits).hasSize(1); format.open(splits[0]); GenericRecord u = format.nextRecord(null); assertThat(u).isNotNull(); assertThat(u.getSchema()).isEqualTo(userSchema); String name = u.get("name").toString(); assertThat(name).isEqualTo(TEST_NAME); // check arrays List<CharSequence> sl = (List<CharSequence>) u.get("type_array_string"); assertThat(sl.get(0).toString()).isEqualTo(TEST_ARRAY_STRING_1); assertThat(sl.get(1).toString()).isEqualTo(TEST_ARRAY_STRING_2); List<Boolean> bl = (List<Boolean>) u.get("type_array_boolean"); assertThat(bl).containsExactly(TEST_ARRAY_BOOLEAN_1, TEST_ARRAY_BOOLEAN_2); // check enums GenericData.EnumSymbol enumValue = (GenericData.EnumSymbol) u.get("type_enum"); assertThat(enumValue).isEqualTo(TEST_ENUM_COLOR); // check maps Map<CharSequence, Long> lm = (Map<CharSequence, Long>) u.get("type_map"); assertThat(lm) .containsOnly( entry(new Utf8(TEST_MAP_KEY1), TEST_MAP_VALUE1), entry(new Utf8(TEST_MAP_KEY2), TEST_MAP_VALUE2)); assertThat(format.reachedEnd()).as("expecting second element").isFalse(); assertThat(format.nextRecord(u)).as("expecting second element").isNotNull(); assertThat(format.nextRecord(u)).isNull(); assertThat(format.reachedEnd()).isTrue(); } finally { format.close(); } }
Helper method to test GenericRecord deserialization. @param format the format to test @param parameters the configuration to use @throws IOException thrown if there is an issue
doTestDeserializationGenericRecord
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/AvroRecordInputFormatTest.java
Apache-2.0
@Test void testDataTypeToSchemaToDataTypeNullable() { DataType dataType = DataTypes.ROW( DataTypes.FIELD("f_null", DataTypes.NULL()), DataTypes.FIELD("f_boolean", DataTypes.BOOLEAN()), // tinyint and smallint all convert to int DataTypes.FIELD("f_int", DataTypes.INT()), DataTypes.FIELD("f_bigint", DataTypes.BIGINT()), DataTypes.FIELD("f_float", DataTypes.FLOAT()), DataTypes.FIELD("f_double", DataTypes.DOUBLE()), // char converts to string DataTypes.FIELD("f_string", DataTypes.STRING()), // binary converts to bytes DataTypes.FIELD("f_varbinary", DataTypes.BYTES()), DataTypes.FIELD("f_timestamp", DataTypes.TIMESTAMP(3)), DataTypes.FIELD("f_date", DataTypes.DATE()), DataTypes.FIELD("f_time", DataTypes.TIME(3)), DataTypes.FIELD("f_decimal", DataTypes.DECIMAL(10, 0)), DataTypes.FIELD( "f_row", DataTypes.ROW( DataTypes.FIELD("f0", DataTypes.INT()), DataTypes.FIELD("f1", DataTypes.TIMESTAMP(3)))), // multiset converts to map // map key is always not null DataTypes.FIELD( "f_map", DataTypes.MAP(DataTypes.STRING().notNull(), DataTypes.INT())), DataTypes.FIELD("f_array", DataTypes.ARRAY(DataTypes.INT()))); Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType()); DataType converted = AvroSchemaConverter.convertToDataType(schema.toString()); assertThat(converted).isEqualTo(dataType); }
Tests converting a nullable data type to an Avro schema and back.
testDataTypeToSchemaToDataTypeNullable
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
Apache-2.0
@Test void testDataTypeToSchemaToDataTypeNonNullable() { DataType dataType = DataTypes.ROW( DataTypes.FIELD("f_boolean", DataTypes.BOOLEAN().notNull()), // tinyint and smallint all convert to int DataTypes.FIELD("f_int", DataTypes.INT().notNull()), DataTypes.FIELD("f_bigint", DataTypes.BIGINT().notNull()), DataTypes.FIELD("f_float", DataTypes.FLOAT().notNull()), DataTypes.FIELD("f_double", DataTypes.DOUBLE().notNull()), // char converts to string DataTypes.FIELD("f_string", DataTypes.STRING().notNull()), // binary converts to bytes DataTypes.FIELD("f_varbinary", DataTypes.BYTES().notNull()), DataTypes.FIELD("f_timestamp", DataTypes.TIMESTAMP(3).notNull()), DataTypes.FIELD("f_date", DataTypes.DATE().notNull()), DataTypes.FIELD("f_time", DataTypes.TIME(3).notNull()), DataTypes.FIELD("f_decimal", DataTypes.DECIMAL(10, 0).notNull()), DataTypes.FIELD( "f_row", DataTypes.ROW( DataTypes.FIELD( "f0", DataTypes.INT().notNull()), DataTypes.FIELD( "f1", DataTypes.TIMESTAMP(3).notNull())) .notNull()), // multiset converts to map // map key is always not null DataTypes.FIELD( "f_map", DataTypes.MAP( DataTypes.STRING().notNull(), DataTypes.INT().notNull()) .notNull()), DataTypes.FIELD( "f_array", DataTypes.ARRAY(DataTypes.INT().notNull()).notNull())) .notNull(); Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType()); DataType converted = AvroSchemaConverter.convertToDataType(schema.toString()); assertThat(converted).isEqualTo(dataType); }
Tests converting a non-nullable data type to an Avro schema and back.
testDataTypeToSchemaToDataTypeNonNullable
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
Apache-2.0
@Test void testSchemaToDataTypeToSchemaNullable() { String schemaStr = "{\n" + " \"type\" : \"record\",\n" + " \"name\" : \"record\",\n" + " \"namespace\" : \"org.apache.flink.avro.generated\",\n" + " \"fields\" : [ {\n" + " \"name\" : \"f_null\",\n" + " \"type\" : \"null\",\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_boolean\",\n" + " \"type\" : [ \"null\", \"boolean\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_int\",\n" + " \"type\" : [ \"null\", \"int\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_bigint\",\n" + " \"type\" : [ \"null\", \"long\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_float\",\n" + " \"type\" : [ \"null\", \"float\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_double\",\n" + " \"type\" : [ \"null\", \"double\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_string\",\n" + " \"type\" : [ \"null\", \"string\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_varbinary\",\n" + " \"type\" : [ \"null\", \"bytes\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_timestamp\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"long\",\n" + " \"logicalType\" : \"timestamp-millis\"\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_date\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"int\",\n" + " \"logicalType\" : \"date\"\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_time\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"int\",\n" + " \"logicalType\" : \"time-millis\"\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_decimal\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"bytes\",\n" + " \"logicalType\" : \"decimal\",\n" + " \"precision\" : 10,\n" + " \"scale\" : 0\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_row\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"record\",\n" + " \"name\" : \"record_f_row\",\n" + " \"fields\" : [ {\n" + " \"name\" : \"f0\",\n" + " \"type\" : [ \"null\", \"int\" ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f1\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"long\",\n" + " \"logicalType\" : \"timestamp-millis\"\n" + " } ],\n" + " \"default\" : null\n" + " } ]\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_map\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"map\",\n" + " \"values\" : [ \"null\", \"int\" ]\n" + " } ],\n" + " \"default\" : null\n" + " }, {\n" + " \"name\" : \"f_array\",\n" + " \"type\" : [ \"null\", {\n" + " \"type\" : \"array\",\n" + " \"items\" : [ \"null\", \"int\" ]\n" + " } ],\n" + " \"default\" : null\n" + " } ]\n" + "}"; DataType dataType = AvroSchemaConverter.convertToDataType(schemaStr); Schema schema = AvroSchemaConverter.convertToSchema(dataType.getLogicalType()); assertThat(schema).isEqualTo(new Schema.Parser().parse(schemaStr)); }
Tests converting a nullable Avro schema to a data type and back.
testSchemaToDataTypeToSchemaNullable
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSchemaConverterTest.java
Apache-2.0
public Collection<TestSpecification<?, ?>> createTestSpecifications(FlinkVersion flinkVersion) throws Exception { ArrayList<TestSpecification<?, ?>> testSpecifications = new ArrayList<>(); testSpecifications.add( new TestSpecification<>( "generic-avro-serializer", flinkVersion, GenericAvroSerializerSetup.class, GenericAvroSerializerVerifier.class)); testSpecifications.add( new TestSpecification<>( "specific-avro-serializer", flinkVersion, SpecificAvroSerializerSetup.class, SpecificAvroSerializerVerifier.class)); return testSpecifications; }
Tests based on {@link TypeSerializerUpgradeTestBase} for the {@link AvroSerializer}.
createTestSpecifications
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSerializerUpgradeTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/typeutils/AvroSerializerUpgradeTest.java
Apache-2.0
public static Tuple3<Class<? extends SpecificRecord>, SpecificRecord, Row> getSpecificTestData() { final Address addr = Address.newBuilder() .setNum(42) .setStreet("Main Street 42") .setCity("Test City") .setState("Test State") .setZip("12345") .build(); final Row rowAddr = new Row(5); rowAddr.setField(0, 42); rowAddr.setField(1, "Main Street 42"); rowAddr.setField(2, "Test City"); rowAddr.setField(3, "Test State"); rowAddr.setField(4, "12345"); final User user = User.newBuilder() .setName("Charlie") .setFavoriteNumber(null) .setFavoriteColor("blue") .setTypeLongTest(1337L) .setTypeDoubleTest(1.337d) .setTypeNullTest(null) .setTypeBoolTest(false) .setTypeArrayString(Arrays.asList("hello", "world")) .setTypeArrayBoolean(Arrays.asList(true, true, false)) .setTypeNullableArray(null) .setTypeEnum(Colors.RED) .setTypeMap(Collections.singletonMap("test", 12L)) .setTypeFixed( new Fixed16( new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 })) .setTypeUnion(12.0) .setTypeNested(addr) .setTypeBytes(ByteBuffer.allocate(10)) .setTypeDate(LocalDate.parse("2014-03-01")) .setTypeTimeMillis(LocalTime.parse("12:12:12")) .setTypeTimeMicros( LocalTime.ofSecondOfDay(0).plus(123456L, ChronoUnit.MICROS)) .setTypeTimestampMillis(Instant.parse("2014-03-01T12:12:12.321Z")) .setTypeTimestampMicros( Instant.ofEpochSecond(0).plus(123456L, ChronoUnit.MICROS)) // byte array must contain the two's-complement representation of the // unscaled integer value in big-endian byte order .setTypeDecimalBytes( ByteBuffer.wrap( BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray())) // array of length n can store at most // Math.floor(Math.log10(Math.pow(2, 8 * n - 1) - 1)) // base-10 digits of precision .setTypeDecimalFixed( new Fixed2( BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray())) .build(); final Row rowUser = new Row(23); rowUser.setField(0, "Charlie"); rowUser.setField(1, null); rowUser.setField(2, "blue"); rowUser.setField(3, 1337L); rowUser.setField(4, 1.337d); rowUser.setField(5, null); rowUser.setField(6, false); rowUser.setField(7, new String[] {"hello", "world"}); rowUser.setField(8, new Boolean[] {true, true, false}); rowUser.setField(9, null); rowUser.setField(10, "RED"); rowUser.setField(11, Collections.singletonMap("test", 12L)); rowUser.setField(12, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); rowUser.setField(13, 12.0); rowUser.setField(14, rowAddr); rowUser.setField(15, new byte[10]); rowUser.setField(16, Date.valueOf("2014-03-01")); rowUser.setField(17, Time.valueOf("12:12:12")); rowUser.setField( 18, Time.valueOf(LocalTime.ofSecondOfDay(0).plus(123456L, ChronoUnit.MICROS))); rowUser.setField(19, Timestamp.valueOf("2014-03-01 12:12:12.321")); rowUser.setField( 20, Timestamp.from(Instant.ofEpochSecond(0).plus(123456L, ChronoUnit.MICROS))); rowUser.setField(21, BigDecimal.valueOf(2000, 2)); rowUser.setField(22, BigDecimal.valueOf(2000, 2)); final Tuple3<Class<? extends SpecificRecord>, SpecificRecord, Row> t = new Tuple3<>(); t.f0 = User.class; t.f1 = user; t.f2 = rowUser; return t; }
Tests all Avro data types as well as nested types for a specific record.
getSpecificTestData
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
public static Schema getLargeSchema() { SchemaBuilder.FieldAssembler<Schema> fields = SchemaBuilder.record("LargeAvroSchema") .namespace(AvroSerializerLargeGenericRecordTest.class.getName()) .fields(); for (int i = 0; i < 10000; ++i) { fields = fields.optionalString("field" + i); } Schema schema = fields.endRecord(); assert schema.toString().length() > 0xFFFF; return schema; }
Crafts a large Avro Schema which contains more than 0xFFFF characters. <p>0xFFFF is the magic threshold: once a Java string's length exceeds it, the serialization scheme changes.
getLargeSchema
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
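The 0xFFFF limit referenced above matches java.io's modified-UTF framing, where the encoded length must fit in an unsigned short. A small demonstration (the link to the Avro serializer's internal string handling is an assumption here):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UTFDataFormatException;

public class Utf16LimitSketch {
    public static void main(String[] args) throws IOException {
        String large = "x".repeat(0x10000); // one character past the limit
        try (DataOutputStream out =
                new DataOutputStream(new ByteArrayOutputStream())) {
            out.writeUTF(large); // the length prefix is an unsigned short
        } catch (UTFDataFormatException e) {
            System.out.println("too long for writeUTF: " + e.getMessage());
        }
    }
}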
public static Schema getSmallSchema() { return new org.apache.avro.Schema.Parser() .parse( "{\"type\":\"record\",\"name\":\"Dummy\",\"namespace\":\"dummy\",\"fields\": " + "[{\"name\":\"afield\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); }
Crafts a small Avro Schema which contains fewer than 0xFFFF characters.
getSmallSchema
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
public static byte[] writeRecord(GenericRecord record, Schema schema) throws IOException { return writeRecord(record, schema, AvroEncoding.BINARY); }
Writes given record using specified schema. @param record record to serialize @param schema schema to use for serialization @return serialized record
writeRecord
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
public static byte[] writeRecord(GenericRecord record, Schema schema, AvroEncoding encoding) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); Encoder encoder = createEncoder(encoding, schema, stream); new GenericDatumWriter<>(schema).write(record, encoder); encoder.flush(); return stream.toByteArray(); }
Writes given record using specified schema. @param record record to serialize @param schema schema to use for serialization @param encoding serialization approach to use @return serialized record
writeRecord
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
public static <T extends SpecificRecord> byte[] writeRecord(T record, AvroEncoding encoding) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); Encoder encoder = createEncoder(encoding, record.getSchema(), stream); @SuppressWarnings("unchecked") SpecificDatumWriter<T> writer = new SpecificDatumWriter<>((Class<T>) record.getClass()); writer.write(record, encoder); encoder.flush(); return stream.toByteArray(); }
Writes given specific record. @param record record to serialize @param encoding serialization approach to use @return serialized record
writeRecord
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
public static Encoder createEncoder( AvroEncoding encoding, Schema schema, OutputStream outputStream) throws IOException { if (encoding == AvroEncoding.JSON) { return EncoderFactory.get().jsonEncoder(schema, outputStream); } else { return EncoderFactory.get().binaryEncoder(outputStream, null); } }
Creates an Avro encoder using the requested serialization approach.
createEncoder
java
apache/flink
flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/utils/AvroTestUtils.java
Apache-2.0
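A sketch of the decoding counterpart (an assumption mirroring createEncoder with Avro's DecoderFactory, which offers jsonDecoder and binaryDecoder):

import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;

public class DecoderSketch {
    public static Decoder createDecoder(
            boolean json, Schema schema, InputStream in) throws IOException {
        // JSON decoding needs the schema up front; binary decoding does not.
        return json
                ? DecoderFactory.get().jsonDecoder(schema, in)
                : DecoderFactory.get().binaryDecoder(in, null);
    }
}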
public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> forGeneric( Schema schema, String url) { return forGeneric(schema, url, DEFAULT_IDENTITY_MAP_CAPACITY); }
Creates {@link ConfluentRegistryAvroDeserializationSchema} that produces {@link GenericRecord} using the provided reader schema and looks up the writer schema in the Confluent Schema Registry. <p>By default, this method supports up to 1000 cached schema versions. @param schema schema of produced records @param url url of schema registry to connect @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> forGeneric( Schema schema, String url, int identityMapCapacity) { return forGeneric(schema, url, identityMapCapacity, null); }
Creates {@link ConfluentRegistryAvroDeserializationSchema} that produces {@link GenericRecord} using the provided reader schema and looks up the writer schema in the Confluent Schema Registry. @param schema schema of produced records @param url url of schema registry to connect @param identityMapCapacity maximum number of cached schema versions @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> forGeneric( Schema schema, String url, @Nullable Map<String, ?> registryConfigs) { return forGeneric(schema, url, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs); }
Creates {@link ConfluentRegistryAvroDeserializationSchema} that produces {@link GenericRecord} using the provided reader schema and looks up the writer schema in the Confluent Schema Registry. <p>By default, this method supports up to 1000 cached schema versions. @param schema schema of produced records @param url URL of schema registry to connect @param registryConfigs map with additional schema registry configs (for example SSL properties) @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> forGeneric( Schema schema, String url, int identityMapCapacity, @Nullable Map<String, ?> registryConfigs) { return new ConfluentRegistryAvroDeserializationSchema<>( GenericRecord.class, schema, new CachedSchemaCoderProvider(null, url, identityMapCapacity, registryConfigs)); }
Creates {@link ConfluentRegistryAvroDeserializationSchema} that produces {@link GenericRecord} using the provided reader schema and looks up the writer schema in the Confluent Schema Registry. @param schema schema of produced records @param url URL of schema registry to connect @param identityMapCapacity maximum number of cached schema versions @param registryConfigs map with additional schema registry configs (for example SSL properties) @return deserialization schema that produces {@link GenericRecord}
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
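A usage sketch for the registryConfigs variant (the SSL keys and URL are illustrative; the map is passed through to the Confluent schema registry client):

import java.util.HashMap;
import java.util.Map;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroDeserializationSchema;

public class RegistryDeserSketch {
    public static ConfluentRegistryAvroDeserializationSchema<GenericRecord> build(
            Schema readerSchema) {
        Map<String, String> registryConfigs = new HashMap<>();
        // Hypothetical TLS settings forwarded to the registry client.
        registryConfigs.put("schema.registry.ssl.truststore.location",
                "/path/to/truststore.jks");
        registryConfigs.put("schema.registry.ssl.truststore.password", "changeit");
        return ConfluentRegistryAvroDeserializationSchema.forGeneric(
                readerSchema, "https://registry.example.com:8081", registryConfigs);
    }
}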
public static <T extends SpecificRecord> ConfluentRegistryAvroDeserializationSchema<T> forSpecific(Class<T> tClass, String url) { return forSpecific(tClass, url, DEFAULT_IDENTITY_MAP_CAPACITY, null); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema and looks up the writer schema in the Confluent Schema Registry. <p>By default, this method supports up to 1000 cached schema versions. @param tClass class of record to be produced @param url url of schema registry to connect @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> ConfluentRegistryAvroDeserializationSchema<T> forSpecific( Class<T> tClass, String url, int identityMapCapacity) { return forSpecific(tClass, url, identityMapCapacity, null); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema and looks up the writer schema in the Confluent Schema Registry. @param tClass class of record to be produced @param url url of schema registry to connect @param identityMapCapacity maximum number of cached schema versions @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> ConfluentRegistryAvroDeserializationSchema<T> forSpecific( Class<T> tClass, String url, @Nullable Map<String, ?> registryConfigs) { return forSpecific(tClass, url, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema and looks up the writer schema in the Confluent Schema Registry. <p>By default, this method supports up to 1000 cached schema versions. @param tClass class of record to be produced @param url URL of schema registry to connect @param registryConfigs map with additional schema registry configs (for example SSL properties) @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> ConfluentRegistryAvroDeserializationSchema<T> forSpecific( Class<T> tClass, String url, int identityMapCapacity, @Nullable Map<String, ?> registryConfigs) { return new ConfluentRegistryAvroDeserializationSchema<>( tClass, null, new CachedSchemaCoderProvider(null, url, identityMapCapacity, registryConfigs)); }
Creates {@link AvroDeserializationSchema} that produces instances of classes generated from an Avro schema and looks up the writer schema in the Confluent Schema Registry. @param tClass class of record to be produced @param url URL of schema registry to connect @param identityMapCapacity maximum number of cached schema versions @param registryConfigs map with additional schema registry configs (for example SSL properties) @return deserialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroDeserializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> ConfluentRegistryAvroSerializationSchema<T> forSpecific( Class<T> tClass, String subject, String schemaRegistryUrl) { return forSpecific(tClass, subject, schemaRegistryUrl, null); }
Creates {@link AvroSerializationSchema} that serializes instances of classes generated from an Avro schema into byte arrays and registers the writer schema in the Confluent Schema Registry. @param tClass the type to be serialized @param subject subject of schema registry to produce @param schemaRegistryUrl URL of schema registry to connect @return serialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
Apache-2.0
public static <T extends SpecificRecord> ConfluentRegistryAvroSerializationSchema<T> forSpecific( Class<T> tClass, String subject, String schemaRegistryUrl, @Nullable Map<String, ?> registryConfigs) { return new ConfluentRegistryAvroSerializationSchema<>( tClass, null, new CachedSchemaCoderProvider( subject, schemaRegistryUrl, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs)); }
Creates {@link AvroSerializationSchema} that serializes instances of classes generated from an Avro schema into byte arrays and registers the writer schema in the Confluent Schema Registry. @param tClass the type to be serialized @param subject subject of schema registry to produce @param schemaRegistryUrl URL of schema registry to connect @param registryConfigs map with additional schema registry configs (for example SSL properties) @return serialization schema for the given specific record class
forSpecific
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
Apache-2.0
public static ConfluentRegistryAvroSerializationSchema<GenericRecord> forGeneric( String subject, Schema schema, String schemaRegistryUrl) { return forGeneric(subject, schema, schemaRegistryUrl, null); }
Creates {@link AvroSerializationSchema} that serializes {@link GenericRecord} instances into byte arrays using the provided schema and registers the writer schema in the Confluent Schema Registry. @param subject subject of schema registry to produce @param schema schema that will be used for serialization @param schemaRegistryUrl URL of schema registry to connect @return serialization schema producing byte arrays
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
Apache-2.0
public static ConfluentRegistryAvroSerializationSchema<GenericRecord> forGeneric( String subject, Schema schema, String schemaRegistryUrl, @Nullable Map<String, ?> registryConfigs) { return new ConfluentRegistryAvroSerializationSchema<>( GenericRecord.class, schema, new CachedSchemaCoderProvider( subject, schemaRegistryUrl, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs)); }
Creates {@link AvroSerializationSchema} that produces byte arrays generated from the Avro schema and registers the writer schema with the Confluent Schema Registry. @param subject the schema registry subject to produce to @param schema schema that will be used for serialization @param schemaRegistryUrl URL of the schema registry to connect to @param registryConfigs map with additional schema registry configs (for example SSL properties) @return serialization schema for generic records
forGeneric
java
apache/flink
flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentRegistryAvroSerializationSchema.java
Apache-2.0
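A hedged sketch of the registryConfigs variant above; the SSL property keys follow Confluent client conventions and, together with the file paths and URLs, are assumptions:

Map<String, String> registryConfigs = new HashMap<>();
registryConfigs.put("schema.registry.ssl.truststore.location", "/path/to/truststore.jks"); // placeholder path
registryConfigs.put("schema.registry.ssl.truststore.password", "changeit");
ConfluentRegistryAvroSerializationSchema<GenericRecord> serializer =
        ConfluentRegistryAvroSerializationSchema.forGeneric(
                "orders-value", avroSchema, "https://registry.example.com:8081", registryConfigs);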
public CompressWriterFactory<IN> withHadoopCompression(String codecName) throws IOException { return withHadoopCompression(codecName, new Configuration()); }
Compresses the data using the provided Hadoop {@link CompressionCodec}. @param codecName Simple/complete name or alias of the CompressionCodec @return the instance of CompressWriterFactory @throws IOException if the codec cannot be found or instantiated
withHadoopCompression
java
apache/flink
flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/CompressWriterFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/CompressWriterFactory.java
Apache-2.0
public static <IN> CompressWriterFactory<IN> forExtractor(Extractor<IN> extractor) { return new CompressWriterFactory<>(extractor); }
Convenience builder for creating {@link CompressWriterFactory} instances.
forExtractor
java
apache/flink
flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/CompressWriters.java
https://github.com/apache/flink/blob/master/flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/CompressWriters.java
Apache-2.0
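A short sketch combining the two entry points above; Hadoop resolves "Gzip" by the codec's simple class name, and the sink wiring in the comment is only indicative:

CompressWriterFactory<String> factory =
        CompressWriters.forExtractor(new DefaultExtractor<String>())
                .withHadoopCompression("Gzip");
// the factory can then be handed to a bulk-format file sink, e.g.
// FileSink.forBulkFormat(outputPath, factory).build()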
@Override public byte[] extract(T element) { return (element.toString() + System.lineSeparator()).getBytes(); }
An {@link Extractor} implementation that converts an element to its string representation followed by a line separator. @param <T> The type of element to extract from.
extract
java
apache/flink
flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/extractor/DefaultExtractor.java
https://github.com/apache/flink/blob/master/flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/extractor/DefaultExtractor.java
Apache-2.0
@Override public void open(FileInputSplit split) throws IOException { super.open(split); csvInputStream = stream; long csvStart = splitStart; if (splitStart != 0) { csvStart = findNextLineStartOffset(); } if (splitLength != READ_WHOLE_SPLIT_FLAG) { stream.seek(splitStart + splitLength); long nextLineStartOffset = findNextLineStartOffset(); stream.seek(csvStart); csvInputStream = new BoundedInputStream(stream, nextLineStartOffset - csvStart); } else { stream.seek(csvStart); } }
Input format that reads CSV files. This abstract class is responsible for aligning the boundaries of an InputSplit to line boundaries.
open
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/AbstractCsvInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/AbstractCsvInputFormat.java
Apache-2.0
private long findNextLineStartOffset() throws IOException { boolean usesEscapeChar = csvSchema.usesEscapeChar(); byte[] escapeBytes = Character.toString((char) csvSchema.getEscapeChar()) .getBytes(StandardCharsets.UTF_8); long startPos = stream.getPos(); byte b; while ((b = (byte) stream.read()) != -1) { if (b == '\r' || b == '\n') { // an escape character may precede this line separator if (usesEscapeChar && stream.getPos() - startPos <= escapeBytes.length) { long front = stream.getPos() - escapeBytes.length - 1; if (front > 0) { stream.seek(front); byte[] readBytes = new byte[escapeBytes.length]; stream.read(readBytes); // front has already been checked to be greater than zero stream.read(); // move back to the byte after the current one if (Arrays.equals(escapeBytes, readBytes)) { // escaped line separator, skip it continue; } } } long pos = stream.getPos(); // handle "\r\n": the next byte may be '\n', which must be skipped if (b == '\r' && (byte) stream.read() == '\n') { return stream.getPos(); } else { return pos; } } else if (usesEscapeChar && b == escapeBytes[0]) { boolean equal = true; for (int i = 1; i < escapeBytes.length; i++) { if ((byte) stream.read() != escapeBytes[i]) { equal = false; break; } } if (equal) { // full escape sequence matched, skip the escaped byte stream.skip(1); } } } return stream.getPos(); }
Finds the next valid line separator and returns the offset of the first byte of the next line. <p>NOTE: Because UTF-8 is self-synchronizing, the byte length of a character can be determined from its first byte alone, so comparisons do not need to traverse M*N bytes.
findNextLineStartOffset
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/AbstractCsvInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/AbstractCsvInputFormat.java
Apache-2.0
static <T, R, C> CsvBulkWriter<T, R, C> forSchema( CsvMapper mapper, CsvSchema schema, Converter<T, R, C> converter, @Nullable C converterContext, FSDataOutputStream stream) { return new CsvBulkWriter<>(mapper, schema, converter, converterContext, stream); }
Builds a writer with Jackson schema and a type converter. @param mapper The specialized mapper for producing CSV. @param schema The schema that defines the mapping properties. @param converter The type converter that converts incoming elements of type {@code <T>} into elements of type JsonNode. @param converterContext The context for the converter, may be null. @param stream The output stream. @param <T> The type of the elements accepted by this writer. @param <C> The type of the converter context. @param <R> The type of the elements produced by this writer.
forSchema
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvBulkWriter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvBulkWriter.java
Apache-2.0
static void validateFormatOptions(ReadableConfig tableOptions) { final boolean hasQuoteCharacter = tableOptions.getOptional(QUOTE_CHARACTER).isPresent(); final boolean isDisabledQuoteCharacter = tableOptions.get(DISABLE_QUOTE_CHARACTER); if (isDisabledQuoteCharacter && hasQuoteCharacter) { throw new ValidationException( "Format cannot define a quote character and disabled quote character at the same time."); } // Validate the option value must be a single char. validateCharacterVal(tableOptions, FIELD_DELIMITER, true); validateCharacterVal(tableOptions, ARRAY_ELEMENT_DELIMITER); validateCharacterVal(tableOptions, QUOTE_CHARACTER); validateCharacterVal(tableOptions, ESCAPE_CHARACTER); }
A class with common CSV format constants and utility methods.
validateFormatOptions
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvCommons.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvCommons.java
Apache-2.0
private static void validateCharacterVal( ReadableConfig tableOptions, ConfigOption<String> option) { validateCharacterVal(tableOptions, option, false); }
Validates that the value of the option {@code option} is a single character.
validateCharacterVal
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvCommons.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvCommons.java
Apache-2.0
public static <T> CsvReaderFormat<T> forSchema( CsvSchema schema, TypeInformation<T> typeInformation) { return forSchema(JacksonMapperFactory::createCsvMapper, ignored -> schema, typeInformation); }
Builds a new {@code CsvReaderFormat} using a {@code CsvSchema}. @param schema The Jackson CSV schema configured for parsing specific CSV files. @param typeInformation The Flink type descriptor of the returned elements. @param <T> The type of the returned elements.
forSchema
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
Apache-2.0
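A usage sketch for the schema-based factory above; SomePojo is a hypothetical POJO, and note that this two-argument variant creates its own internal CsvMapper regardless of the mapper used to derive the schema:

CsvMapper mapper = JacksonMapperFactory.createCsvMapper();
CsvSchema schema = mapper.schemaFor(SomePojo.class).withColumnSeparator(';');
CsvReaderFormat<SomePojo> format =
        CsvReaderFormat.forSchema(schema, TypeInformation.of(SomePojo.class));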
public static <T> CsvReaderFormat<T> forSchema( SerializableSupplier<CsvMapper> mapperFactory, SerializableFunction<CsvMapper, CsvSchema> schemaGenerator, TypeInformation<T> typeInformation) { return new CsvReaderFormat<>( mapperFactory, schemaGenerator, typeInformation.getTypeClass(), (value, context) -> value, typeInformation, false); }
Builds a new {@code CsvReaderFormat} using a {@code CsvSchema} generator and {@code CsvMapper} factory. @param mapperFactory The factory creating the {@code CsvMapper}. @param schemaGenerator A generator that creates and configures the Jackson CSV schema for parsing specific CSV files, from a mapper created by the mapper factory. @param typeInformation The Flink type descriptor of the returned elements. @param <T> The type of the returned elements.
forSchema
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
Apache-2.0
public static <T> CsvReaderFormat<T> forPojo(Class<T> pojoType) { return forSchema( JacksonMapperFactory::createCsvMapper, mapper -> mapper.schemaFor(pojoType).withoutQuoteChar(), TypeInformation.of(pojoType)); }
Builds a new {@code CsvReaderFormat} for reading CSV files mapped to the provided POJO class definition. The produced reader uses default mapper and schema settings; use {@code forSchema} if you need customization. @param pojoType The type class of the POJO. @param <T> The type of the returned elements.
forPojo
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvReaderFormat.java
Apache-2.0
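A minimal sketch wiring forPojo into a FileSource; the POJO class and the input path are placeholders:

CsvReaderFormat<SomePojo> csvFormat = CsvReaderFormat.forPojo(SomePojo.class);
FileSource<SomePojo> source =
        FileSource.forRecordStreamFormat(csvFormat, new Path("/tmp/input.csv")).build();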
public static CsvSchema convert(RowTypeInfo rowType) { final Builder builder = new CsvSchema.Builder(); final String[] fields = rowType.getFieldNames(); final TypeInformation<?>[] types = rowType.getFieldTypes(); for (int i = 0; i < rowType.getArity(); i++) { builder.addColumn(new Column(i, fields[i], convertType(fields[i], types[i]))); } return builder.build(); }
Convert {@link RowTypeInfo} to {@link CsvSchema}.
convert
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
Apache-2.0
public static CsvSchema convert(RowType rowType) { Builder builder = new CsvSchema.Builder(); List<RowType.RowField> fields = rowType.getFields(); for (int i = 0; i < rowType.getFieldCount(); i++) { String fieldName = fields.get(i).getName(); LogicalType fieldType = fields.get(i).getType(); builder.addColumn(new Column(i, fieldName, convertType(fieldName, fieldType))); } return builder.build(); }
Convert {@link RowType} to {@link CsvSchema}.
convert
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
Apache-2.0
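A small sketch of the RowType conversion above; the resulting column types follow the convertType mapping (character types to STRING, numeric types to NUMBER):

RowType rowType = RowType.of(
        new LogicalType[] {new VarCharType(), new IntType()},
        new String[] {"name", "age"});
CsvSchema csvSchema = CsvRowSchemaConverter.convert(rowType);
// csvSchema columns: "name" -> STRING, "age" -> NUMBER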
private static CsvSchema.ColumnType convertType(String fieldName, TypeInformation<?> info) { if (STRING_TYPES.contains(info)) { return CsvSchema.ColumnType.STRING; } else if (NUMBER_TYPES.contains(info)) { return CsvSchema.ColumnType.NUMBER; } else if (BOOLEAN_TYPES.contains(info)) { return CsvSchema.ColumnType.BOOLEAN; } else if (info instanceof ObjectArrayTypeInfo) { validateNestedField(fieldName, ((ObjectArrayTypeInfo) info).getComponentInfo()); return CsvSchema.ColumnType.ARRAY; } else if (info instanceof BasicArrayTypeInfo) { validateNestedField(fieldName, ((BasicArrayTypeInfo) info).getComponentInfo()); return CsvSchema.ColumnType.ARRAY; } else if (info instanceof RowTypeInfo) { final TypeInformation<?>[] types = ((RowTypeInfo) info).getFieldTypes(); for (TypeInformation<?> type : types) { validateNestedField(fieldName, type); } return CsvSchema.ColumnType.ARRAY; } else if (info instanceof PrimitiveArrayTypeInfo && ((PrimitiveArrayTypeInfo) info).getComponentType() == Types.BYTE) { return CsvSchema.ColumnType.STRING; } else { throw new IllegalArgumentException( "Unsupported type information '" + info.toString() + "' for field '" + fieldName + "'."); } }
Convert {@link TypeInformation} to {@link CsvSchema.ColumnType} based on Jackson's categories.
convertType
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/CsvRowSchemaConverter.java
Apache-2.0
public static TableStats getTableStatistics(List<Path> files) { // For the CSV format, obtaining accurate statistics by scanning all files is a heavy // operation. Instead, we estimate the statistics by sampling: read the first 100 lines, // compute their total size, and compare it with the total file size to derive an // estimated row count. final int totalSampleLineCnt = 100; try { long totalFileSize = 0; int sampledRowCnt = 0; long sampledRowSize = 0; for (Path file : files) { FileSystem fs = FileSystem.get(file.toUri()); FileStatus status = fs.getFileStatus(file); totalFileSize += status.getLen(); // sample the line size if (sampledRowCnt < totalSampleLineCnt) { try (InputStreamReader isr = new InputStreamReader( Files.newInputStream(new File(file.toUri()).toPath())); BufferedReader br = new BufferedReader(isr)) { String line; while (sampledRowCnt < totalSampleLineCnt && (line = br.readLine()) != null) { sampledRowCnt += 1; sampledRowSize += (line.getBytes(StandardCharsets.UTF_8).length + 1); } } } } // If the line break is "\r\n", br.readLine() drops the '\n', which makes // sampledRowSize smaller than totalFileSize and would skew the estimate. if (sampledRowCnt < totalSampleLineCnt) { sampledRowSize = totalFileSize; } if (sampledRowSize == 0) { return TableStats.UNKNOWN; } int realSampledLineCnt = Math.min(totalSampleLineCnt, sampledRowCnt); long estimatedRowCount = totalFileSize * realSampledLineCnt / sampledRowSize; return new TableStats(estimatedRowCount); } catch (Exception e) { LOG.warn("Reporting statistics failed for Csv format: {}", e.getMessage()); return TableStats.UNKNOWN; } }
Utility for reporting statistics for the CSV format.
getTableStatistics
java
apache/flink
flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/util/CsvFormatStatisticsReportUtil.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/main/java/org/apache/flink/formats/csv/util/CsvFormatStatisticsReportUtil.java
Apache-2.0
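As a worked example of the sampling estimate above, with made-up numbers: if the first 100 sampled lines total 5,000 bytes and the files total 1,000,000 bytes, the estimated row count is 1,000,000 * 100 / 5,000 = 20,000 rows.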
@Override public String[] additionalProperties() { List<String> ret = new ArrayList<>(); ret.add("'format'='csv'"); // for test purpose ret.add("'sink.rolling-policy.file-size'='1b'"); return ret.toArray(new String[0]); }
ITCase to test csv format for {@link CsvFileFormatFactory} for streaming sink.
additionalProperties
java
apache/flink
flink-formats/flink-csv/src/test/java/org/apache/flink/formats/csv/CsvFilesystemStreamSinkITCase.java
https://github.com/apache/flink/blob/master/flink-formats/flink-csv/src/test/java/org/apache/flink/formats/csv/CsvFilesystemStreamSinkITCase.java
Apache-2.0
public Path newFolder() { return new Path(new Path(miniCluster.getURI() + "/"), UUID.randomUUID().toString()); }
Utility class for testing with HDFS FileSystem.
newFolder
java
apache/flink
flink-formats/flink-hadoop-bulk/src/test/java/org/apache/flink/formats/hadoop/bulk/committer/cluster/HDFSCluster.java
https://github.com/apache/flink/blob/master/flink-formats/flink-hadoop-bulk/src/test/java/org/apache/flink/formats/hadoop/bulk/committer/cluster/HDFSCluster.java
Apache-2.0
public StreamingFileSink<IN> createSink( StreamingFileSink.BucketsBuilder< IN, ?, ? extends StreamingFileSink.BucketsBuilder<IN, ?, ?>> bucketsBuilder, long bucketCheckInterval) { return new StreamingFileSink<>(bucketsBuilder, bucketCheckInterval); }
Factory to create the streaming file sink.
createSink
java
apache/flink
flink-formats/flink-hadoop-bulk/src/test/java/org/apache/flink/streaming/api/functions/sink/filesystem/TestStreamingFileSinkFactory.java
https://github.com/apache/flink/blob/master/flink-formats/flink-hadoop-bulk/src/test/java/org/apache/flink/streaming/api/functions/sink/filesystem/TestStreamingFileSinkFactory.java
Apache-2.0
public static JsonFormatOptions.MapNullKeyMode getMapNullKeyMode(ReadableConfig config) { String mapNullKeyMode = config.get(MAP_NULL_KEY_MODE); switch (mapNullKeyMode.toUpperCase()) { case JSON_MAP_NULL_KEY_MODE_FAIL: return JsonFormatOptions.MapNullKeyMode.FAIL; case JSON_MAP_NULL_KEY_MODE_DROP: return JsonFormatOptions.MapNullKeyMode.DROP; case JSON_MAP_NULL_KEY_MODE_LITERAL: return JsonFormatOptions.MapNullKeyMode.LITERAL; default: throw new TableException( String.format( "Unsupported map null key handling mode '%s'. Validator should have checked that.", mapNullKeyMode)); } }
Creates handling mode for null key map data. <p>See {@link #JSON_MAP_NULL_KEY_MODE_FAIL}, {@link #JSON_MAP_NULL_KEY_MODE_DROP}, and {@link #JSON_MAP_NULL_KEY_MODE_LITERAL} for more information.
getMapNullKeyMode
java
apache/flink
flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonFormatOptionsUtil.java
https://github.com/apache/flink/blob/master/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonFormatOptionsUtil.java
Apache-2.0
static void validateTimestampFormat(ReadableConfig tableOptions) { String timestampFormat = tableOptions.get(TIMESTAMP_FORMAT); if (!TIMESTAMP_FORMAT_ENUM.contains(timestampFormat)) { throw new ValidationException( String.format( "Unsupported value '%s' for %s. Supported values are [SQL, ISO-8601].", timestampFormat, TIMESTAMP_FORMAT.key())); } }
Validates that the timestamp format value is either SQL or ISO-8601.
validateTimestampFormat
java
apache/flink
flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonFormatOptionsUtil.java
https://github.com/apache/flink/blob/master/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonFormatOptionsUtil.java
Apache-2.0
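A hedged sketch exercising both of the JsonFormatOptionsUtil methods above; the option keys 'map-null-key.mode' and 'timestamp-format.standard' are assumed from JsonFormatOptions, and since validateTimestampFormat is package-private this snippet would live in the same package:

Configuration formatOptions = new Configuration();
formatOptions.setString("map-null-key.mode", "LITERAL"); // assumed option key
formatOptions.setString("timestamp-format.standard", "ISO-8601"); // assumed option key
JsonFormatOptionsUtil.validateTimestampFormat(formatOptions); // passes for ISO-8601
JsonFormatOptions.MapNullKeyMode mode =
        JsonFormatOptionsUtil.getMapNullKeyMode(formatOptions); // MapNullKeyMode.LITERAL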
public static Builder builder( DataType physicalDataType, List<ReadableMetadata> requestedMetadata, TypeInformation<RowData> producedTypeInfo) { return new Builder(physicalDataType, requestedMetadata, producedTypeInfo); }
Creates a builder for building a {@link CanalJsonDeserializationSchema}.
builder
java
apache/flink
flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/canal/CanalJsonDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/canal/CanalJsonDeserializationSchema.java
Apache-2.0
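A hedged sketch of the builder above; the setter name setIgnoreParseErrors is assumed from the Builder's API and worth verifying against the class:

CanalJsonDeserializationSchema schema =
        CanalJsonDeserializationSchema.builder(
                        physicalDataType, Collections.emptyList(), producedTypeInfo)
                .setIgnoreParseErrors(true) // assumed setter
                .build();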
@Override public String[] additionalProperties() { List<String> ret = new ArrayList<>(); ret.add("'format'='json'"); // for test purpose ret.add("'sink.rolling-policy.file-size'='1b'"); return ret.toArray(new String[0]); }
Test checkpoint for file system table factory with json format.
additionalProperties
java
apache/flink
flink-formats/flink-json/src/test/java/org/apache/flink/formats/json/JsonFsStreamSinkITCase.java
https://github.com/apache/flink/blob/master/flink-formats/flink-json/src/test/java/org/apache/flink/formats/json/JsonFsStreamSinkITCase.java
Apache-2.0
public SerializationSchemaMatcher equalsTo(Row expected) { return new SerializationSchemaResultMatcher( serializationSchema, deserializationSchema, expected); }
Builder for {@link SerializationSchemaMatcher} that can assert results after serialization and deserialization.
equalsTo
java
apache/flink
flink-formats/flink-json/src/test/java/org/apache/flink/formats/utils/SerializationSchemaMatcher.java
https://github.com/apache/flink/blob/master/flink-formats/flink-json/src/test/java/org/apache/flink/formats/utils/SerializationSchemaMatcher.java
Apache-2.0
private Pool<OrcReaderBatch<T, BatchT>> createPoolOfBatches( final SplitT split, final int numBatches) { final Pool<OrcReaderBatch<T, BatchT>> pool = new Pool<>(numBatches); for (int i = 0; i < numBatches; i++) { final OrcVectorizedBatchWrapper<BatchT> orcBatch = shim.createBatchWrapper(schema, batchSize); final OrcReaderBatch<T, BatchT> batch = createReaderBatch(split, orcBatch, pool.recycler(), batchSize); pool.add(batch); } return pool; }
Creates a pool of reusable reader batches for the given split.
createPoolOfBatches
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/AbstractOrcFileInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/AbstractOrcFileInputFormat.java
Apache-2.0
public OrcVectorizedBatchWrapper<BatchT> orcVectorizedRowBatch() { return orcVectorizedRowBatch; }
Gets the ORC VectorizedRowBatch structure from this batch.
orcVectorizedRowBatch
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/AbstractOrcFileInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/AbstractOrcFileInputFormat.java
Apache-2.0
@Override public RecordIterator<RowData> convertAndGetIterator( final OrcVectorizedBatchWrapper<BatchT> orcBatch, final long startingOffset) { // no copying from the ORC column vectors to the Flink column vectors is necessary, // because they point to the same data arrays internally by design int batchSize = orcBatch.size(); flinkColumnBatch.setNumRows(batchSize); result.set(batchSize, startingOffset, 0); return result; }
Converts one batch of ORC columnar vectors into an iterator over Flink rows. No data is copied because the Flink column vectors wrap the same underlying arrays.
convertAndGetIterator
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcColumnarRowInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcColumnarRowInputFormat.java
Apache-2.0
Object castLiteral(Serializable literal) { switch (literalType) { case LONG: if (literal instanceof Byte) { return new Long((Byte) literal); } else if (literal instanceof Short) { return new Long((Short) literal); } else if (literal instanceof Integer) { return new Long((Integer) literal); } else if (literal instanceof Long) { return literal; } else { throw new IllegalArgumentException( "A predicate on a LONG column requires an integer " + "literal, i.e., Byte, Short, Integer, or Long."); } case FLOAT: if (literal instanceof Float) { return new Double((Float) literal); } else if (literal instanceof Double) { return literal; } else if (literal instanceof BigDecimal) { return ((BigDecimal) literal).doubleValue(); } else { throw new IllegalArgumentException( "A predicate on a FLOAT column requires a floating " + "literal, i.e., Float or Double."); } case STRING: if (literal instanceof String) { return literal; } else { throw new IllegalArgumentException( "A predicate on a STRING column requires a String literal."); } case BOOLEAN: if (literal instanceof Boolean) { return literal; } else { throw new IllegalArgumentException( "A predicate on a BOOLEAN column requires a Boolean literal."); } case DATE: if (literal instanceof Date) { return literal; } else { throw new IllegalArgumentException( "A predicate on a DATE column requires a java.sql.Date literal."); } case TIMESTAMP: if (literal instanceof Timestamp) { return literal; } else { throw new IllegalArgumentException( "A predicate on a TIMESTAMP column requires a java.sql.Timestamp literal."); } case DECIMAL: if (literal instanceof BigDecimal) { return new HiveDecimalWritable(HiveDecimal.create((BigDecimal) literal)); } else { throw new IllegalArgumentException( "A predicate on a DECIMAL column requires a BigDecimal literal."); } default: throw new IllegalArgumentException("Unknown literal type " + literalType); } }
Casts the given literal to the representation required by the column's literal type, validating that the literal type is supported. Used by filter predicates that can be evaluated by the OrcInputFormat.
castLiteral
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcFilters.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcFilters.java
Apache-2.0
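A hedged usage sketch of the literal casting above, via one of the predicate classes in OrcFilters; note how an Integer literal on a LONG column is widened to Long by castLiteral:

// Equals(columnName, literalType, literal) is assumed from the surrounding OrcFilters class
OrcFilters.Predicate predicate =
        new OrcFilters.Equals("age", PredicateLeaf.Type.LONG, 32); // Integer literal, cast to Long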
static OrcShim<VectorizedRowBatch> defaultShim() { return new OrcShimV230(); }
Default shim for the bundled ORC dependency; the v2.3.0 shim should be used.
defaultShim
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShim.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShim.java
Apache-2.0
public static boolean[] computeProjectionMask(TypeDescription schema, int[] selectedFields) { // mask with all fields of the schema boolean[] projectionMask = new boolean[schema.getMaximumId() + 1]; // for each selected field for (int inIdx : selectedFields) { // set all nested fields of a selected field to true TypeDescription fieldSchema = schema.getChildren().get(inIdx); for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) { projectionMask[i] = true; } } return projectionMask; }
Computes the ORC projection mask of the fields to include from the selected fields. @return The ORC projection mask.
computeProjectionMask
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV200.java
Apache-2.0
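A worked example of the projection mask above, assuming the schema struct<a:int,b:struct<c:string,d:double>>: ORC assigns ids 0 (root), 1 (a), 2 (b), 3 (c), 4 (d), so selecting field index 1 (column b) marks ids 2 through 4:

TypeDescription schema =
        TypeDescription.fromString("struct<a:int,b:struct<c:string,d:double>>");
boolean[] mask = OrcShimV200.computeProjectionMask(schema, new int[] {1});
// mask == {false, false, true, true, true}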
@Override public boolean nextBatch(RecordReader reader, VectorizedRowBatch rowBatch) throws IOException { return reader.nextBatch(rowBatch); }
ORC shim for Hive version 2.1.0 and later versions.
nextBatch
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV210.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/shim/OrcShimV210.java
Apache-2.0
@Override public ArrayData getArray(int i) { long offset = hiveVector.offsets[i]; long length = hiveVector.lengths[i]; return new ColumnarArrayData(flinkVector, (int) offset, (int) length); }
This column vector is used to adapt Hive's ListColumnVector to Flink's ArrayColumnVector.
getArray
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcArrayColumnVector.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcArrayColumnVector.java
Apache-2.0
@Override public MapData getMap(int i) { long offset = hiveVector.offsets[i]; long length = hiveVector.lengths[i]; return new ColumnarMapData(keyFlinkVector, valueFlinkVector, (int) offset, (int) length); }
This column vector is used to adapt Hive's MapColumnVector to Flink's MapColumnVector.
getMap
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcMapColumnVector.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcMapColumnVector.java
Apache-2.0
@Override public ColumnarRowData getRow(int i) { this.columnarRowData.setRowId(i); return this.columnarRowData; }
This column vector is used to adapt Hive's StructColumnVector to Flink's RowColumnVector.
getRow
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcRowColumnVector.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcRowColumnVector.java
Apache-2.0
private static RowData convert(ArrayData arrayData, LogicalType arrayFieldType) { GenericRowData rowData = new GenericRowData(arrayData.size()); ArrayData.ElementGetter elementGetter = ArrayData.createElementGetter(arrayFieldType); for (int i = 0; i < arrayData.size(); i++) { rowData.setField(i, elementGetter.getElementOrNull(arrayData, i)); } return rowData; }
Converts ArrayData to RowData so that {@link RowDataVectorizer#setColumn(int, ColumnVector, LogicalType, RowData, int)} can be called recursively for arrays. @param arrayData input ArrayData. @param arrayFieldType LogicalType of the input ArrayData. @return RowData.
convert
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/RowDataVectorizer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/RowDataVectorizer.java
Apache-2.0
public TypeDescription getSchema() { return this.schema; }
Provides the ORC schema. @return the ORC schema
getSchema
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
Apache-2.0
public void setWriter(Writer writer) { this.writer = writer; }
This method is not intended for users; it is supposed to be called only by the {@link OrcBulkWriter}. @param writer the underlying ORC Writer.
setWriter
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
Apache-2.0
public void addUserMetadata(String key, ByteBuffer value) { this.writer.addUserMetadata(key, value); }
Adds arbitrary user metadata to the outgoing ORC file. <p>Users who want to dynamically add new metadata, either based on the input or from an external system, can do so by calling <code>addUserMetadata(...)</code> inside the overridden vectorize() method. @param key a key to label the data with. @param value the contents of the metadata.
addUserMetadata
java
apache/flink
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/Vectorizer.java
Apache-2.0
@Override public void vectorize(Record element, VectorizedRowBatch batch) throws IOException { BytesColumnVector stringVector = (BytesColumnVector) batch.cols[0]; LongColumnVector intColVector = (LongColumnVector) batch.cols[1]; int row = batch.size++; stringVector.setVal(row, element.getName().getBytes(StandardCharsets.UTF_8)); intColVector.vector[row] = element.getAge(); this.addUserMetadata( OrcBulkWriterTestUtil.USER_METADATA_KEY, OrcBulkWriterTestUtil.USER_METADATA_VALUE); }
A Vectorizer implementation used for tests. <p>It transforms an input element which is of type {@link Record} to a VectorizedRowBatch.
vectorize
java
apache/flink
flink-formats/flink-orc/src/test/java/org/apache/flink/orc/vector/RecordVectorizer.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/test/java/org/apache/flink/orc/vector/RecordVectorizer.java
Apache-2.0
private static ArrayData readList(ListColumnVector listVector, int row) { int offset = (int) listVector.offsets[row]; StructColumnVector structChild = (StructColumnVector) listVector.child; BytesColumnVector valueChild = (BytesColumnVector) structChild.fields[0]; StringData value1 = readStringData(valueChild, offset); GenericRowData arrayValue1 = new GenericRowData(1); arrayValue1.setField(0, value1); StringData value2 = readStringData(valueChild, offset + 1); GenericRowData arrayValue2 = new GenericRowData(1); arrayValue2.setField(0, (value2)); return new GenericArrayData(new Object[] {arrayValue1, arrayValue2}); }
Read ListColumnVector with specify schema {@literal array<struct<_col2_col0:string>>}.
readList
java
apache/flink
flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkRowDataWriterTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkRowDataWriterTest.java
Apache-2.0
@Test void testOrcBulkWriter(@TempDir File outDir) throws Exception { final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); final Properties writerProps = new Properties(); writerProps.setProperty("orc.compress", "LZ4"); final OrcBulkWriterFactory<Record> factory = new OrcBulkWriterFactory<>( new RecordVectorizer(schema), writerProps, new Configuration()); env.setParallelism(1); env.enableCheckpointing(100); DataStream<Record> stream = env.fromSource( TestDataGenerators.fromDataWithSnapshotsLatch( testData, TypeInformation.of(Record.class)), WatermarkStrategy.noWatermarks(), "Test Source"); stream.map(str -> str) .addSink( StreamingFileSink.forBulkFormat(new Path(outDir.toURI()), factory) .withBucketAssigner(new UniqueBucketAssigner<>("test")) .build()); env.execute(); OrcBulkWriterTestUtil.validate(outDir, testData); }
Integration test for writing data in ORC bulk format using StreamingFileSink.
testOrcBulkWriter
java
apache/flink
flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkWriterITCase.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkWriterITCase.java
Apache-2.0
@Test void testOrcBulkWriter(@TempDir File outDir) throws Exception { final Properties writerProps = new Properties(); writerProps.setProperty("orc.compress", "LZ4"); final OrcBulkWriterFactory<Record> writer = new OrcBulkWriterFactory<>( new RecordVectorizer(schema), writerProps, new Configuration()); StreamingFileSink<Record> sink = StreamingFileSink.forBulkFormat(new Path(outDir.toURI()), writer) .withBucketAssigner(new UniqueBucketAssigner<>("test")) .withBucketCheckInterval(10000) .build(); try (OneInputStreamOperatorTestHarness<Record, Object> testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink), 1, 1, 0)) { testHarness.setup(); testHarness.open(); int time = 0; for (final Record record : input) { testHarness.processElement(record, ++time); } testHarness.snapshot(1, ++time); testHarness.notifyOfCompletedCheckpoint(1); OrcBulkWriterTestUtil.validate(outDir, input); } }
Unit test for the ORC BulkWriter implementation.
testOrcBulkWriter
java
apache/flink
flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkWriterTest.java
https://github.com/apache/flink/blob/master/flink-formats/flink-orc/src/test/java/org/apache/flink/orc/writer/OrcBulkWriterTest.java
Apache-2.0
public static <SplitT extends FileSourceSplit> ParquetColumnarRowInputFormat<SplitT> createPartitionedFormat( Configuration hadoopConfig, RowType producedRowType, TypeInformation<RowData> producedTypeInfo, List<String> partitionKeys, PartitionFieldExtractor<SplitT> extractor, int batchSize, boolean isUtcTimestamp, boolean isCaseSensitive) { // TODO FLINK-25113 all this partition keys code should be pruned from the parquet format, // because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition // keys. RowType projectedRowType = new RowType( producedRowType.getFields().stream() .filter(field -> !partitionKeys.contains(field.getName())) .collect(Collectors.toList())); List<String> projectedNames = projectedRowType.getFieldNames(); ColumnBatchFactory<SplitT> factory = (SplitT split, ColumnVector[] parquetVectors) -> { // create and initialize the row batch ColumnVector[] vectors = new ColumnVector[producedRowType.getFieldCount()]; for (int i = 0; i < vectors.length; i++) { RowType.RowField field = producedRowType.getFields().get(i); vectors[i] = partitionKeys.contains(field.getName()) ? createVectorFromConstant( field.getType(), extractor.extract( split, field.getName(), field.getType()), batchSize) : parquetVectors[projectedNames.indexOf(field.getName())]; } return new VectorizedColumnBatch(vectors); }; return new ParquetColumnarRowInputFormat<>( hadoopConfig, projectedRowType, producedTypeInfo, factory, batchSize, isUtcTimestamp, isCaseSensitive); }
Create a partitioned {@link ParquetColumnarRowInputFormat}, the partition columns can be generated by {@link Path}.
createPartitionedFormat
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetColumnarRowInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetColumnarRowInputFormat.java
Apache-2.0
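A hedged sketch of the partitioned format factory above; PartitionFieldExtractor.forFileSystem and the default partition name are assumptions taken from flink-connector-files:

ParquetColumnarRowInputFormat<FileSourceSplit> format =
        ParquetColumnarRowInputFormat.createPartitionedFormat(
                hadoopConfig,
                producedRowType,
                InternalTypeInfo.of(producedRowType),
                Arrays.asList("dt"), // partition key column
                PartitionFieldExtractor.forFileSystem("__DEFAULT_PARTITION__"), // assumed helper
                2048, // batch size
                true, // UTC timestamps
                true); // case sensitive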
@Nullable @Override public RecordIterator<T> readBatch() throws IOException { final ParquetReaderBatch<T> batch = getCachedEntry(); final long rowsReturnedBefore = rowsReturned; if (!nextBatch(batch)) { batch.recycle(); return null; } final RecordIterator<T> records = batch.convertAndGetIterator(rowsReturnedBefore); // this may leave an exhausted iterator, which is a valid result for this method // and is not interpreted as end-of-input or anything skipRecord(records); return records; }
Reads and returns the next batch of records, or null when the input is exhausted. The returned iterator may itself be exhausted; that is a valid result and is not interpreted as end of input.
readBatch
java
apache/flink
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetVectorizedInputFormat.java
https://github.com/apache/flink/blob/master/flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetVectorizedInputFormat.java
Apache-2.0