index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestKeyValueInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
/**
* Tests that Avro container files of generic records with two fields 'key' and
* 'value' can be read by the AvroKeyValueInputFormat.
*/
public class TestKeyValueInput {
  /** Temporary directory for job input and output; cleaned up by JUnit. */
  @TempDir
  public File mTempDir;

  /**
   * Creates an Avro file of &lt;docid, text&gt; pairs to use for test input:
   *
   * <pre>
   * +-----+-----------------------+
   * | KEY | VALUE                 |
   * +-----+-----------------------+
   * |  1  | "apple banana carrot" |
   * |  2  | "apple banana"        |
   * |  3  | "apple"               |
   * +-----+-----------------------+
   * </pre>
   *
   * @return The avro file.
   */
  private File createInputFile() throws IOException {
    Schema keyValueSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.INT), Schema.create(Schema.Type.STRING));

    AvroKeyValue<Integer, CharSequence> record1 = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    record1.setKey(1);
    record1.setValue("apple banana carrot");

    AvroKeyValue<Integer, CharSequence> record2 = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    record2.setKey(2);
    record2.setValue("apple banana");

    AvroKeyValue<Integer, CharSequence> record3 = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    record3.setKey(3);
    record3.setValue("apple");

    return AvroFiles.createFile(new File(mTempDir, "inputKeyValues.avro"), keyValueSchema, record1.get(), record2.get(),
        record3.get());
  }

  /** A mapper for indexing documents: emits (token, docid) for each whitespace-split token of the body. */
  public static class IndexMapper extends Mapper<AvroKey<Integer>, AvroValue<CharSequence>, Text, IntWritable> {
    @Override
    protected void map(AvroKey<Integer> docid, AvroValue<CharSequence> body, Context context)
        throws IOException, InterruptedException {
      for (String token : body.datum().toString().split(" ")) {
        context.write(new Text(token), new IntWritable(docid.datum()));
      }
    }
  }

  /** A reducer for aggregating token to docid mapping into a hitlist. */
  public static class IndexReducer extends Reducer<Text, IntWritable, Text, AvroValue<List<Integer>>> {
    @Override
    protected void reduce(Text token, Iterable<IntWritable> docids, Context context)
        throws IOException, InterruptedException {
      List<Integer> hitlist = new ArrayList<>();
      for (IntWritable docid : docids) {
        hitlist.add(docid.get());
      }
      context.write(token, new AvroValue<>(hitlist));
    }
  }

  /** Runs a full map/reduce indexing job over the key/value input and checks the inverted index. */
  @Test
  void keyValueInput() throws ClassNotFoundException, IOException, InterruptedException {
    // Create a test input file.
    File inputFile = createInputFile();

    // Configure the job input.
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job, new Path(inputFile.getAbsolutePath()));
    job.setInputFormatClass(AvroKeyValueInputFormat.class);
    AvroJob.setInputKeySchema(job, Schema.create(Schema.Type.INT));
    AvroJob.setInputValueSchema(job, Schema.create(Schema.Type.STRING));

    // Configure a mapper.
    job.setMapperClass(IndexMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    // Configure a reducer.
    job.setReducerClass(IndexReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(AvroValue.class);
    AvroJob.setOutputValueSchema(job, Schema.createArray(Schema.create(Schema.Type.INT)));

    // Configure the output format.
    job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
    Path outputPath = new Path(mTempDir.getPath(), "out-index");
    FileOutputFormat.setOutputPath(job, outputPath);

    // Run the job.
    assertTrue(job.waitForCompletion(true));

    // Verify that the output Avro container file has the expected data.
    File avroFile = new File(outputPath.toString(), "part-r-00000.avro");
    DatumReader<GenericRecord> datumReader = new SpecificDatumReader<>(
        AvroKeyValue.getSchema(Schema.create(Schema.Type.STRING), Schema.createArray(Schema.create(Schema.Type.INT))));
    // try-with-resources so the reader is closed even when an assertion fails
    // (the original closed it only on the success path).
    try (DataFileReader<GenericRecord> avroFileReader = new DataFileReader<>(avroFile, datumReader)) {
      // Reducer output is sorted by token: apple, banana, carrot.
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, List<Integer>> appleRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(appleRecord.get());
      assertEquals("apple", appleRecord.getKey().toString());
      List<Integer> appleDocs = appleRecord.getValue();
      assertEquals(3, appleDocs.size());
      assertTrue(appleDocs.contains(1));
      assertTrue(appleDocs.contains(2));
      assertTrue(appleDocs.contains(3));

      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, List<Integer>> bananaRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(bananaRecord.get());
      assertEquals("banana", bananaRecord.getKey().toString());
      List<Integer> bananaDocs = bananaRecord.getValue();
      assertEquals(2, bananaDocs.size());
      assertTrue(bananaDocs.contains(1));
      assertTrue(bananaDocs.contains(2));

      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, List<Integer>> carrotRecord = new AvroKeyValue<>(avroFileReader.next());
      assertEquals("carrot", carrotRecord.getKey().toString());
      List<Integer> carrotDocs = carrotRecord.getValue();
      assertEquals(1, carrotDocs.size());
      assertTrue(carrotDocs.contains(1));

      assertFalse(avroFileReader.hasNext());
    }
  }

  /** Runs a map-only (identity) job and checks the records pass through unchanged. */
  @Test
  void keyValueInputMapOnly() throws ClassNotFoundException, IOException, InterruptedException {
    // Create a test input file.
    File inputFile = createInputFile();

    // Configure the job input.
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job, new Path(inputFile.getAbsolutePath()));
    job.setInputFormatClass(AvroKeyValueInputFormat.class);
    AvroJob.setInputKeySchema(job, Schema.create(Schema.Type.INT));
    AvroJob.setInputValueSchema(job, Schema.create(Schema.Type.STRING));

    // Configure the identity mapper.
    AvroJob.setMapOutputKeySchema(job, Schema.create(Schema.Type.INT));
    AvroJob.setMapOutputValueSchema(job, Schema.create(Schema.Type.STRING));

    // Configure zero reducers.
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(AvroKey.class);
    job.setOutputValueClass(AvroValue.class);

    // Configure the output format.
    job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
    Path outputPath = new Path(mTempDir.getPath(), "out-index");
    FileOutputFormat.setOutputPath(job, outputPath);

    // Run the job.
    assertTrue(job.waitForCompletion(true));

    // Verify that the output Avro container file has the expected data.
    File avroFile = new File(outputPath.toString(), "part-m-00000.avro");
    DatumReader<GenericRecord> datumReader = new SpecificDatumReader<>(
        AvroKeyValue.getSchema(Schema.create(Schema.Type.INT), Schema.create(Schema.Type.STRING)));
    // try-with-resources so the reader is closed even when an assertion fails.
    try (DataFileReader<GenericRecord> avroFileReader = new DataFileReader<>(avroFile, datumReader)) {
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<Integer, CharSequence> record1 = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(record1.get());
      assertEquals(1, record1.getKey().intValue());
      assertEquals("apple banana carrot", record1.getValue().toString());

      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<Integer, CharSequence> record2 = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(record2.get());
      assertEquals(2, record2.getKey().intValue());
      assertEquals("apple banana", record2.getValue().toString());

      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<Integer, CharSequence> record3 = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(record3.get());
      assertEquals(3, record3.getKey().intValue());
      assertEquals("apple", record3.getValue().toString());

      assertFalse(avroFileReader.hasNext());
    }
  }
}
| 7,000 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroKeyRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.junit.jupiter.api.Test;
public class TestAvroKeyRecordWriter {
  /**
   * Writes two int records through {@link AvroKeyRecordWriter} and verifies the
   * resulting Avro container-file bytes.
   */
  @Test
  void write() throws IOException {
    Schema writerSchema = Schema.create(Schema.Type.INT);
    GenericData dataModel = new ReflectData();
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    TaskAttemptContext context = mock(TaskAttemptContext.class);

    // Write an avro container file with two records: 1 and 2.
    AvroKeyRecordWriter<Integer> recordWriter = new AvroKeyRecordWriter<>(writerSchema, dataModel, compressionCodec,
        outputStream);
    recordWriter.write(new AvroKey<>(1), NullWritable.get());
    recordWriter.write(new AvroKey<>(2), NullWritable.get());
    recordWriter.close(context);

    // Verify that the file was written as expected. try-with-resources closes
    // the reader even if an assertion fails (the original only closed it on
    // the success path).
    InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = Schema.create(Schema.Type.INT);
    DatumReader<Integer> datumReader = new SpecificDatumReader<>(readerSchema);
    try (DataFileStream<Integer> dataFileReader = new DataFileStream<>(inputStream, datumReader)) {
      assertTrue(dataFileReader.hasNext()); // Record 1.
      assertEquals(1, dataFileReader.next().intValue());
      assertTrue(dataFileReader.hasNext()); // Record 2.
      assertEquals(2, dataFileReader.next().intValue());
      assertFalse(dataFileReader.hasNext()); // No more records.
    }

    // The writer must not consult the task configuration.
    verify(context, never()).getConfiguration();
  }

  /**
   * Verifies that {@code sync()} returns positions that can later be passed to
   * {@code DataFileReader.seek()} to jump directly to the record written after
   * the sync marker. (Renamed from the misspelled {@code sycnableWrite}.)
   */
  @Test
  void syncableWrite() throws IOException {
    Schema writerSchema = Schema.create(Schema.Type.INT);
    GenericData dataModel = new ReflectData();
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    FileOutputStream outputStream = new FileOutputStream(new File("target/temp.avro"));
    TaskAttemptContext context = mock(TaskAttemptContext.class);

    // Write an avro container file with two records: 1 and 2, capturing the
    // sync position immediately before each record.
    AvroKeyRecordWriter<Integer> recordWriter = new AvroKeyRecordWriter<>(writerSchema, dataModel, compressionCodec,
        outputStream);
    long positionOne = recordWriter.sync();
    recordWriter.write(new AvroKey<>(1), NullWritable.get());
    long positionTwo = recordWriter.sync();
    recordWriter.write(new AvroKey<>(2), NullWritable.get());
    recordWriter.close(context);

    // Verify that seeking to each captured position yields the matching record.
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    Path avroFile = new Path("target/temp.avro");
    try (DataFileReader<GenericData.Record> dataFileReader = new DataFileReader<>(new FsInput(avroFile, conf),
        new SpecificDatumReader<>())) {
      dataFileReader.seek(positionTwo);
      assertTrue(dataFileReader.hasNext()); // Record 2.
      assertEquals(2, dataFileReader.next());
      dataFileReader.seek(positionOne);
      assertTrue(dataFileReader.hasNext()); // Record 1.
      assertEquals(1, dataFileReader.next());
    }

    verify(context, never()).getConfiguration();
  }
}
| 7,001 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroKeyOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;
import org.apache.avro.generic.GenericData;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.reflect.ReflectData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.ArgumentCaptor;
/**
 * Tests that AvroKeyOutputFormat constructs its record writer with the codec
 * and sync interval implied by the job configuration (both Avro-specific keys
 * and the generic Hadoop compression keys).
 */
public class TestAvroKeyOutputFormat {
// Configuration key controlling the Avro container-file sync interval.
private static final String SYNC_INTERVAL_KEY = org.apache.avro.mapred.AvroOutputFormat.SYNC_INTERVAL_KEY;
// A non-default interval so tests can tell the configured value was honored.
private static final int TEST_SYNC_INTERVAL = 12345;
/** Temporary directory used as the job output dir. */
@TempDir
public File mTempDir;
/** Uncompressed output: expects the null codec plus the configured sync interval. */
@Test
void withNullCodec() throws IOException {
Configuration conf = new Configuration();
conf.setInt(SYNC_INTERVAL_KEY, TEST_SYNC_INTERVAL);
testGetRecordWriter(conf, CodecFactory.nullCodec(), TEST_SYNC_INTERVAL);
}
/** Deflate selected via the Avro deflate-level key; sync interval left at the default. */
@Test
void withDeflateCodec() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.setInt(org.apache.avro.mapred.AvroOutputFormat.DEFLATE_LEVEL_KEY, 3);
testGetRecordWriter(conf, CodecFactory.deflateCodec(3), DataFileConstants.DEFAULT_SYNC_INTERVAL);
}
/** Snappy selected through the Avro output-codec key. */
@Test
void withSnappyCode() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set(AvroJob.CONF_OUTPUT_CODEC, DataFileConstants.SNAPPY_CODEC);
conf.setInt(SYNC_INTERVAL_KEY, TEST_SYNC_INTERVAL);
testGetRecordWriter(conf, CodecFactory.snappyCodec(), TEST_SYNC_INTERVAL);
}
/** BZip2 selected through the Avro output-codec key. */
@Test
void withBZip2Code() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set(AvroJob.CONF_OUTPUT_CODEC, DataFileConstants.BZIP2_CODEC);
testGetRecordWriter(conf, CodecFactory.bzip2Codec(), DataFileConstants.DEFAULT_SYNC_INTERVAL);
}
/** Zstandard selected through the Avro output-codec key. */
@Test
void withZstandardCode() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set(AvroJob.CONF_OUTPUT_CODEC, DataFileConstants.ZSTANDARD_CODEC);
testGetRecordWriter(conf, CodecFactory.zstandardCodec(3), DataFileConstants.DEFAULT_SYNC_INTERVAL);
}
/** Deflate selected through the generic Hadoop compression-codec class name. */
@Test
void withDeflateCodeWithHadoopConfig() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.DeflateCodec");
conf.setInt(org.apache.avro.mapred.AvroOutputFormat.DEFLATE_LEVEL_KEY, -1);
conf.setInt(SYNC_INTERVAL_KEY, TEST_SYNC_INTERVAL);
testGetRecordWriter(conf, CodecFactory.deflateCodec(-1), TEST_SYNC_INTERVAL);
}
/** Snappy selected through the generic Hadoop compression-codec class name. */
@Test
void withSnappyCodeWithHadoopConfig() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");
testGetRecordWriter(conf, CodecFactory.snappyCodec(), DataFileConstants.DEFAULT_SYNC_INTERVAL);
}
/** BZip2 selected through the generic Hadoop compression-codec class name. */
@Test
void withBZip2CodeWithHadoopConfig() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("mapred.output.compress", true);
conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.BZip2Codec");
conf.setInt(SYNC_INTERVAL_KEY, TEST_SYNC_INTERVAL);
testGetRecordWriter(conf, CodecFactory.bzip2Codec(), TEST_SYNC_INTERVAL);
}
/**
 * Tests that the record writer is constructed and returned correctly from the
 * output format.
 *
 * @param conf                 the job configuration under test
 * @param expectedCodec        the codec the writer factory should receive
 * @param expectedSyncInterval the sync interval the writer factory should receive
 */
private void testGetRecordWriter(Configuration conf, CodecFactory expectedCodec, int expectedSyncInterval)
throws IOException {
// Configure a mock task attempt context.
Job job = Job.getInstance(conf);
job.getConfiguration().set("mapred.output.dir", mTempDir.getPath());
Schema writerSchema = Schema.create(Schema.Type.INT);
AvroJob.setOutputKeySchema(job, writerSchema);
TaskAttemptContext context = mock(TaskAttemptContext.class);
when(context.getConfiguration()).thenReturn(job.getConfiguration());
when(context.getTaskAttemptID()).thenReturn(TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0"));
when(context.getNumReduceTasks()).thenReturn(1);
// Create a mock record writer.
@SuppressWarnings("unchecked")
RecordWriter<AvroKey<Integer>, NullWritable> expectedRecordWriter = mock(RecordWriter.class);
AvroKeyOutputFormat.RecordWriterFactory recordWriterFactory = mock(AvroKeyOutputFormat.RecordWriterFactory.class);
// Stub the record writer factory for the expected arguments; the codec
// argument is captured so it can be compared after the call.
ArgumentCaptor<CodecFactory> capturedCodecFactory = ArgumentCaptor.forClass(CodecFactory.class);
when(recordWriterFactory.create(eq(writerSchema), any(GenericData.class), capturedCodecFactory.capture(),
any(OutputStream.class), eq(expectedSyncInterval))).thenReturn(expectedRecordWriter);
AvroKeyOutputFormat<Integer> outputFormat = new AvroKeyOutputFormat<>(recordWriterFactory);
RecordWriter<AvroKey<Integer>, NullWritable> recordWriter = outputFormat.getRecordWriter(context);
// Make sure the expected codec was used.
assertNotNull(capturedCodecFactory.getValue());
// CodecFactory has no value equality, so compare string representations.
assertEquals(expectedCodec.toString(), capturedCodecFactory.getValue().toString());
verify(context, atLeastOnce()).getConfiguration();
verify(recordWriterFactory).create(eq(writerSchema), any(ReflectData.class), any(CodecFactory.class),
any(OutputStream.class), anyInt());
assertNotNull(recordWriter);
assertSame(expectedRecordWriter, recordWriter);
}
}
| 7,002 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestCombineAvroKeyValueFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapreduce;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.*;
public class TestCombineAvroKeyValueFileInputFormat {
  /** A temporary directory for test data. */
  @TempDir
  public File mTempDir;

  /**
   * Verifies that avro records can be read from multiple input files combined
   * into a single split.
   */
  @Test
  void readRecords() throws IOException, InterruptedException, ClassNotFoundException {
    Schema keyValueSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.INT), Schema.create(Schema.Type.STRING));

    // Two single-record input files so the combine input format must merge them.
    AvroKeyValue<Integer, CharSequence> record1 = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    record1.setKey(1);
    record1.setValue("apple banana carrot");
    AvroFiles.createFile(new File(mTempDir, "combineSplit00.avro"), keyValueSchema, record1.get());

    AvroKeyValue<Integer, CharSequence> record2 = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    record2.setKey(2);
    record2.setValue("apple banana");
    AvroFiles.createFile(new File(mTempDir, "combineSplit01.avro"), keyValueSchema, record2.get());

    // Configure the job input.
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job, new Path(mTempDir.getAbsolutePath()));
    job.setInputFormatClass(CombineAvroKeyValueFileInputFormat.class);
    AvroJob.setInputKeySchema(job, Schema.create(Schema.Type.INT));
    AvroJob.setInputValueSchema(job, Schema.create(Schema.Type.STRING));

    // Configure the identity mapper.
    AvroJob.setMapOutputKeySchema(job, Schema.create(Schema.Type.INT));
    AvroJob.setMapOutputValueSchema(job, Schema.create(Schema.Type.STRING));

    // Configure zero reducers.
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(AvroKey.class);
    job.setOutputValueClass(AvroValue.class);

    // Configure the output format.
    job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
    Path outputPath = new Path(mTempDir.getPath(), "out");
    FileOutputFormat.setOutputPath(job, outputPath);

    // Run the job.
    assertTrue(job.waitForCompletion(true));

    // Verify that the output Avro container file has the expected data.
    File avroFile = new File(outputPath.toString(), "part-m-00000.avro");
    DatumReader<GenericRecord> datumReader = new SpecificDatumReader<>(
        AvroKeyValue.getSchema(Schema.create(Schema.Type.INT), Schema.create(Schema.Type.STRING)));
    // try-with-resources so the reader is closed even when an assertion fails
    // (the original closed it only on the success path).
    try (DataFileReader<GenericRecord> avroFileReader = new DataFileReader<>(avroFile, datumReader)) {
      assertTrue(avroFileReader.hasNext());
      // Record order across combined splits is not asserted; match by key.
      while (avroFileReader.hasNext()) {
        AvroKeyValue<Integer, CharSequence> mapRecord1 = new AvroKeyValue<>(avroFileReader.next());
        assertNotNull(mapRecord1.get());
        if (mapRecord1.getKey().intValue() == 1) {
          assertEquals("apple banana carrot", mapRecord1.getValue().toString());
        } else if (mapRecord1.getKey().intValue() == 2) {
          assertEquals("apple banana", mapRecord1.getValue().toString());
        } else {
          fail("Unknown key " + mapRecord1.getKey().intValue());
        }
      }
    }
  }
}
| 7,003 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestFsInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.hamcrest.MatcherAssert.assertThat;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class TestFsInput {
  private static final String FILE_CONTENTS = "abcdefghijklmnopqrstuvwxyz";

  /** Test file holding FILE_CONTENTS; recreated for every test (no need for static). */
  private File file;
  private Configuration conf;
  /** Shared input over {@link #file}, opened in setUp and closed in tearDown. */
  private FsInput fsInput;

  @TempDir
  public File DIR;

  @BeforeEach
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    file = new File(DIR, "file.txt");
    try (
        PrintWriter out = new PrintWriter(new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8))) {
      out.print(FILE_CONTENTS);
    }
    fsInput = new FsInput(new Path(file.getPath()), conf);
  }

  @AfterEach
  public void tearDown() throws Exception {
    if (fsInput != null) {
      fsInput.close();
    }
  }

  /** The Configuration-based constructor must yield a readable input. */
  @Test
  void configurationConstructor() throws Exception {
    try (FsInput in = new FsInput(new Path(file.getPath()), conf)) {
      int expectedByteCount = 1;
      byte[] readBytes = new byte[expectedByteCount];
      // Read through the instance under test. (Bug fix: the original read from
      // the shared fsInput field, leaving 'in' completely unexercised.)
      int actualByteCount = in.read(readBytes, 0, expectedByteCount);
      assertThat(actualByteCount, is(equalTo(expectedByteCount)));
    }
  }

  /** The FileSystem-based constructor must yield a readable input. */
  @Test
  void fileSystemConstructor() throws Exception {
    Path path = new Path(file.getPath());
    FileSystem fs = path.getFileSystem(conf);
    try (FsInput in = new FsInput(path, fs)) {
      int expectedByteCount = 1;
      byte[] readBytes = new byte[expectedByteCount];
      // Read through the instance under test. (Bug fix: the original read from
      // the shared fsInput field instead of 'in'.)
      int actualByteCount = in.read(readBytes, 0, expectedByteCount);
      assertThat(actualByteCount, is(equalTo(expectedByteCount)));
    }
  }

  @Test
  void length() throws IOException {
    // assertEquals takes (expected, actual); the original had them reversed.
    assertEquals(FILE_CONTENTS.length(), fsInput.length());
  }

  @Test
  void read() throws Exception {
    byte[] expectedBytes = FILE_CONTENTS.getBytes(StandardCharsets.UTF_8);
    byte[] actualBytes = new byte[expectedBytes.length];
    int actualByteCount = fsInput.read(actualBytes, 0, actualBytes.length);
    assertThat(actualBytes, is(equalTo(expectedBytes)));
    assertThat(actualByteCount, is(equalTo(expectedBytes.length)));
  }

  @Test
  void seek() throws Exception {
    // Seek into the middle of the file and verify the byte read there.
    int seekPos = FILE_CONTENTS.length() / 2;
    byte[] fileContentBytes = FILE_CONTENTS.getBytes(StandardCharsets.UTF_8);
    byte expectedByte = fileContentBytes[seekPos];
    fsInput.seek(seekPos);
    byte[] readBytes = new byte[1];
    fsInput.read(readBytes, 0, 1);
    byte actualByte = readBytes[0];
    assertThat(actualByte, is(equalTo(expectedByte)));
  }

  @Test
  void tell() throws Exception {
    // tell() must report the position set by the preceding seek().
    long expectedTellPos = FILE_CONTENTS.length() / 2;
    fsInput.seek(expectedTellPos);
    long actualTellPos = fsInput.tell();
    assertThat(actualTellPos, is(equalTo(expectedTellPos)));
  }
}
| 7,004 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestWordCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class TestWordCount {
@TempDir
public File tmpFolder;
public static final Schema STATS_SCHEMA = new Schema.Parser().parse("{\"name\":\"stats\",\"type\":\"record\","
+ "\"fields\":[{\"name\":\"count\",\"type\":\"int\"}," + "{\"name\":\"name\",\"type\":\"string\"}]}");
/**
 * Reflect-based counterpart of the generated {@code TextStats} record.
 * The field names drive the schema derived via {@code ReflectData} below,
 * so they intentionally mirror the stats schema's fields.
 */
public static class ReflectStats {
  // Word being counted (maps to the "name" schema field).
  String name;
  // Number of occurrences (maps to the "count" schema field).
  int count;
}
// Permit data written as SpecificStats (TextStats) to be read as ReflectStats.
private static Schema REFLECT_STATS_SCHEMA = ReflectData.get().getSchema(ReflectStats.class);
static {
  // Alias the generated record's full name so writer/reader schema resolution succeeds.
  REFLECT_STATS_SCHEMA.addAlias(TextStats.SCHEMA$.getFullName());
}
/** Emits each input line as-is with a count of one; a reducer sums per line. */
private static class LineCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
  // Reusable writable holding the constant count of 1, allocated once per task.
  private IntWritable one;

  @Override
  protected void setup(Context context) {
    one = new IntWritable(1);
  }

  @Override
  protected void map(LongWritable fileByteOffset, Text line, Context context)
      throws IOException, InterruptedException {
    // The byte offset key is irrelevant; the line itself becomes the output key.
    context.write(line, one);
  }
}
/** Unpacks an Avro {@code TextStats} key into writable (name, count) pairs. */
private static class StatCountMapper extends Mapper<AvroKey<TextStats>, NullWritable, Text, IntWritable> {
  // Output writables reused across map() calls to avoid per-record allocation.
  private IntWritable countOut;
  private Text nameOut;

  @Override
  protected void setup(Context context) {
    countOut = new IntWritable(0);
    nameOut = new Text("");
  }

  @Override
  protected void map(AvroKey<TextStats> record, NullWritable ignore, Context context)
      throws IOException, InterruptedException {
    TextStats stats = record.datum();
    countOut.set(stats.getCount());
    nameOut.set(stats.getName().toString());
    context.write(nameOut, countOut);
  }
}
/** Like StatCountMapper, but reads the reflect-mapped {@code ReflectStats} record. */
private static class ReflectCountMapper extends Mapper<AvroKey<ReflectStats>, NullWritable, Text, IntWritable> {
  // Output writables reused across map() calls.
  private IntWritable countOut;
  private Text nameOut;

  @Override
  protected void setup(Context context) {
    countOut = new IntWritable(0);
    nameOut = new Text("");
  }

  @Override
  protected void map(AvroKey<ReflectStats> record, NullWritable ignore, Context context)
      throws IOException, InterruptedException {
    ReflectStats stats = record.datum();
    countOut.set(stats.count);
    nameOut.set(stats.name);
    context.write(nameOut, countOut);
  }
}
/** Sums per-word counts and emits them as an Avro (string, int) key/value pair. */
private static class AvroSumReducer extends Reducer<Text, IntWritable, AvroKey<CharSequence>, AvroValue<Integer>> {
  @Override
  protected void reduce(Text key, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    int total = 0;
    for (IntWritable partial : counts) {
      total += partial.get();
    }
    context.write(new AvroKey<>(key.toString()), new AvroValue<>(total));
  }
}
/** Sums counts per word and emits a generic "stats" record {name, count}. */
private static class GenericStatsReducer
    extends Reducer<Text, IntWritable, AvroKey<GenericData.Record>, NullWritable> {
  // Key wrapper reused for every output record.
  private AvroKey<GenericData.Record> statsKey;

  @Override
  protected void setup(Context context) {
    statsKey = new AvroKey<>(null);
  }

  @Override
  protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    int total = 0;
    for (IntWritable partial : counts) {
      total += partial.get();
    }
    // Build the generic record for this word and emit it as the key.
    GenericData.Record stats = new GenericData.Record(STATS_SCHEMA);
    stats.put("name", new Utf8(line.toString()));
    stats.put("count", total);
    statsKey.datum(stats);
    context.write(statsKey, NullWritable.get());
  }
}
/** Sums counts per word and emits a specific {@code TextStats} record. */
private static class SpecificStatsReducer extends Reducer<Text, IntWritable, AvroKey<TextStats>, NullWritable> {
  // Key wrapper reused for every output record.
  private AvroKey<TextStats> statsKey;

  @Override
  protected void setup(Context context) {
    statsKey = new AvroKey<>(null);
  }

  @Override
  protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    int total = 0;
    for (IntWritable partial : counts) {
      total += partial.get();
    }
    TextStats stats = new TextStats();
    stats.setCount(total);
    stats.setName(line.toString());
    statsKey.datum(stats);
    context.write(statsKey, NullWritable.get());
  }
}
/** Sums counts per word and emits a reflect-mapped {@code ReflectStats} record. */
private static class ReflectStatsReducer extends Reducer<Text, IntWritable, AvroKey<ReflectStats>, NullWritable> {
  // Key wrapper reused for every output record.
  private AvroKey<ReflectStats> statsKey;

  @Override
  protected void setup(Context context) {
    statsKey = new AvroKey<>(null);
  }

  @Override
  protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    int total = 0;
    for (IntWritable partial : counts) {
      total += partial.get();
    }
    ReflectStats stats = new ReflectStats();
    stats.count = total;
    stats.name = line.toString();
    statsKey.datum(stats);
    context.write(statsKey, NullWritable.get());
  }
}
/** Identity mapper: passes Avro keys through so the framework sorts them. */
private static class SortMapper extends Mapper<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
  @Override
  protected void map(AvroKey<TextStats> stats, NullWritable nothing, Context context)
      throws IOException, InterruptedException {
    context.write(stats, nothing);
  }
}
/** Identity reducer: emits each distinct sorted Avro key exactly once. */
private static class SortReducer extends Reducer<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
  @Override
  protected void reduce(AvroKey<TextStats> stats, Iterable<NullWritable> ignore, Context context)
      throws IOException, InterruptedException {
    context.write(stats, NullWritable.get());
  }
}
/** Runs a word-count job that writes generic Avro records and verifies the output. */
@Test
void avroGenericOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(GenericStatsReducer.class);
  AvroJob.setOutputKeySchema(job, STATS_SCHEMA);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-generic");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<GenericData.Record> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new GenericDatumReader<>(STATS_SCHEMA))) {
    for (GenericData.Record record : reader) {
      counts.put(((Utf8) record.get("name")).toString(), (Integer) record.get("count"));
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Runs a word-count job that writes specific {@code TextStats} records and verifies the output. */
@Test
void avroSpecificOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(SpecificStatsReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Runs a word-count job that writes reflect-mapped records and verifies the output. */
@Test
void avroReflectOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(ReflectStatsReducer.class);
  AvroJob.setOutputKeySchema(job, REFLECT_STATS_SCHEMA);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-reflect");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<ReflectStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new ReflectDatumReader<>())) {
    for (ReflectStats record : reader) {
      counts.put(record.name, record.count);
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Reads specific Avro records as job input, re-counts them, and verifies the output. */
@Test
void avroInput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
  job.setInputFormatClass(AvroKeyInputFormat.class);
  AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
  job.setMapperClass(StatCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(SpecificStatsReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific-input");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Reads Avro input through the reflect mapping (schema alias) and verifies the output. */
@Test
void reflectInput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
  job.setInputFormatClass(AvroKeyInputFormat.class);
  AvroJob.setInputKeySchema(job, REFLECT_STATS_SCHEMA);
  job.setMapperClass(ReflectCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(ReflectStatsReducer.class);
  AvroJob.setOutputKeySchema(job, REFLECT_STATS_SCHEMA);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-reflect-input");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<ReflectStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new ReflectDatumReader<>())) {
    for (ReflectStats record : reader) {
      counts.put(record.name, record.count);
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Uses Avro records as intermediate map output (sort/shuffle) and verifies the result. */
@Test
void avroMapOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
  job.setInputFormatClass(AvroKeyInputFormat.class);
  AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
  job.setMapperClass(SortMapper.class);
  AvroJob.setMapOutputKeySchema(job, TextStats.SCHEMA$);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SortReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific-input");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/**
 * Tests the MR output to text files when using AvroKey and AvroValue records.
 */
@Test
void avroUsingTextFileOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(AvroSumReducer.class);
  AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
  AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.INT));
  job.setOutputFormatClass(TextOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-text");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Path filePath = outputFiles[0].getPath();
  InputStream inputStream = filePath.getFileSystem(job.getConfiguration()).open(filePath);
  assertNotNull(inputStream);
  // Decode with an explicit charset; relying on the platform default could
  // mis-read the output on non-UTF-8 locales.
  try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
    assertTrue(reader.ready());
    assertEquals("apple\t3", reader.readLine());
    assertEquals("banana\t2", reader.readLine());
    assertEquals("carrot\t1", reader.readLine());
    assertFalse(reader.ready());
  }
}
}
| 7,005 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroMultipleOutputsSyncable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class TestAvroMultipleOutputsSyncable {
@TempDir
public File tmpFolder;
public static final Schema STATS_SCHEMA = new Schema.Parser().parse("{\"name\":\"stats\",\"type\":\"record\","
+ "\"fields\":[{\"name\":\"count\",\"type\":\"int\"}," + "{\"name\":\"name\",\"type\":\"string\"}]}");
public static final Schema STATS_SCHEMA_2 = new Schema.Parser().parse("{\"name\":\"stats\",\"type\":\"record\","
+ "\"fields\":[{\"name\":\"count1\",\"type\":\"int\"}," + "{\"name\":\"name1\",\"type\":\"string\"}]}");
/** Emits each input line as-is with a count of one; a reducer sums per line. */
private static class LineCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
  // Reusable writable holding the constant count of 1.
  private IntWritable one;

  @Override
  protected void setup(Context context) {
    one = new IntWritable(1);
  }

  @Override
  protected void map(LongWritable fileByteOffset, Text line, Context context)
      throws IOException, InterruptedException {
    // The byte offset key is irrelevant; the line itself becomes the output key.
    context.write(line, one);
  }
}
/** Unpacks an Avro {@code TextStats} key into writable (name, count) pairs. */
private static class StatCountMapper extends Mapper<AvroKey<TextStats>, NullWritable, Text, IntWritable> {
  // Output writables reused across map() calls to avoid per-record allocation.
  private IntWritable countOut;
  private Text nameOut;

  @Override
  protected void setup(Context context) {
    countOut = new IntWritable(0);
    nameOut = new Text("");
  }

  @Override
  protected void map(AvroKey<TextStats> record, NullWritable ignore, Context context)
      throws IOException, InterruptedException {
    TextStats stats = record.datum();
    countOut.set(stats.getCount());
    nameOut.set(stats.getName().toString());
    context.write(nameOut, countOut);
  }
}
/**
 * Reducer exercising {@code AvroMultipleOutputs}: besides the default output it
 * writes the same summed stats to several named and explicitly-pathed outputs,
 * interleaved with sync() calls. The write/sync interleaving is what the test
 * verifies, so the call order below must be preserved.
 */
private static class GenericStatsReducer
    extends Reducer<Text, IntWritable, AvroKey<GenericData.Record>, NullWritable> {
  private AvroKey<GenericData.Record> mStats; // key wrapper reused for every output record
  private AvroMultipleOutputs amos; // handle to the additional named outputs

  @Override
  protected void setup(Context context) {
    mStats = new AvroKey<>(null);
    amos = new AvroMultipleOutputs(context);
  }

  @Override
  protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    // One record per schema variant: STATS_SCHEMA uses name/count,
    // STATS_SCHEMA_2 uses name1/count1.
    GenericData.Record record = new GenericData.Record(STATS_SCHEMA);
    GenericData.Record record2 = new GenericData.Record(STATS_SCHEMA_2);
    int sum = 0;
    for (IntWritable count : counts) {
      sum += count.get();
    }
    record.put("name", new Utf8(line.toString()));
    record.put("count", sum);
    mStats.datum(record);
    // Default job output ("part-*" files).
    context.write(mStats, NullWritable.get());
    // Exercise sync() on the named output before writing to it.
    amos.sync("myavro", "myavro");
    amos.write("myavro", mStats, NullWritable.get());
    record2.put("name1", new Utf8(line.toString()));
    record2.put("count1", sum);
    mStats.datum(record2);
    // Write with an explicit schema to a base output path ("testnewwrite2-*").
    amos.write(mStats, NullWritable.get(), STATS_SCHEMA_2, null, "testnewwrite2");
    amos.sync("myavro1", "myavro1");
    amos.write("myavro1", mStats);
    amos.write(mStats, NullWritable.get(), STATS_SCHEMA, null, "testnewwrite");
    // No schema argument: the test reads this output back with STATS_SCHEMA.
    amos.write(mStats, NullWritable.get(), "testwritenonschema");
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    // Close the multiple-outputs handle so all named-output files are finalized.
    amos.close();
  }
}
/**
 * Sums counts per word into a {@code TextStats} record, writing it both to the
 * default output and to the "myavro3" named output (after a sync()).
 */
private static class SpecificStatsReducer extends Reducer<Text, IntWritable, AvroKey<TextStats>, NullWritable> {
  // Key wrapper reused for every output record.
  private AvroKey<TextStats> statsKey;
  // Handle to the additional named outputs.
  private AvroMultipleOutputs multipleOutputs;

  @Override
  protected void setup(Context context) {
    statsKey = new AvroKey<>(null);
    multipleOutputs = new AvroMultipleOutputs(context);
  }

  @Override
  protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
      throws IOException, InterruptedException {
    int total = 0;
    for (IntWritable partial : counts) {
      total += partial.get();
    }
    TextStats stats = new TextStats();
    stats.setCount(total);
    stats.setName(line.toString());
    statsKey.datum(stats);
    context.write(statsKey, NullWritable.get());
    // Exercise sync() on the named output before writing to it.
    multipleOutputs.sync("myavro3", "myavro3");
    multipleOutputs.write("myavro3", statsKey, NullWritable.get());
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    multipleOutputs.close();
  }
}
/** Identity mapper: passes Avro keys through so the framework sorts them. */
private static class SortMapper extends Mapper<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
  @Override
  protected void map(AvroKey<TextStats> stats, NullWritable nothing, Context context)
      throws IOException, InterruptedException {
    context.write(stats, nothing);
  }
}
/** Identity reducer: emits each distinct sorted Avro key exactly once. */
private static class SortReducer extends Reducer<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
  @Override
  protected void reduce(AvroKey<TextStats> stats, Iterable<NullWritable> ignore, Context context)
      throws IOException, InterruptedException {
    context.write(stats, NullWritable.get());
  }
}
/**
 * Runs the multiple-outputs word-count job and verifies that every named /
 * explicitly-pathed output file contains the same word counts. The five
 * repeated read-and-assert stanzas are factored into helpers, and each
 * DataFileReader is closed via try-with-resources even when an assertion fails.
 */
@Test
void avroGenericOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(GenericStatsReducer.class);
  AvroJob.setOutputKeySchema(job, STATS_SCHEMA);
  AvroMultipleOutputs.addNamedOutput(job, "myavro", AvroKeyOutputFormat.class, STATS_SCHEMA, null);
  AvroMultipleOutputs.addNamedOutput(job, "myavro1", AvroKeyOutputFormat.class, STATS_SCHEMA_2);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out");
  outputPath.getFileSystem(job.getConfiguration()).delete(outputPath, true);
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected. STATS_SCHEMA_2
  // renames the fields to name1/count1.
  assertWordCounts(readGenericCounts(job, outputPath.suffix("/myavro-r-00000.avro"), STATS_SCHEMA, "name", "count"));
  assertWordCounts(
      readGenericCounts(job, outputPath.suffix("/myavro1-r-00000.avro"), STATS_SCHEMA_2, "name1", "count1"));
  assertWordCounts(
      readGenericCounts(job, outputPath.suffix("/testnewwrite-r-00000.avro"), STATS_SCHEMA, "name", "count"));
  assertWordCounts(
      readGenericCounts(job, outputPath.suffix("/testnewwrite2-r-00000.avro"), STATS_SCHEMA_2, "name1", "count1"));
  assertWordCounts(
      readGenericCounts(job, outputPath.suffix("/testwritenonschema-r-00000.avro"), STATS_SCHEMA, "name", "count"));
}

/** Reads the single Avro file matched by {@code glob} into a name-to-count map. */
private static Map<String, Integer> readGenericCounts(Job job, Path glob, Schema schema, String nameField,
    String countField) throws IOException {
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(glob);
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  try (DataFileReader<GenericData.Record> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new GenericDatumReader<>(schema))) {
    for (GenericData.Record record : reader) {
      counts.put(((Utf8) record.get(nameField)).toString(), (Integer) record.get(countField));
    }
  }
  return counts;
}

/** Asserts the canonical word counts of the shared test input file. */
private static void assertWordCounts(Map<String, Integer> counts) {
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Verifies the "myavro3" named output produced by {@code SpecificStatsReducer}. */
@Test
void avroSpecificOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(LineCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);
  job.setReducerClass(SpecificStatsReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific");
  outputPath.getFileSystem(job.getConfiguration()).delete(outputPath, true);
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/myavro3-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Reads Avro input, re-counts it, and verifies the "myavro3" named output. */
@Test
void avroInput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
  job.setInputFormatClass(AvroKeyInputFormat.class);
  AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
  AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);
  job.setMapperClass(StatCountMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  job.setReducerClass(SpecificStatsReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific-input");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/myavro3-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
/** Uses Avro records as intermediate map output (sort/shuffle) and verifies the result. */
@Test
void avroMapOutput() throws Exception {
  Job job = Job.getInstance();
  FileInputFormat.setInputPaths(job,
      new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
  job.setInputFormatClass(AvroKeyInputFormat.class);
  AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
  job.setMapperClass(SortMapper.class);
  AvroJob.setMapOutputKeySchema(job, TextStats.SCHEMA$);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SortReducer.class);
  AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
  job.setOutputFormatClass(AvroKeyOutputFormat.class);
  Path outputPath = new Path(tmpFolder.getPath() + "/out-specific-input");
  FileOutputFormat.setOutputPath(job, outputPath);
  assertTrue(job.waitForCompletion(true));
  // Check that the results from the MapReduce were as expected.
  FileSystem fileSystem = FileSystem.get(job.getConfiguration());
  FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix("/part-*"));
  assertEquals(1, outputFiles.length);
  Map<String, Integer> counts = new HashMap<>();
  // try-with-resources ensures the reader is closed even if an assertion fails.
  try (DataFileReader<TextStats> reader = new DataFileReader<>(
      new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
    for (TextStats record : reader) {
      counts.put(record.getName().toString(), record.getCount());
    }
  }
  assertEquals(3, counts.get("apple").intValue());
  assertEquals(2, counts.get("banana").intValue());
  assertEquals(1, counts.get("carrot").intValue());
}
}
| 7,006 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroMultipleOutputs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class TestAvroMultipleOutputs {
  @TempDir
  public File DIR;
  /** Generic record schema with fields {@code count} (int) and {@code name} (string). */
  public static final Schema STATS_SCHEMA = new Schema.Parser().parse("{\"name\":\"stats\",\"type\":\"record\","
      + "\"fields\":[{\"name\":\"count\",\"type\":\"int\"}," + "{\"name\":\"name\",\"type\":\"string\"}]}");
  /** Variant schema with renamed fields {@code count1} and {@code name1}. */
  public static final Schema STATS_SCHEMA_2 = new Schema.Parser().parse("{\"name\":\"stats\",\"type\":\"record\","
      + "\"fields\":[{\"name\":\"count1\",\"type\":\"int\"}," + "{\"name\":\"name1\",\"type\":\"string\"}]}");

  /** Emits {@code <line, 1>} for every input line (word-count style map phase). */
  private static class LineCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private IntWritable mOne;

    @Override
    protected void setup(Context context) {
      mOne = new IntWritable(1);
    }

    @Override
    protected void map(LongWritable fileByteOffset, Text line, Context context)
        throws IOException, InterruptedException {
      context.write(line, mOne);
    }
  }

  /** Re-emits a {@link TextStats} avro record as a {@code <name, count>} pair. */
  private static class StatCountMapper extends Mapper<AvroKey<TextStats>, NullWritable, Text, IntWritable> {
    private IntWritable mCount;
    private Text mText;

    @Override
    protected void setup(Context context) {
      mCount = new IntWritable(0);
      mText = new Text("");
    }

    @Override
    protected void map(AvroKey<TextStats> record, NullWritable ignore, Context context)
        throws IOException, InterruptedException {
      mCount.set(record.datum().getCount());
      mText.set(record.datum().getName().toString());
      context.write(mText, mCount);
    }
  }

  /**
   * Sums counts per word and writes generic records to the base output plus
   * several named/baseless {@link AvroMultipleOutputs} destinations, exercising
   * every overload of {@code amos.write}.
   */
  private static class GenericStatsReducer
      extends Reducer<Text, IntWritable, AvroKey<GenericData.Record>, NullWritable> {
    private AvroKey<GenericData.Record> mStats;
    private AvroMultipleOutputs amos;

    @Override
    protected void setup(Context context) {
      mStats = new AvroKey<>(null);
      amos = new AvroMultipleOutputs(context);
    }

    @Override
    protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
        throws IOException, InterruptedException {
      GenericData.Record record = new GenericData.Record(STATS_SCHEMA);
      GenericData.Record record2 = new GenericData.Record(STATS_SCHEMA_2);
      int sum = 0;
      for (IntWritable count : counts) {
        sum += count.get();
      }
      record.put("name", new Utf8(line.toString()));
      record.put("count", sum);
      mStats.datum(record);
      context.write(mStats, NullWritable.get());
      amos.write("myavro", mStats, NullWritable.get());
      record2.put("name1", new Utf8(line.toString()));
      record2.put("count1", sum);
      mStats.datum(record2);
      amos.write(mStats, NullWritable.get(), STATS_SCHEMA_2, null, "testnewwrite2");
      amos.write("myavro1", mStats);
      amos.write(mStats, NullWritable.get(), STATS_SCHEMA, null, "testnewwrite");
      amos.write(mStats, NullWritable.get(), "testwritenonschema");
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
      // Must close the multiple-outputs handle or the named files are never committed.
      amos.close();
    }
  }

  /** Sums counts per word and writes specific {@link TextStats} records to "myavro3". */
  private static class SpecificStatsReducer extends Reducer<Text, IntWritable, AvroKey<TextStats>, NullWritable> {
    private AvroKey<TextStats> mStats;
    private AvroMultipleOutputs amos;

    @Override
    protected void setup(Context context) {
      mStats = new AvroKey<>(null);
      amos = new AvroMultipleOutputs(context);
    }

    @Override
    protected void reduce(Text line, Iterable<IntWritable> counts, Context context)
        throws IOException, InterruptedException {
      TextStats record = new TextStats();
      record.setCount(0);
      for (IntWritable count : counts) {
        record.setCount(record.getCount() + count.get());
      }
      record.setName(line.toString());
      mStats.datum(record);
      context.write(mStats, NullWritable.get());
      amos.write("myavro3", mStats, NullWritable.get());
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
      amos.close();
    }
  }

  /** Identity mapper over avro keys; used to exercise avro map-output schemas. */
  private static class SortMapper extends Mapper<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
    @Override
    protected void map(AvroKey<TextStats> key, NullWritable value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  /** Identity reducer over avro keys. */
  private static class SortReducer extends Reducer<AvroKey<TextStats>, NullWritable, AvroKey<TextStats>, NullWritable> {
    @Override
    protected void reduce(AvroKey<TextStats> key, Iterable<NullWritable> ignore, Context context)
        throws IOException, InterruptedException {
      context.write(key, NullWritable.get());
    }
  }

  /**
   * Reads exactly one generic-record avro output file matching
   * {@code outputPath + fileSuffix} and returns a map of name field to count
   * field values.
   *
   * @param job        job whose configuration locates the file system
   * @param outputPath job output directory
   * @param fileSuffix path suffix (glob) expected to match exactly one file
   * @param schema     reader schema for the records
   * @param nameField  name of the string field
   * @param countField name of the int field
   */
  private Map<String, Integer> readGenericCounts(Job job, Path outputPath, String fileSuffix, Schema schema,
      String nameField, String countField) throws IOException {
    FileSystem fileSystem = FileSystem.get(job.getConfiguration());
    FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix(fileSuffix));
    assertEquals(1, outputFiles.length);
    Map<String, Integer> counts = new HashMap<>();
    try (DataFileReader<GenericData.Record> reader = new DataFileReader<>(
        new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new GenericDatumReader<>(schema))) {
      for (GenericData.Record record : reader) {
        counts.put(((Utf8) record.get(nameField)).toString(), (Integer) record.get(countField));
      }
    }
    return counts;
  }

  /**
   * Reads exactly one specific-record ({@link TextStats}) avro output file
   * matching {@code outputPath + glob} and returns a name-to-count map.
   */
  private Map<String, Integer> readSpecificCounts(Job job, Path outputPath, String glob) throws IOException {
    FileSystem fileSystem = FileSystem.get(job.getConfiguration());
    FileStatus[] outputFiles = fileSystem.globStatus(outputPath.suffix(glob));
    assertEquals(1, outputFiles.length);
    Map<String, Integer> counts = new HashMap<>();
    try (DataFileReader<TextStats> reader = new DataFileReader<>(
        new FsInput(outputFiles[0].getPath(), job.getConfiguration()), new SpecificDatumReader<>())) {
      for (TextStats record : reader) {
        counts.put(record.getName().toString(), record.getCount());
      }
    }
    return counts;
  }

  /** Asserts the word counts expected from mapreduce-test-input: apple=3, banana=2, carrot=1. */
  private static void assertExpectedCounts(Map<String, Integer> counts) {
    assertEquals(3, counts.get("apple").intValue());
    assertEquals(2, counts.get("banana").intValue());
    assertEquals(1, counts.get("carrot").intValue());
  }

  @Test
  void avroGenericOutput() throws Exception {
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job,
        new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(LineCountMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setReducerClass(GenericStatsReducer.class);
    AvroJob.setOutputKeySchema(job, STATS_SCHEMA);
    AvroMultipleOutputs.addNamedOutput(job, "myavro", AvroKeyOutputFormat.class, STATS_SCHEMA, null);
    AvroMultipleOutputs.addNamedOutput(job, "myavro1", AvroKeyOutputFormat.class, STATS_SCHEMA_2);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    Path outputPath = new Path(DIR.getPath() + "/testAvroGenericOutput");
    outputPath.getFileSystem(job.getConfiguration()).delete(outputPath, true);
    FileOutputFormat.setOutputPath(job, outputPath);
    assertTrue(job.waitForCompletion(true));
    // Every named/base output written by GenericStatsReducer must hold the same counts.
    assertExpectedCounts(readGenericCounts(job, outputPath, "/myavro-r-00000.avro", STATS_SCHEMA, "name", "count"));
    assertExpectedCounts(
        readGenericCounts(job, outputPath, "/myavro1-r-00000.avro", STATS_SCHEMA_2, "name1", "count1"));
    assertExpectedCounts(
        readGenericCounts(job, outputPath, "/testnewwrite-r-00000.avro", STATS_SCHEMA, "name", "count"));
    assertExpectedCounts(
        readGenericCounts(job, outputPath, "/testnewwrite2-r-00000.avro", STATS_SCHEMA_2, "name1", "count1"));
    assertExpectedCounts(
        readGenericCounts(job, outputPath, "/testwritenonschema-r-00000.avro", STATS_SCHEMA, "name", "count"));
  }

  @Test
  void avroSpecificOutput() throws Exception {
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job,
        new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.txt").toURI().toString()));
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(LineCountMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);
    job.setReducerClass(SpecificStatsReducer.class);
    AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    Path outputPath = new Path(DIR.getPath() + "/testAvroSpecificOutput");
    outputPath.getFileSystem(job.getConfiguration()).delete(outputPath, true);
    FileOutputFormat.setOutputPath(job, outputPath);
    assertTrue(job.waitForCompletion(true));
    assertExpectedCounts(readSpecificCounts(job, outputPath, "/myavro3-*"));
  }

  @Test
  void avroInput() throws Exception {
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job,
        new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
    job.setInputFormatClass(AvroKeyInputFormat.class);
    AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
    AvroMultipleOutputs.addNamedOutput(job, "myavro3", AvroKeyOutputFormat.class, TextStats.SCHEMA$, null);
    job.setMapperClass(StatCountMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setReducerClass(SpecificStatsReducer.class);
    AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    Path outputPath = new Path(DIR.getPath() + "/testAvroInput");
    FileOutputFormat.setOutputPath(job, outputPath);
    assertTrue(job.waitForCompletion(true));
    // Check that the results from the MapReduce were as expected.
    assertExpectedCounts(readSpecificCounts(job, outputPath, "/myavro3-*"));
  }

  @Test
  void avroMapOutput() throws Exception {
    Job job = Job.getInstance();
    FileInputFormat.setInputPaths(job,
        new Path(getClass().getResource("/org/apache/avro/mapreduce/mapreduce-test-input.avro").toURI().toString()));
    job.setInputFormatClass(AvroKeyInputFormat.class);
    AvroJob.setInputKeySchema(job, TextStats.SCHEMA$);
    job.setMapperClass(SortMapper.class);
    AvroJob.setMapOutputKeySchema(job, TextStats.SCHEMA$);
    job.setMapOutputValueClass(NullWritable.class);
    job.setReducerClass(SortReducer.class);
    AvroJob.setOutputKeySchema(job, TextStats.SCHEMA$);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    Path outputPath = new Path(DIR.getPath() + "/testAvroMapOutput");
    FileOutputFormat.setOutputPath(job, outputPath);
    assertTrue(job.waitForCompletion(true));
    // Check that the results from the MapReduce were as expected.
    assertExpectedCounts(readSpecificCounts(job, outputPath, "/part-*"));
  }
}
| 7,007 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroKeyValueRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroDatumConverter;
import org.apache.avro.hadoop.io.AvroDatumConverterFactory;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.junit.jupiter.api.Test;
public class TestAvroKeyValueRecordWriter {
  /**
   * Writes two key/value records through {@link AvroKeyValueRecordWriter} into
   * memory and verifies they can be read back as generic key/value records.
   */
  @Test
  void writeRecords() throws IOException {
    Job job = Job.getInstance();
    AvroJob.setOutputValueSchema(job, TextStats.SCHEMA$);
    TaskAttemptContext context = mock(TaskAttemptContext.class);
    AvroDatumConverterFactory factory = new AvroDatumConverterFactory(job.getConfiguration());
    AvroDatumConverter<Text, ?> keyConverter = factory.create(Text.class);
    AvroValue<TextStats> avroValue = new AvroValue<>(null);
    @SuppressWarnings("unchecked")
    AvroDatumConverter<AvroValue<TextStats>, ?> valueConverter = factory
        .create((Class<AvroValue<TextStats>>) avroValue.getClass());
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    // Use a writer to generate a Avro container file in memory.
    // Write two records: <'apple', TextStats('apple')> and <'banana',
    // TextStats('banana')>.
    AvroKeyValueRecordWriter<Text, AvroValue<TextStats>> writer = new AvroKeyValueRecordWriter<>(keyConverter,
        valueConverter, new ReflectData(), compressionCodec, outputStream);
    TextStats appleStats = new TextStats();
    appleStats.setName("apple");
    writer.write(new Text("apple"), new AvroValue<>(appleStats));
    TextStats bananaStats = new TextStats();
    bananaStats.setName("banana");
    writer.write(new Text("banana"), new AvroValue<>(bananaStats));
    writer.close(context);
    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.STRING), TextStats.SCHEMA$);
    DatumReader<GenericRecord> datumReader = new SpecificDatumReader<>(readerSchema);
    // try-with-resources so the reader is closed even if an assertion fails.
    try (DataFileStream<GenericRecord> avroFileReader = new DataFileStream<>(inputStream, datumReader)) {
      // Verify that the first record was written.
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, TextStats> firstRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(firstRecord.get());
      assertEquals("apple", firstRecord.getKey().toString());
      assertEquals("apple", firstRecord.getValue().getName().toString());
      // Verify that the second record was written;
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, TextStats> secondRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(secondRecord.get());
      assertEquals("banana", secondRecord.getKey().toString());
      assertEquals("banana", secondRecord.getValue().getName().toString());
      // That's all, folks.
      assertFalse(avroFileReader.hasNext());
    }
    verify(context, never()).getConfiguration();
  }

  /** Simple reflect-serializable value type. */
  public static class R1 {
    String attribute;
  }

  /**
   * Verifies that reflection-based (non-specific) values round-trip through the
   * writer and a {@link ReflectDatumReader}.
   */
  @Test
  void usingReflection() throws Exception {
    Job job = Job.getInstance();
    Schema schema = ReflectData.get().getSchema(R1.class);
    AvroJob.setOutputValueSchema(job, schema);
    TaskAttemptContext context = mock(TaskAttemptContext.class);
    R1 record = new R1();
    record.attribute = "test";
    AvroValue<R1> avroValue = new AvroValue<>(record);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    AvroDatumConverterFactory factory = new AvroDatumConverterFactory(job.getConfiguration());
    AvroDatumConverter<Text, ?> keyConverter = factory.create(Text.class);
    @SuppressWarnings("unchecked")
    AvroDatumConverter<AvroValue<R1>, R1> valueConverter = factory.create((Class<AvroValue<R1>>) avroValue.getClass());
    AvroKeyValueRecordWriter<Text, AvroValue<R1>> writer = new AvroKeyValueRecordWriter<>(keyConverter, valueConverter,
        new ReflectData(), CodecFactory.nullCodec(), outputStream);
    writer.write(new Text("reflectionData"), avroValue);
    writer.close(context);
    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.STRING), schema);
    DatumReader<GenericRecord> datumReader = new ReflectDatumReader<>(readerSchema);
    try (DataFileStream<GenericRecord> avroFileReader = new DataFileStream<>(inputStream, datumReader)) {
      // Verify that the first record was written.
      assertTrue(avroFileReader.hasNext());
      // Verify that the record holds the same data that we've written
      AvroKeyValue<CharSequence, R1> firstRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(firstRecord.get());
      assertEquals("reflectionData", firstRecord.getKey().toString());
      assertEquals(record.attribute, firstRecord.getValue().attribute);
    }
    verify(context, never()).getConfiguration();
  }

  /**
   * Verifies that {@link AvroKeyValueRecordWriter#sync()} returns positions that
   * {@link DataFileReader#seek(long)} can use to re-read individual records.
   */
  @Test
  void syncableWriteRecords() throws IOException {
    Job job = Job.getInstance();
    AvroJob.setOutputValueSchema(job, TextStats.SCHEMA$);
    TaskAttemptContext context = mock(TaskAttemptContext.class);
    AvroDatumConverterFactory factory = new AvroDatumConverterFactory(job.getConfiguration());
    AvroDatumConverter<Text, ?> keyConverter = factory.create(Text.class);
    AvroValue<TextStats> avroValue = new AvroValue<>(null);
    @SuppressWarnings("unchecked")
    AvroDatumConverter<AvroValue<TextStats>, ?> valueConverter = factory
        .create((Class<AvroValue<TextStats>>) avroValue.getClass());
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    // Use a unique temp file instead of a hard-coded path under target/ so the
    // test does not depend on the build layout or collide with parallel runs.
    File tempFile = File.createTempFile("avro-kv-sync", ".avro");
    tempFile.deleteOnExit();
    FileOutputStream outputStream = new FileOutputStream(tempFile);
    // Write a marker followed by each record: <'apple', TextStats('apple')> and
    // <'banana', TextStats('banana')>.
    AvroKeyValueRecordWriter<Text, AvroValue<TextStats>> writer = new AvroKeyValueRecordWriter<>(keyConverter,
        valueConverter, new ReflectData(), compressionCodec, outputStream);
    TextStats appleStats = new TextStats();
    appleStats.setName("apple");
    long pointOne = writer.sync();
    writer.write(new Text("apple"), new AvroValue<>(appleStats));
    TextStats bananaStats = new TextStats();
    bananaStats.setName("banana");
    long pointTwo = writer.sync();
    writer.write(new Text("banana"), new AvroValue<>(bananaStats));
    writer.close(context);
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    Path avroFile = new Path(tempFile.getPath());
    try (DataFileReader<GenericData.Record> avroFileReader = new DataFileReader<>(new FsInput(avroFile, conf),
        new SpecificDatumReader<>())) {
      avroFileReader.seek(pointTwo);
      // Verify that the second record was written;
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, TextStats> secondRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(secondRecord.get());
      assertEquals("banana", secondRecord.getKey().toString());
      assertEquals("banana", secondRecord.getValue().getName().toString());
      avroFileReader.seek(pointOne);
      // Verify that the first record was written.
      assertTrue(avroFileReader.hasNext());
      AvroKeyValue<CharSequence, TextStats> firstRecord = new AvroKeyValue<>(avroFileReader.next());
      assertNotNull(firstRecord.get());
      assertEquals("apple", firstRecord.getKey().toString());
      assertEquals("apple", firstRecord.getValue().getName().toString());
      // That's all, folks.
    }
    verify(context, never()).getConfiguration();
  }
}
| 7,008 |
0 | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroKeyValueRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.SeekableFileInput;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class TestAvroKeyValueRecordReader {
  /** A temporary directory for test data. */
  @TempDir
  public File mTempDir;
  /**
   * Verifies that avro records can be read and progress is reported correctly.
   *
   * Writes a two-record key/value avro file to disk, reads it back through an
   * {@link AvroKeyValueRecordReader} whose input is redirected to that file,
   * and checks the decoded records, the reported progress, and the exact
   * interactions with the mocked split and task context.
   */
  @Test
  void readRecords() throws IOException, InterruptedException {
    // Create the test avro file input with two records:
    // 1. <"first", 1>
    // 2. <"second", 2>
    Schema keyValueSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.STRING), Schema.create(Schema.Type.INT));
    AvroKeyValue<CharSequence, Integer> firstInputRecord = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    firstInputRecord.setKey("first");
    firstInputRecord.setValue(1);
    AvroKeyValue<CharSequence, Integer> secondInputRecord = new AvroKeyValue<>(new GenericData.Record(keyValueSchema));
    secondInputRecord.setKey("second");
    secondInputRecord.setValue(2);
    final SeekableInput avroFileInput = new SeekableFileInput(AvroFiles.createFile(
        new File(mTempDir, "myInputFile.avro"), keyValueSchema, firstInputRecord.get(), secondInputRecord.get()));
    // Create the record reader over the avro input file.
    // The override bypasses HDFS path resolution so the reader consumes the
    // local test file regardless of the (fake) split path below.
    RecordReader<AvroKey<CharSequence>, AvroValue<Integer>> recordReader = new AvroKeyValueRecordReader<CharSequence, Integer>(
        Schema.create(Schema.Type.STRING), Schema.create(Schema.Type.INT)) {
      @Override
      protected SeekableInput createSeekableInput(Configuration conf, Path path) throws IOException {
        return avroFileInput;
      }
    };
    // Set up the job configuration.
    Configuration conf = new Configuration();
    // Create a mock input split for this record reader.
    // The path is never opened (see createSeekableInput above); the start/length
    // only drive the progress calculation.
    FileSplit inputSplit = mock(FileSplit.class);
    when(inputSplit.getPath()).thenReturn(new Path("/path/to/an/avro/file"));
    when(inputSplit.getStart()).thenReturn(0L);
    when(inputSplit.getLength()).thenReturn(avroFileInput.length());
    // Create a mock task attempt context for this record reader.
    TaskAttemptContext context = mock(TaskAttemptContext.class);
    when(context.getConfiguration()).thenReturn(conf);
    // Initialize the record reader.
    recordReader.initialize(inputSplit, context);
    assertEquals(0.0f, recordReader.getProgress(), 0.0f, "Progress should be zero before any records are read");
    // Some variables to hold the records.
    AvroKey<CharSequence> key;
    AvroValue<Integer> value;
    // Read the first record.
    assertTrue(recordReader.nextKeyValue(), "Expected at least one record");
    key = recordReader.getCurrentKey();
    value = recordReader.getCurrentValue();
    assertNotNull(key, "First record had null key");
    assertNotNull(value, "First record had null value");
    assertEquals("first", key.datum().toString());
    assertEquals(1, value.datum().intValue());
    // getCurrentKey()/getCurrentValue() must be stable between nextKeyValue() calls.
    assertEquals(key, recordReader.getCurrentKey());
    assertEquals(value, recordReader.getCurrentValue());
    // Read the second record.
    assertTrue(recordReader.nextKeyValue(), "Expected to read a second record");
    key = recordReader.getCurrentKey();
    value = recordReader.getCurrentValue();
    assertNotNull(key, "Second record had null key");
    assertNotNull(value, "Second record had null value");
    assertEquals("second", key.datum().toString());
    assertEquals(2, value.datum().intValue());
    assertEquals(1.0f, recordReader.getProgress(), 0.0f, "Progress should be complete (2 out of 2 records processed)");
    // There should be no more records.
    assertFalse(recordReader.nextKeyValue(), "Expected only 2 records");
    // Close the record reader.
    recordReader.close();
    // Verify the expected calls on the mocks.
    verify(inputSplit).getPath();
    verify(inputSplit, times(2)).getStart();
    verify(inputSplit).getLength();
    verify(context, atLeastOnce()).getConfiguration();
  }
}
| 7,009 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroKeyComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.conf.Configuration;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryData;
import org.apache.avro.reflect.ReflectData;
/**
 * The {@link RawComparator} used by jobs configured with {@link AvroJob}.
 *
 * Serialized keys are ordered with {@link BinaryData#compare}, in-memory datums
 * with {@link ReflectData#compare}; both use the key half of the job's
 * map-output pair schema.
 */
public class AvroKeyComparator<T> extends Configured implements RawComparator<AvroWrapper<T>> {
  /** Key schema extracted from the job's map-output pair schema. */
  private Schema keySchema;

  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf == null) {
      return; // Configured may invoke setConf(null) before a configuration exists.
    }
    keySchema = Pair.getKeySchema(AvroJob.getMapOutputSchema(conf));
  }

  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    // Raw comparison over the serialized bytes; no deserialization needed.
    return BinaryData.compare(b1, s1, l1, b2, s2, l2, keySchema);
  }

  @Override
  public int compare(AvroWrapper<T> x, AvroWrapper<T> y) {
    // In-memory comparison of the wrapped datums against the key schema.
    return ReflectData.get().compare(x.datum(), y.datum(), keySchema);
  }
}
| 7,010 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/SequenceFileRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileSplit;
/**
 * A {@link org.apache.hadoop.mapred.RecordReader} for sequence files.
 *
 * Wraps the split's path in a {@link SequenceFileReader} so that sequence-file
 * key/value entries are surfaced as Avro {@link Pair} records via the
 * {@link AvroRecordReader} machinery.
 */
public class SequenceFileRecordReader<K, V> extends AvroRecordReader<Pair<K, V>> {
  public SequenceFileRecordReader(JobConf job, FileSplit split) throws IOException {
    super(new SequenceFileReader<>(split.getPath().toUri(), job), split);
  }
}
| 7,011 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/DelegatingMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * A {@link Mapper} that delegates behaviour of paths to multiple other mappers.
 * Similar to {@link HadoopMapper}, but instantiates the map class in the map()
 * call instead of during configure(), as we rely on the split object to provide
 * us that information.
 *
 * @see AvroMultipleInputs#addInputPath(org.apache.hadoop.mapred.JobConf,
 *      org.apache.hadoop.fs.Path, Class, org.apache.avro.Schema)
 */
class DelegatingMapper<IN, OUT, K, V, KO, VO> extends MapReduceBase
    implements Mapper<AvroWrapper<IN>, NullWritable, KO, VO> {
  // Delegate mapper; created lazily on the first map() call from the split's
  // registered mapper class.
  AvroMapper<IN, OUT> mapper;
  JobConf conf;
  // True when the job has no reduce phase; affects how output is collected.
  boolean isMapOnly;
  AvroCollector<OUT> out;

  @Override
  public void configure(JobConf conf) {
    this.conf = conf;
    this.isMapOnly = conf.getNumReduceTasks() == 0;
  }

  @Override
  public void map(AvroWrapper<IN> wrapper, NullWritable value, OutputCollector<KO, VO> collector, Reporter reporter)
      throws IOException {
    if (mapper == null) {
      // The split carries the mapper class registered for this input path.
      TaggedInputSplit is = (TaggedInputSplit) reporter.getInputSplit();
      Class<? extends AvroMapper> mapperClass = is.getMapperClass();
      @SuppressWarnings("unchecked") // TaggedInputSplit stores only the raw mapper class.
      AvroMapper<IN, OUT> newMapper = (AvroMapper<IN, OUT>) ReflectionUtils.newInstance(mapperClass, conf);
      mapper = newMapper;
    }
    if (out == null) {
      out = new MapCollector<>(collector, isMapOnly);
    }
    mapper.map(wrapper.datum(), out, reporter);
  }
}
| 7,012 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/Pair.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.util.List;
import java.util.Arrays;
import java.util.Map;
import java.util.WeakHashMap;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema.Type;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericContainer;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import org.apache.avro.reflect.ReflectData;
/**
 * A key/value pair.
 *
 * <p>A Pair is represented as an Avro record with two fields: {@code "key"}
 * (field 0) and {@code "value"} (field 1). The value field is declared with
 * {@link Field.Order#IGNORE}, so schema-driven comparisons made through
 * {@link GenericData} consider only the key.
 */
public class Pair<K, V> implements IndexedRecord, Comparable<Pair>, SchemaConstructable {
  private static final String PAIR = Pair.class.getName();
  private static final String KEY = "key";
  private static final String VALUE = "value";
  // The record schema describing this pair's key and value types.
  private Schema schema;
  private K key;
  private V value;

  /** Construct an empty pair from an existing pair schema. */
  public Pair(Schema schema) {
    checkIsPairSchema(schema);
    this.schema = schema;
  }

  /** Construct a pair, building (or reusing a cached) schema from the component schemas. */
  public Pair(K key, Schema keySchema, V value, Schema valueSchema) {
    this.schema = getPairSchema(keySchema, valueSchema);
    this.key = key;
    this.value = value;
  }

  // A pair schema is identified by its full record name matching this class.
  private static void checkIsPairSchema(Schema schema) {
    if (!PAIR.equals(schema.getFullName()))
      throw new IllegalArgumentException("Not a Pair schema: " + schema);
  }

  /** Return a pair's key schema. */
  public static Schema getKeySchema(Schema pair) {
    checkIsPairSchema(pair);
    return pair.getField(KEY).schema();
  }

  /** Return a pair's value schema. */
  public static Schema getValueSchema(Schema pair) {
    checkIsPairSchema(pair);
    return pair.getField(VALUE).schema();
  }

  // Cache of pair schemas, keyed first by key schema, then by value schema.
  // NOTE(review): the cached pair schema strongly references its component
  // schemas, which may keep the WeakHashMap keys reachable — confirm before
  // relying on these entries ever being collected.
  private static final Map<Schema, Map<Schema, Schema>> SCHEMA_CACHE = new WeakHashMap<>();

  /** Get a pair schema. */
  public static Schema getPairSchema(Schema key, Schema value) {
    Map<Schema, Schema> valueSchemas;
    // The whole lookup-and-populate sequence is guarded by one lock so the
    // nested WeakHashMaps are never mutated concurrently.
    synchronized (SCHEMA_CACHE) {
      valueSchemas = SCHEMA_CACHE.computeIfAbsent(key, k -> new WeakHashMap<>());
      Schema result;
      result = valueSchemas.get(value);
      if (result == null) {
        result = makePairSchema(key, value);
        valueSchemas.put(value, result);
      }
      return result;
    }
  }

  // Builds the two-field record schema; the value field's sort order is set
  // to IGNORE so only keys participate in comparisons.
  private static Schema makePairSchema(Schema key, Schema value) {
    Schema pair = Schema.createRecord(PAIR, null, null, false);
    List<Field> fields = Arrays.asList(new Field(KEY, key, "", null),
        new Field(VALUE, value, "", null, Field.Order.IGNORE));
    pair.setFields(fields);
    return pair;
  }

  @Override
  public Schema getSchema() {
    return schema;
  }

  /** Get the key. */
  public K key() {
    return key;
  }

  /** Set the key. */
  public void key(K key) {
    this.key = key;
  }

  /** Get the value. */
  public V value() {
    return value;
  }

  /** Set the value. */
  public void value(V value) {
    this.value = value;
  }

  /** Set both the key and value. */
  public void set(K key, V value) {
    this.key = key;
    this.value = value;
  }

  // Equality requires equal schemas and is otherwise delegated to compareTo,
  // which (via the schema) skips the IGNORE-ordered value field.
  @Override
  public boolean equals(Object o) {
    if (o == this)
      return true; // identical object
    if (!(o instanceof Pair))
      return false; // not a pair
    Pair that = (Pair) o;
    if (!this.schema.equals(that.schema))
      return false; // not the same schema
    return this.compareTo(that) == 0;
  }

  @Override
  public int hashCode() {
    return GenericData.get().hashCode(this, schema);
  }

  @Override
  public int compareTo(Pair that) {
    return GenericData.get().compare(this, that, schema);
  }

  @Override
  public String toString() {
    return GenericData.get().toString(this);
  }

  // IndexedRecord access: field 0 is the key, field 1 the value.
  @Override
  public Object get(int i) {
    switch (i) {
    case 0:
      return key;
    case 1:
      return value;
    default:
      throw new org.apache.avro.AvroRuntimeException("Bad index: " + i);
    }
  }

  @Override
  @SuppressWarnings("unchecked")
  public void put(int i, Object o) {
    switch (i) {
    case 0:
      this.key = (K) o;
      break;
    case 1:
      this.value = (V) o;
      break;
    default:
      throw new org.apache.avro.AvroRuntimeException("Bad index: " + i);
    }
  }

  // Shared primitive schemas used by the convenience constructors below.
  private static final Schema STRING_SCHEMA = Schema.create(Type.STRING);
  private static final Schema BYTES_SCHEMA = Schema.create(Type.BYTES);
  private static final Schema INT_SCHEMA = Schema.create(Type.INT);
  private static final Schema LONG_SCHEMA = Schema.create(Type.LONG);
  private static final Schema FLOAT_SCHEMA = Schema.create(Type.FLOAT);
  private static final Schema DOUBLE_SCHEMA = Schema.create(Type.DOUBLE);
  private static final Schema NULL_SCHEMA = Schema.create(Type.NULL);

  // The constructors below are machine-generated, one per combination of
  // supported key/value static types; see the generator retained in the
  // comment block at the end of this class.
  @SuppressWarnings("unchecked")
  public Pair(Object key, Object value) {
    this((K) key, getSchema(key), (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, GenericContainer value) {
    this((K) key, getSchema(key), (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, CharSequence value) {
    this((K) key, getSchema(key), (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, ByteBuffer value) {
    this((K) key, getSchema(key), (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, Integer value) {
    this((K) key, getSchema(key), (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, Long value) {
    this((K) key, getSchema(key), (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, Float value) {
    this((K) key, getSchema(key), (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, Double value) {
    this((K) key, getSchema(key), (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Object key, Void value) {
    this((K) key, getSchema(key), (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Object value) {
    this((K) key, key.getSchema(), (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, GenericContainer value) {
    this((K) key, key.getSchema(), (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, CharSequence value) {
    this((K) key, key.getSchema(), (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, ByteBuffer value) {
    this((K) key, key.getSchema(), (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Integer value) {
    this((K) key, key.getSchema(), (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Long value) {
    this((K) key, key.getSchema(), (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Float value) {
    this((K) key, key.getSchema(), (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Double value) {
    this((K) key, key.getSchema(), (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(GenericContainer key, Void value) {
    this((K) key, key.getSchema(), (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Object value) {
    this((K) key, STRING_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, GenericContainer value) {
    this((K) key, STRING_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, CharSequence value) {
    this((K) key, STRING_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, ByteBuffer value) {
    this((K) key, STRING_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Integer value) {
    this((K) key, STRING_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Long value) {
    this((K) key, STRING_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Float value) {
    this((K) key, STRING_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Double value) {
    this((K) key, STRING_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(CharSequence key, Void value) {
    this((K) key, STRING_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Object value) {
    this((K) key, BYTES_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, GenericContainer value) {
    this((K) key, BYTES_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, CharSequence value) {
    this((K) key, BYTES_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, ByteBuffer value) {
    this((K) key, BYTES_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Integer value) {
    this((K) key, BYTES_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Long value) {
    this((K) key, BYTES_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Float value) {
    this((K) key, BYTES_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Double value) {
    this((K) key, BYTES_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(ByteBuffer key, Void value) {
    this((K) key, BYTES_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Object value) {
    this((K) key, INT_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, GenericContainer value) {
    this((K) key, INT_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, CharSequence value) {
    this((K) key, INT_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, ByteBuffer value) {
    this((K) key, INT_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Integer value) {
    this((K) key, INT_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Long value) {
    this((K) key, INT_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Float value) {
    this((K) key, INT_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Double value) {
    this((K) key, INT_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Integer key, Void value) {
    this((K) key, INT_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Object value) {
    this((K) key, LONG_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, GenericContainer value) {
    this((K) key, LONG_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, CharSequence value) {
    this((K) key, LONG_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, ByteBuffer value) {
    this((K) key, LONG_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Integer value) {
    this((K) key, LONG_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Long value) {
    this((K) key, LONG_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Float value) {
    this((K) key, LONG_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Double value) {
    this((K) key, LONG_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Long key, Void value) {
    this((K) key, LONG_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Object value) {
    this((K) key, FLOAT_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, GenericContainer value) {
    this((K) key, FLOAT_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, CharSequence value) {
    this((K) key, FLOAT_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, ByteBuffer value) {
    this((K) key, FLOAT_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Integer value) {
    this((K) key, FLOAT_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Long value) {
    this((K) key, FLOAT_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Float value) {
    this((K) key, FLOAT_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Double value) {
    this((K) key, FLOAT_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Float key, Void value) {
    this((K) key, FLOAT_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Object value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, GenericContainer value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, CharSequence value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, ByteBuffer value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Integer value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Long value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Float value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Double value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Double key, Void value) {
    this((K) key, DOUBLE_SCHEMA, (V) value, NULL_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Object value) {
    this((K) key, NULL_SCHEMA, (V) value, getSchema(value));
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, GenericContainer value) {
    this((K) key, NULL_SCHEMA, (V) value, value.getSchema());
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, CharSequence value) {
    this((K) key, NULL_SCHEMA, (V) value, STRING_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, ByteBuffer value) {
    this((K) key, NULL_SCHEMA, (V) value, BYTES_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Integer value) {
    this((K) key, NULL_SCHEMA, (V) value, INT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Long value) {
    this((K) key, NULL_SCHEMA, (V) value, LONG_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Float value) {
    this((K) key, NULL_SCHEMA, (V) value, FLOAT_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Double value) {
    this((K) key, NULL_SCHEMA, (V) value, DOUBLE_SCHEMA);
  }

  @SuppressWarnings("unchecked")
  public Pair(Void key, Void value) {
    this((K) key, NULL_SCHEMA, (V) value, NULL_SCHEMA);
  }

  // Reflectively infers a schema for keys/values of statically-unknown type;
  // wraps failures with a hint to supply explicit schemas instead.
  private static Schema getSchema(Object o) {
    try {
      return ReflectData.get().getSchema(o.getClass());
    } catch (AvroRuntimeException e) {
      throw new AvroRuntimeException(
          "Cannot infer schema for : " + o.getClass() + ". Must create Pair with explicit key and value schemas.", e);
    }
  }

  // Generator for the convenience constructors above; retained for reference.
  // private static final String[][] TABLE = new String[][] {
  // {"Object", "getSchema({0})"},
  // {"GenericContainer", "{0}.getSchema()"},
  // {"CharSequence", "STRING_SCHEMA"},
  // {"ByteBuffer", "BYTES_SCHEMA"},
  // {"Integer", "INT_SCHEMA"},
  // {"Long", "LONG_SCHEMA"},
  // {"Float", "FLOAT_SCHEMA"},
  // {"Double", "DOUBLE_SCHEMA"},
  // {"Void", "NULL_SCHEMA"},
  // };
  // private static String f(String pattern, String value) {
  // return java.text.MessageFormat.format(pattern, value);
  // }
  // public static void main(String... args) throws Exception {
  // StringBuffer b = new StringBuffer();
  // for (String[] k : TABLE) {
  // for (String[] v : TABLE) {
  // b.append("@SuppressWarnings(\"unchecked\")\n");
  // b.append("public Pair("+k[0]+" key, "+v[0]+" value) {\n");
  // b.append(" this((K)key, "+f(k[1],"key")
  // +", (V)value, "+f(v[1],"value")+");\n");
  // b.append("}\n");
  // }
  // }
  // System.out.println(b);
  // }
}
| 7,013 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroTextOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import static org.apache.avro.mapred.AvroOutputFormat.EXT;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
/**
 * The equivalent of {@link org.apache.hadoop.mapred.TextOutputFormat} for
 * writing to Avro Data Files with a <code>"bytes"</code> schema.
 */
public class AvroTextOutputFormat<K, V> extends FileOutputFormat<K, V> {
  /**
   * Creates a writer that appends each key/value pair as a single
   * <code>bytes</code> datum to an Avro data file in the task output
   * directory (file name is {@code name + EXT}).
   */
  @Override
  public RecordWriter<K, V> getRecordWriter(FileSystem ignore, JobConf job, String name, Progressable prog)
      throws IOException {
    Schema schema = Schema.create(Schema.Type.BYTES);
    // Same separator property as Hadoop's TextOutputFormat; defaults to TAB.
    final byte[] keyValueSeparator = job.get("mapreduce.output.textoutputformat.separator", "\t")
        .getBytes(StandardCharsets.UTF_8);
    final DataFileWriter<ByteBuffer> writer = new DataFileWriter<>(new ReflectDatumWriter<>());
    AvroOutputFormat.configureDataFileWriter(writer, job);
    Path path = FileOutputFormat.getTaskOutputPath(job, name + EXT);
    writer.create(schema, path.getFileSystem(job).create(path));
    return new AvroTextRecordWriter(writer, keyValueSeparator);
  }

  /** Serializes key/value pairs into single byte-string datums. */
  class AvroTextRecordWriter implements RecordWriter<K, V> {
    private final DataFileWriter<ByteBuffer> writer;
    private final byte[] keyValueSeparator;

    public AvroTextRecordWriter(DataFileWriter<ByteBuffer> writer, byte[] keyValueSeparator) {
      this.writer = writer;
      this.keyValueSeparator = keyValueSeparator;
    }

    /**
     * Writes "key", "value", or "key&lt;sep&gt;value" depending on which side
     * is present; a record where both are null/NullWritable is skipped.
     */
    @Override
    public void write(K key, V value) throws IOException {
      boolean nullKey = key == null || key instanceof NullWritable;
      boolean nullValue = value == null || value instanceof NullWritable;
      if (nullKey && nullValue) {
        // NO-OP
      } else if (!nullKey && nullValue) {
        writer.append(toByteBuffer(key));
      } else if (nullKey && !nullValue) {
        writer.append(toByteBuffer(value));
      } else {
        writer.append(toByteBuffer(key, keyValueSeparator, value));
      }
    }

    @Override
    public void close(Reporter reporter) throws IOException {
      writer.close();
    }

    // Wraps a Text's backing array directly (no copy, limited to getLength());
    // any other type goes through toString() and is encoded as UTF-8.
    private ByteBuffer toByteBuffer(Object o) throws IOException {
      if (o instanceof Text) {
        Text to = (Text) o;
        return ByteBuffer.wrap(to.getBytes(), 0, to.getLength());
      } else {
        return ByteBuffer.wrap(o.toString().getBytes(StandardCharsets.UTF_8));
      }
    }

    // Concatenates key + separator + value into one freshly allocated buffer.
    private ByteBuffer toByteBuffer(Object key, byte[] sep, Object value) throws IOException {
      byte[] keyBytes, valBytes;
      int keyLength, valLength;
      if (key instanceof Text) {
        Text tkey = (Text) key;
        keyBytes = tkey.getBytes();
        keyLength = tkey.getLength();
      } else {
        keyBytes = key.toString().getBytes(StandardCharsets.UTF_8);
        keyLength = keyBytes.length;
      }
      if (value instanceof Text) {
        Text tval = (Text) value;
        valBytes = tval.getBytes();
        valLength = tval.getLength();
      } else {
        valBytes = value.toString().getBytes(StandardCharsets.UTF_8);
        valLength = valBytes.length;
      }
      ByteBuffer buf = ByteBuffer.allocate(keyLength + sep.length + valLength);
      buf.put(keyBytes, 0, keyLength);
      buf.put(sep);
      buf.put(valBytes, 0, valLength);
      // Rewind through Buffer so the call links against the pre-Java-9
      // signature (ByteBuffer gained a covariant rewind() override in JDK 9).
      ((Buffer) buf).rewind();
      return buf;
    }
  }
}
| 7,014 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/FsInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.avro.file.SeekableInput;
/** Adapt an {@link FSDataInputStream} to {@link SeekableInput}. */
public class FsInput implements Closeable, SeekableInput {
  private final FSDataInputStream stream;
  // File length, captured once when the input is opened.
  private final long len;

  /** Construct given a path and a configuration. */
  public FsInput(Path path, Configuration conf) throws IOException {
    this(path, path.getFileSystem(conf));
  }

  /** Construct given a path and a {@code FileSystem}. */
  public FsInput(Path path, FileSystem fileSystem) throws IOException {
    this.len = fileSystem.getFileStatus(path).getLen();
    this.stream = fileSystem.open(path);
  }

  /** Returns the file length recorded at construction time. */
  @Override
  public long length() {
    return len;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    return stream.read(b, off, len);
  }

  @Override
  public void seek(long p) throws IOException {
    stream.seek(p);
  }

  /** Returns the current byte position in the underlying stream. */
  @Override
  public long tell() throws IOException {
    return stream.getPos();
  }

  @Override
  public void close() throws IOException {
    stream.close();
  }
}
| 7,015 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/HadoopCombiner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * Bridge between a {@link org.apache.hadoop.mapred.Reducer} and an
 * {@link AvroReducer} used when combining. When combining, map output pairs
 * must be split before they're collected.
 */
class HadoopCombiner<K, V> extends HadoopReducerBase<K, V, Pair<K, V>, AvroKey<K>, AvroValue<V>> {

  @Override
  @SuppressWarnings("unchecked")
  protected AvroReducer<K, V, Pair<K, V>> getReducer(JobConf conf) {
    // Instantiate the combiner class from the job configuration, defaulting
    // to the base AvroReducer when AvroJob.COMBINER is unset.
    return ReflectionUtils.newInstance(conf.getClass(AvroJob.COMBINER, AvroReducer.class, AvroReducer.class), conf);
  }

  /**
   * Splits each combined {@link Pair} back into an AvroKey/AvroValue pair
   * before handing it to Hadoop's collector. The two wrappers are reused
   * across calls to avoid per-record allocation. Declared static (with its
   * own type parameters) since it needs no state from the enclosing instance.
   */
  private static class PairCollector<K, V> extends AvroCollector<Pair<K, V>> {
    private final AvroKey<K> keyWrapper = new AvroKey<>(null);
    private final AvroValue<V> valueWrapper = new AvroValue<>(null);
    private final OutputCollector<AvroKey<K>, AvroValue<V>> collector;

    public PairCollector(OutputCollector<AvroKey<K>, AvroValue<V>> collector) {
      this.collector = collector;
    }

    @Override
    public void collect(Pair<K, V> datum) throws IOException {
      keyWrapper.datum(datum.key()); // split the Pair
      valueWrapper.datum(datum.value());
      collector.collect(keyWrapper, valueWrapper);
    }
  }

  @Override
  protected AvroCollector<Pair<K, V>> getCollector(OutputCollector<AvroKey<K>, AvroValue<V>> collector) {
    return new PairCollector<>(collector);
  }
}
| 7,016 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroUtf8InputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.LineRecordReader;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
 * An {@link org.apache.hadoop.mapred.InputFormat} for text files. Each line is
 * a {@link Utf8} key; values are null.
 */
public class AvroUtf8InputFormat extends FileInputFormat<AvroWrapper<Utf8>, NullWritable> implements JobConfigurable {

  /** Adapts Hadoop's {@link LineRecordReader} to emit Utf8-wrapped line keys. */
  static class Utf8LineRecordReader implements RecordReader<AvroWrapper<Utf8>, NullWritable> {
    private LineRecordReader lineRecordReader;
    // Reusable holders filled by the underlying line reader on each next();
    // the LongWritable byte offset is read but never exposed to callers.
    private LongWritable currentKeyHolder = new LongWritable();
    private Text currentValueHolder = new Text();

    public Utf8LineRecordReader(Configuration job, FileSplit split) throws IOException {
      this.lineRecordReader = new LineRecordReader(job, split);
    }

    @Override
    public void close() throws IOException {
      lineRecordReader.close();
    }

    @Override
    public long getPos() throws IOException {
      return lineRecordReader.getPos();
    }

    @Override
    public float getProgress() throws IOException {
      return lineRecordReader.getProgress();
    }

    /**
     * Reads the next line into {@code key}; clears the key and returns false
     * at end of input.
     */
    @Override
    public boolean next(AvroWrapper<Utf8> key, NullWritable value) throws IOException {
      boolean success = lineRecordReader.next(currentKeyHolder, currentValueHolder);
      if (success) {
        // NOTE(review): Utf8 appears to wrap the Text's backing array without
        // copying, trimmed to the valid prefix via setByteLength — the datum
        // is only valid until the next call to next(), which reuses the same
        // buffer. Confirm callers consume the key before advancing.
        key.datum(new Utf8(currentValueHolder.getBytes()).setByteLength(currentValueHolder.getLength()));
      } else {
        key.datum(null);
      }
      return success;
    }

    @Override
    public AvroWrapper<Utf8> createKey() {
      return new AvroWrapper<>(null);
    }

    @Override
    public NullWritable createValue() {
      return NullWritable.get();
    }
  }

  private CompressionCodecFactory compressionCodecs = null;

  @Override
  public void configure(JobConf conf) {
    compressionCodecs = new CompressionCodecFactory(conf);
  }

  // A file with an associated compression codec is treated as unsplittable.
  @Override
  protected boolean isSplitable(FileSystem fs, Path file) {
    return compressionCodecs.getCodec(file) == null;
  }

  @Override
  public RecordReader<AvroWrapper<Utf8>, NullWritable> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    reporter.setStatus(split.toString());
    return new Utf8LineRecordReader(job, (FileSplit) split);
  }
}
| 7,017 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/DelegatingInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.MultipleInputs;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An {@link InputFormat} that delegates read behavior of paths based on their
 * associated avro schema.
 *
 * @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)
 */
class DelegatingInputFormat<K, V> implements InputFormat<K, V> {

  private static final Logger LOG = LoggerFactory.getLogger(DelegatingInputFormat.class);

  /**
   * Computes splits for every configured input path, tagging each split with
   * the input format, mapper class and schema it should be processed with.
   *
   * @param conf      the job configuration carrying the per-path schema and
   *                  mapper maps (see {@link AvroMultipleInputs})
   * @param numSplits hint for the number of splits per schema/mapper group
   * @return all splits, each wrapped in a {@link TaggedInputSplit}
   * @throws IOException if the underlying input format fails to compute splits
   */
  @Override
  public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException {
    JobConf confCopy = new JobConf(conf);
    List<InputSplit> splits = new ArrayList<>();
    Map<Path, Class<? extends AvroMapper>> mapperMap = AvroMultipleInputs.getMapperTypeMap(conf);
    Map<Path, Schema> schemaMap = AvroMultipleInputs.getInputSchemaMap(conf);

    // First, group the input paths by their reader schema.
    Map<Schema, List<Path>> schemaPaths = new HashMap<>();
    for (Entry<Path, Schema> entry : schemaMap.entrySet()) {
      // Demoted from unconditional INFO (which dumped raw schemas for every
      // path on every getSplits call) to parameterized DEBUG.
      LOG.debug("Input path {} uses schema {}", entry.getKey(), entry.getValue());
      schemaPaths.computeIfAbsent(entry.getValue(), schema -> new ArrayList<>()).add(entry.getKey());
    }

    for (Entry<Schema, List<Path>> schemaEntry : schemaPaths.entrySet()) {
      Schema schema = schemaEntry.getKey();
      LOG.debug("Computing splits for schema {}", schema);
      InputFormat format = ReflectionUtils.newInstance(AvroInputFormat.class, conf);
      List<Path> paths = schemaEntry.getValue();

      // Now, for each set of paths that have a common Schema, build
      // a map of Mappers to the paths they're used for. The mapper class may
      // be null here (no per-path mapper configured); HashMap permits a null
      // key, and the fallback to the job's default mapper happens below.
      Map<Class<? extends AvroMapper>, List<Path>> mapperPaths = new HashMap<>();
      for (Path path : paths) {
        mapperPaths.computeIfAbsent(mapperMap.get(path), mapper -> new ArrayList<>()).add(path);
      }

      // Now each set of paths that has a common InputFormat and Mapper can
      // be added to the same job, and split together.
      for (Entry<Class<? extends AvroMapper>, List<Path>> mapEntry : mapperPaths.entrySet()) {
        paths = mapEntry.getValue();
        Class<? extends AvroMapper> mapperClass = mapEntry.getKey();
        if (mapperClass == null) {
          // Fall back to the job-wide mapper when no per-path mapper was set.
          mapperClass = (Class<? extends AvroMapper>) conf.getMapperClass();
        }
        FileInputFormat.setInputPaths(confCopy, paths.toArray(new Path[0]));

        // Get splits for each input path and tag with InputFormat
        // and Mapper types by wrapping in a TaggedInputSplit.
        InputSplit[] pathSplits = format.getSplits(confCopy, numSplits);
        for (InputSplit pathSplit : pathSplits) {
          splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(), mapperClass, schema));
        }
      }
    }
    return splits.toArray(new InputSplit[0]);
  }

  /**
   * Builds the delegate record reader described by the {@link TaggedInputSplit},
   * first publishing the split's schema as the job's input schema.
   */
  @SuppressWarnings("unchecked")
  @Override
  public RecordReader<K, V> getRecordReader(InputSplit split, JobConf conf, Reporter reporter) throws IOException {
    TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
    Schema schema = taggedInputSplit.getSchema();
    AvroJob.setInputSchema(conf, schema);
    InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
        .newInstance(taggedInputSplit.getInputFormatClass(), conf);
    return inputFormat.getRecordReader(taggedInputSplit.getInputSplit(), conf, reporter);
  }
}
| 7,018 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/HadoopMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * Bridge between a {@link org.apache.hadoop.mapred.Mapper} and an
 * {@link AvroMapper}. Outputs are written directly when a job is map-only, but
 * are otherwise assumed to be pairs that are split.
 */
class HadoopMapper<IN, OUT, K, V, KO, VO> extends MapReduceBase
    implements Mapper<AvroWrapper<IN>, NullWritable, KO, VO> {

  // The user-supplied Avro mapper, instantiated reflectively from the job conf.
  private AvroMapper<IN, OUT> delegate;
  // Adapter from AvroCollector to Hadoop's OutputCollector; created lazily on
  // the first map() call because the OutputCollector is not known until then.
  private MapCollector<OUT, K, V, KO, VO> collectorAdapter;
  // True when the job has no reduce phase, in which case outputs are written
  // directly rather than as key/value pairs.
  private boolean mapOnly;

  @Override
  @SuppressWarnings("unchecked")
  public void configure(JobConf conf) {
    delegate = ReflectionUtils.newInstance(conf.getClass(AvroJob.MAPPER, AvroMapper.class, AvroMapper.class), conf);
    mapOnly = conf.getNumReduceTasks() == 0;
  }

  @Override
  public void map(AvroWrapper<IN> wrapper, NullWritable ignored, OutputCollector<KO, VO> collector, Reporter reporter)
      throws IOException {
    if (collectorAdapter == null) {
      collectorAdapter = new MapCollector<>(collector, mapOnly);
    }
    delegate.map(wrapper.datum(), collectorAdapter, reporter);
  }

  @Override
  public void close() throws IOException {
    delegate.close();
  }
}
| 7,019 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroSerialization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
/** The {@link Serialization} used by jobs configured with {@link AvroJob}. */
public class AvroSerialization<T> extends Configured implements Serialization<AvroWrapper<T>> {

  // Both factories are thread-safe and intended to be shared; the original
  // code allocated a new EncoderFactory on every Serializer.open() call while
  // already sharing the DecoderFactory — now both sides use the singletons.
  private static final DecoderFactory FACTORY = DecoderFactory.get();
  private static final EncoderFactory ENCODER_FACTORY = EncoderFactory.get();

  @Override
  public boolean accept(Class<?> c) {
    return AvroWrapper.class.isAssignableFrom(c);
  }

  /**
   * Returns the specified map output deserializer. Defaults to the final output
   * deserializer if no map output schema was specified.
   */
  @Override
  public Deserializer<AvroWrapper<T>> getDeserializer(Class<AvroWrapper<T>> c) {
    Configuration conf = getConf();
    // Keys and values of the intermediate Pair schema are deserialized with
    // their respective sub-schemas.
    boolean isKey = AvroKey.class.isAssignableFrom(c);
    Schema schema = isKey ? Pair.getKeySchema(AvroJob.getMapOutputSchema(conf))
        : Pair.getValueSchema(AvroJob.getMapOutputSchema(conf));
    GenericData dataModel = AvroJob.createMapOutputDataModel(conf);
    DatumReader<T> datumReader = dataModel.createDatumReader(schema);
    return new AvroWrapperDeserializer(datumReader, isKey);
  }

  /** Deserializes map-output keys or values, reusing wrapper objects. */
  private class AvroWrapperDeserializer implements Deserializer<AvroWrapper<T>> {

    private DatumReader<T> reader;
    private BinaryDecoder decoder;
    private boolean isKey; // determines whether AvroKey or AvroValue is built

    public AvroWrapperDeserializer(DatumReader<T> reader, boolean isKey) {
      this.reader = reader;
      this.isKey = isKey;
    }

    @Override
    public void open(InputStream in) {
      // Direct (unbuffered) decoder: Hadoop's serialization framework manages
      // stream positioning, so read-ahead buffering is not safe here.
      this.decoder = FACTORY.directBinaryDecoder(in, decoder);
    }

    @Override
    public AvroWrapper<T> deserialize(AvroWrapper<T> wrapper) throws IOException {
      T datum = reader.read(wrapper == null ? null : wrapper.datum(), decoder);
      if (wrapper == null) {
        wrapper = isKey ? new AvroKey<>(datum) : new AvroValue<>(datum);
      } else {
        wrapper.datum(datum);
      }
      return wrapper;
    }

    @Override
    public void close() throws IOException {
      decoder.inputStream().close();
    }
  }

  /** Returns the specified output serializer. */
  @Override
  public Serializer<AvroWrapper<T>> getSerializer(Class<AvroWrapper<T>> c) {
    // AvroWrapper used for final output, AvroKey or AvroValue for map output
    boolean isFinalOutput = c.equals(AvroWrapper.class);
    Configuration conf = getConf();
    Schema schema = isFinalOutput ? AvroJob.getOutputSchema(conf)
        : (AvroKey.class.isAssignableFrom(c) ? Pair.getKeySchema(AvroJob.getMapOutputSchema(conf))
            : Pair.getValueSchema(AvroJob.getMapOutputSchema(conf)));
    GenericData dataModel = AvroJob.createDataModel(conf);
    return new AvroWrapperSerializer(dataModel.createDatumWriter(schema));
  }

  /** Serializes wrapped Avro data, flushing after each record. */
  private class AvroWrapperSerializer implements Serializer<AvroWrapper<T>> {

    private DatumWriter<T> writer;
    private OutputStream out;
    private BinaryEncoder encoder;

    public AvroWrapperSerializer(DatumWriter<T> writer) {
      this.writer = writer;
    }

    @Override
    public void open(OutputStream out) {
      this.out = out;
      this.encoder = ENCODER_FACTORY.binaryEncoder(out, null);
    }

    @Override
    public void serialize(AvroWrapper<T> wrapper) throws IOException {
      writer.write(wrapper.datum(), encoder);
      // would be a lot faster if the Serializer interface had a flush()
      // method and the Hadoop framework called it when needed rather
      // than for every record.
      encoder.flush();
    }

    @Override
    public void close() throws IOException {
      out.close();
    }
  }
}
| 7,020 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMultipleOutputs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.List;
import java.util.Set;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Collections;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
import org.apache.avro.Schema;
import org.apache.hadoop.io.NullWritable;
/**
* The AvroMultipleOutputs class simplifies writing Avro output data to multiple
* outputs
*
* <p>
* Case one: writing to additional outputs other than the job default output.
*
* Each additional output, or named output, may be configured with its own
* <code>Schema</code> and <code>OutputFormat</code>. A named output can be a
 * single file or a multi file. The latter is referred to as a multi named output
* which is an unbound set of files all sharing the same <code>Schema</code>.
* </p>
* <p>
* Case two: to write data to different files provided by user
* </p>
*
* <p>
* AvroMultipleOutputs supports counters, by default they are disabled. The
* counters group is the {@link AvroMultipleOutputs} class name. The names of
* the counters are the same as the output name. These count the number of
* records written to each output name. For multi named outputs the name of the
* counter is the concatenation of the named output, and underscore '_' and the
* multiname.
* </p>
*
* Usage pattern for job submission:
*
* <pre>
*
* JobConf job = new JobConf();
*
* FileInputFormat.setInputPath(job, inDir);
* FileOutputFormat.setOutputPath(job, outDir);
*
* job.setMapperClass(MyAvroMapper.class);
* job.setReducerClass(HadoopReducer.class);
* job.set("avro.reducer",MyAvroReducer.class);
* ...
*
* Schema schema;
* ...
* // Defines additional single output 'avro1' for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro1", AvroOutputFormat.class,
* schema);
*
* // Defines additional output 'avro2' with different schema for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro2",
* AvroOutputFormat.class,
* null); // if Schema is specified as null then the default output schema is used
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
* <p>
* Usage in Reducer:
*
* <pre>
*
* public class MyAvroReducer extends
* AvroReducer<K, V, OUT> {
* private MultipleOutputs amos;
*
*
* public void configure(JobConf conf) {
* ...
* amos = new AvroMultipleOutputs(conf);
* }
*
* public void reduce(K, Iterator<V> values,
* AvroCollector<OUT>, Reporter reporter)
* throws IOException {
* ...
* amos.collect("avro1", reporter,datum);
* amos.getCollector("avro2", "A", reporter).collect(datum);
 * amos.collect("avro1",reporter,schema,datum,"testavrofile");// this creates a file testavrofile and writes data with schema "schema" into it
 * and uses other values from named output "avro1" like outputclass etc.
* amos.collect("avro1",reporter,schema,datum,"testavrofile1");
* ...
* }
*
* public void close() throws IOException {
* amos.close();
* ...
* }
*
* }
* </pre>
*/
public class AvroMultipleOutputs {

  private static final String NAMED_OUTPUTS = "mo.namedOutputs";

  private static final String MO_PREFIX = "mo.namedOutput.";

  private static final String FORMAT = ".avro";
  private static final String MULTI = ".multi";

  private static final String COUNTERS_ENABLED = "mo.counters";

  /**
   * Counters group used by the counters of MultipleOutputs.
   */
  private static final String COUNTERS_GROUP = AvroMultipleOutputs.class.getName();

  /**
   * Checks if a named output is alreadyDefined or not.
   *
   * @param conf           job conf
   * @param namedOutput    named output names
   * @param alreadyDefined whether the existence/non-existence of the named output
   *                       is to be checked
   * @throws IllegalArgumentException if the output name is alreadyDefined or not
   *                                  depending on the value of the
   *                                  'alreadyDefined' parameter
   */
  private static void checkNamedOutput(JobConf conf, String namedOutput, boolean alreadyDefined) {
    List<String> definedChannels = getNamedOutputsList(conf);
    if (alreadyDefined && definedChannels.contains(namedOutput)) {
      throw new IllegalArgumentException("Named output '" + namedOutput + "' already defined");
    } else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
      throw new IllegalArgumentException("Named output '" + namedOutput + "' not defined");
    }
  }

  /**
   * Checks if a named output name is valid token.
   *
   * @param namedOutput named output Name
   * @throws IllegalArgumentException if the output name is not valid.
   */
  private static void checkTokenName(String namedOutput) {
    if (namedOutput == null || namedOutput.isEmpty()) {
      throw new IllegalArgumentException("Name cannot be NULL or empty");
    }
    // Only ASCII letters and digits are valid; the name is used verbatim in
    // configuration keys and file names.
    for (char ch : namedOutput.toCharArray()) {
      if ((ch >= 'A') && (ch <= 'Z')) {
        continue;
      }
      if ((ch >= 'a') && (ch <= 'z')) {
        continue;
      }
      if ((ch >= '0') && (ch <= '9')) {
        continue;
      }
      throw new IllegalArgumentException("Name cannot have a '" + ch + "' char");
    }
  }

  /**
   * Checks if a named output name is valid.
   *
   * @param namedOutput named output Name
   * @throws IllegalArgumentException if the output name is not valid.
   */
  private static void checkNamedOutputName(String namedOutput) {
    checkTokenName(namedOutput);
    // name cannot be the name used for the default output
    if (namedOutput.equals("part")) {
      throw new IllegalArgumentException("Named output name cannot be 'part'");
    }
  }

  /**
   * Returns list of channel names.
   *
   * @param conf job conf
   * @return List of channel Names
   */
  public static List<String> getNamedOutputsList(JobConf conf) {
    List<String> names = new ArrayList<>();
    StringTokenizer st = new StringTokenizer(conf.get(NAMED_OUTPUTS, ""), " ");
    while (st.hasMoreTokens()) {
      names.add(st.nextToken());
    }
    return names;
  }

  /**
   * Returns if a named output is multiple.
   *
   * @param conf        job conf
   * @param namedOutput named output
   * @return <code>true</code> if the name output is multi, <code>false</code> if
   *         it is single. If the name output is not defined it returns
   *         <code>false</code>
   */
  public static boolean isMultiNamedOutput(JobConf conf, String namedOutput) {
    checkNamedOutput(conf, namedOutput, false);
    return conf.getBoolean(MO_PREFIX + namedOutput + MULTI, false);
  }

  /**
   * Returns the named output OutputFormat.
   *
   * @param conf        job conf
   * @param namedOutput named output
   * @return namedOutput OutputFormat
   */
  public static Class<? extends OutputFormat> getNamedOutputFormatClass(JobConf conf, String namedOutput) {
    checkNamedOutput(conf, namedOutput, false);
    return conf.getClass(MO_PREFIX + namedOutput + FORMAT, null, OutputFormat.class);
  }

  /**
   * Adds a named output for the job.
   * <p/>
   *
   * @param conf              job conf to add the named output
   * @param namedOutput       named output name, it has to be a word, letters and
   *                          numbers only, cannot be the word 'part' as that is
   *                          reserved for the default output.
   * @param outputFormatClass OutputFormat class.
   * @param schema            Schema to used for this namedOutput
   */
  public static void addNamedOutput(JobConf conf, String namedOutput, Class<? extends OutputFormat> outputFormatClass,
      Schema schema) {
    addNamedOutput(conf, namedOutput, false, outputFormatClass, schema);
  }

  /**
   * Adds a multi named output for the job.
   * <p/>
   *
   * @param conf              job conf to add the named output
   * @param namedOutput       named output name, it has to be a word, letters and
   *                          numbers only, cannot be the word 'part' as that is
   *                          reserved for the default output.
   * @param outputFormatClass OutputFormat class.
   * @param schema            Schema to used for this namedOutput
   */
  public static void addMultiNamedOutput(JobConf conf, String namedOutput,
      Class<? extends OutputFormat> outputFormatClass, Schema schema) {
    addNamedOutput(conf, namedOutput, true, outputFormatClass, schema);
  }

  /**
   * Adds a named output for the job.
   * <p/>
   *
   * @param conf              job conf to add the named output
   * @param namedOutput       named output name, it has to be a word, letters and
   *                          numbers only, cannot be the word 'part' as that is
   *                          reserved for the default output.
   * @param multi             indicates if the named output is multi
   * @param outputFormatClass OutputFormat class.
   * @param schema            Schema to used for this namedOutput
   */
  private static void addNamedOutput(JobConf conf, String namedOutput, boolean multi,
      Class<? extends OutputFormat> outputFormatClass, Schema schema) {
    checkNamedOutputName(namedOutput);
    checkNamedOutput(conf, namedOutput, true);
    if (schema != null)
      conf.set(MO_PREFIX + namedOutput + ".schema", schema.toString());
    conf.set(NAMED_OUTPUTS, conf.get(NAMED_OUTPUTS, "") + " " + namedOutput);
    conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass, OutputFormat.class);
    conf.setBoolean(MO_PREFIX + namedOutput + MULTI, multi);
  }

  /**
   * Enables or disables counters for the named outputs.
   * <p/>
   * By default these counters are disabled.
   * <p/>
   * MultipleOutputs supports counters, by default they are disabled. The counters
   * group is the {@link AvroMultipleOutputs} class name.
   * </p>
   * The names of the counters are the same as the named outputs. For multi named
   * outputs the name of the counter is the concatenation of the named output, and
   * underscore '_' and the multiname.
   *
   * @param conf    job conf to enable or disable the named output counters.
   * @param enabled indicates if the counters will be enabled or not.
   */
  public static void setCountersEnabled(JobConf conf, boolean enabled) {
    conf.setBoolean(COUNTERS_ENABLED, enabled);
  }

  /**
   * Returns if the counters for the named outputs are enabled or not.
   * <p/>
   * By default these counters are disabled.
   * <p/>
   * MultipleOutputs supports counters, by default they are disabled. The counters
   * group is the {@link AvroMultipleOutputs} class name.
   * </p>
   * The names of the counters are the same as the named outputs. For multi named
   * outputs the name of the counter is the concatenation of the named output, and
   * underscore '_' and the multiname.
   *
   * @param conf job conf to query.
   * @return TRUE if the counters are enabled, FALSE if they are disabled.
   */
  public static boolean getCountersEnabled(JobConf conf) {
    return conf.getBoolean(COUNTERS_ENABLED, false);
  }

  // instance code, to be used from Mapper/Reducer code

  private JobConf conf;
  private OutputFormat outputFormat;
  private Set<String> namedOutputs;
  // One RecordWriter per base file name, created lazily and closed in close().
  private Map<String, RecordWriter> recordWriters;
  private boolean countersEnabled;

  /**
   * Creates and initializes multiple named outputs support, it should be
   * instantiated in the Mapper/Reducer configure method.
   *
   * @param job the job configuration object
   */
  public AvroMultipleOutputs(JobConf job) {
    this.conf = job;
    outputFormat = new InternalFileOutputFormat();
    namedOutputs = Collections.unmodifiableSet(new HashSet<>(AvroMultipleOutputs.getNamedOutputsList(job)));
    recordWriters = new HashMap<>();
    countersEnabled = getCountersEnabled(job);
  }

  /**
   * Returns iterator with the defined name outputs.
   *
   * @return iterator with the defined named outputs
   */
  public Iterator<String> getNamedOutputs() {
    return namedOutputs.iterator();
  }

  /**
   * Lazily creates (and caches) the RecordWriter for a base file name.
   *
   * Synchronized so this class can be used with a MultithreadedMapRunner.
   */
  private synchronized RecordWriter getRecordWriter(String namedOutput, String baseFileName, final Reporter reporter,
      Schema schema) throws IOException {
    RecordWriter writer = recordWriters.get(baseFileName);
    if (writer == null) {
      if (countersEnabled && reporter == null) {
        throw new IllegalArgumentException("Counters are enabled, Reporter cannot be NULL");
      }
      if (schema != null)
        conf.set(MO_PREFIX + namedOutput + ".schema", schema.toString());
      JobConf jobConf = new JobConf(conf);
      jobConf.set(InternalFileOutputFormat.CONFIG_NAMED_OUTPUT, namedOutput);
      FileSystem fs = FileSystem.get(conf);
      writer = outputFormat.getRecordWriter(fs, jobConf, baseFileName, reporter);
      // The null-reporter case was already rejected above, so the counting
      // wrapper can be installed unconditionally when counters are enabled.
      if (countersEnabled) {
        writer = new RecordWriterWithCounter(writer, baseFileName, reporter);
      }
      recordWriters.put(baseFileName, writer);
    }
    return writer;
  }

  /** Decorates a RecordWriter so every write increments a named counter. */
  private static class RecordWriterWithCounter implements RecordWriter {
    private RecordWriter writer;
    private String counterName;
    private Reporter reporter;

    public RecordWriterWithCounter(RecordWriter writer, String counterName, Reporter reporter) {
      this.writer = writer;
      this.counterName = counterName;
      this.reporter = reporter;
    }

    @SuppressWarnings({ "unchecked" })
    @Override
    public void write(Object key, Object value) throws IOException {
      reporter.incrCounter(COUNTERS_GROUP, counterName, 1);
      writer.write(key, value);
    }

    @Override
    public void close(Reporter reporter) throws IOException {
      writer.close(reporter);
    }
  }

  /**
   * Output Collector for the default schema.
   * <p/>
   *
   * @param namedOutput the named output name
   * @param reporter    the reporter
   * @param datum       output data
   * @throws IOException thrown if output collector could not be created
   */
  public void collect(String namedOutput, Reporter reporter, Object datum) throws IOException {
    getCollector(namedOutput, reporter).collect(datum);
  }

  /**
   * OutputCollector with custom schema.
   * <p/>
   *
   * @param namedOutput the named output name (this will the output file name)
   * @param reporter    the reporter
   * @param datum       output data
   * @param schema      schema to use for this output
   * @throws IOException thrown if output collector could not be created
   */
  public void collect(String namedOutput, Reporter reporter, Schema schema, Object datum) throws IOException {
    getCollector(namedOutput, reporter, schema).collect(datum);
  }

  /**
   * OutputCollector with custom schema and file name.
   * <p/>
   *
   * @param namedOutput    the named output name
   * @param reporter       the reporter
   * @param baseOutputPath outputfile name to use.
   * @param datum          output data
   * @param schema         schema to use for this output
   * @throws IOException thrown if output collector could not be created
   */
  public void collect(String namedOutput, Reporter reporter, Schema schema, Object datum, String baseOutputPath)
      throws IOException {
    getCollector(namedOutput, null, reporter, baseOutputPath, schema).collect(datum);
  }

  /**
   * Gets the output collector for a named output.
   * <p/>
   *
   * @param namedOutput the named output name
   * @param reporter    the reporter
   * @return the output collector for the given named output
   * @throws IOException thrown if output collector could not be created
   * @deprecated Use {@link #collect} method for collecting output
   */
  @SuppressWarnings({ "unchecked", "rawtypes" })
  public AvroCollector getCollector(String namedOutput, Reporter reporter) throws IOException {
    return getCollector(namedOutput, null, reporter, namedOutput, null);
  }

  @SuppressWarnings("rawtypes")
  private AvroCollector getCollector(String namedOutput, Reporter reporter, Schema schema) throws IOException {
    return getCollector(namedOutput, null, reporter, namedOutput, schema);
  }

  /**
   * Gets the output collector for a named output.
   * <p/>
   *
   * @param namedOutput the named output name
   * @param reporter    the reporter
   * @param multiName   the multiname
   * @return the output collector for the given named output
   * @throws IOException thrown if output collector could not be created
   */
  @SuppressWarnings("rawtypes")
  public AvroCollector getCollector(String namedOutput, String multiName, Reporter reporter) throws IOException {
    return getCollector(namedOutput, multiName, reporter, namedOutput, null);
  }

  /**
   * Gets the output collector for a multi named output.
   * <p/>
   *
   * @param namedOutput the named output name
   * @param multiName   the multi name part
   * @param reporter    the reporter
   * @return the output collector for the given named output
   * @throws IOException thrown if output collector could not be created
   */
  @SuppressWarnings({ "unchecked" })
  private AvroCollector getCollector(String namedOutput, String multiName, Reporter reporter, String baseOutputFileName,
      Schema schema) throws IOException {
    checkNamedOutputName(namedOutput);
    if (!namedOutputs.contains(namedOutput)) {
      throw new IllegalArgumentException("Undefined named output '" + namedOutput + "'");
    }
    boolean multi = isMultiNamedOutput(conf, namedOutput);

    if (!multi && multiName != null) {
      throw new IllegalArgumentException("Name output '" + namedOutput + "' has not been defined as multi");
    }
    if (multi) {
      checkTokenName(multiName);
    }

    String baseFileName = (multi) ? namedOutput + "_" + multiName : baseOutputFileName;

    final RecordWriter writer = getRecordWriter(namedOutput, baseFileName, reporter, schema);

    return new AvroCollector() {
      @SuppressWarnings({ "unchecked" })
      @Override
      public void collect(Object key) throws IOException {
        AvroWrapper wrapper = new AvroWrapper(key);
        writer.write(wrapper, NullWritable.get());
      }
    };
  }

  /**
   * Closes all the opened named outputs.
   * <p/>
   * If overriden subclasses must invoke <code>super.close()</code> at the end of
   * their <code>close()</code>
   *
   * @throws java.io.IOException thrown if any of the MultipleOutput files could
   *                             not be closed properly.
   */
  public void close() throws IOException {
    for (RecordWriter writer : recordWriters.values()) {
      writer.close(null);
    }
  }

  /**
   * OutputFormat that dispatches to the OutputFormat configured for a named
   * output, applying that output's schema (as map-output or final-output schema
   * depending on whether the job is map-only).
   */
  private static class InternalFileOutputFormat extends FileOutputFormat<Object, Object> {
    public static final String CONFIG_NAMED_OUTPUT = "mo.config.namedOutput";

    @SuppressWarnings({ "unchecked" })
    @Override
    public RecordWriter<Object, Object> getRecordWriter(FileSystem fs, JobConf job, String baseFileName,
        Progressable arg3) throws IOException {
      String nameOutput = job.get(CONFIG_NAMED_OUTPUT, null);
      String fileName = getUniqueName(job, baseFileName);
      Schema schema = null;
      String schemastr = job.get(MO_PREFIX + nameOutput + ".schema", null);
      if (schemastr != null)
        // Schema.parse(String) is deprecated; Schema.Parser is the supported
        // replacement (and lets us drop the "deprecation" suppression).
        schema = new Schema.Parser().parse(schemastr);
      JobConf outputConf = new JobConf(job);
      outputConf.setOutputFormat(getNamedOutputFormatClass(job, nameOutput));
      boolean isMapOnly = job.getNumReduceTasks() == 0;
      if (schema != null) {
        if (isMapOnly)
          AvroJob.setMapOutputSchema(outputConf, schema);
        else
          AvroJob.setOutputSchema(outputConf, schema);
      }
      OutputFormat outputFormat = outputConf.getOutputFormat();
      return outputFormat.getRecordWriter(fs, outputConf, fileName, arg3);
    }
  }
}
| 7,021 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/SequenceFileReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.io.File;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.Map;
import java.util.HashMap;
import java.util.NoSuchElementException;
import java.net.URI;
import java.lang.reflect.Type;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.avro.Schema;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.file.FileReader;
import org.apache.avro.reflect.ReflectData;
/** A {@link FileReader} for sequence files. */
@SuppressWarnings(value = "unchecked")
public class SequenceFileReader<K, V> implements FileReader<Pair<K, V>> {
private SequenceFile.Reader reader;
private Schema schema;
private boolean ready = false; // true iff done & key are current
private boolean done = false; // true iff at EOF
private Writable key, spareKey, value;
private Converter<K> keyConverter = o -> (K) o;
private Converter<V> valConverter = o -> (V) o;
/**
 * Opens the sequence file at {@code file} using a default Hadoop
 * {@link Configuration}.
 *
 * @param file the sequence file to read
 * @throws IOException if the file cannot be opened
 */
public SequenceFileReader(File file) throws IOException {
  this(file.toURI(), new Configuration());
}
public SequenceFileReader(URI uri, Configuration c) throws IOException {
this(new SequenceFile.Reader(FileSystem.get(uri, c), new Path(uri.toString()), c), c);
}
public SequenceFileReader(SequenceFile.Reader reader, Configuration conf) {
this.reader = reader;
this.schema = Pair.getPairSchema(WritableData.get().getSchema(reader.getKeyClass()),
WritableData.get().getSchema(reader.getValueClass()));
this.key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
this.spareKey = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
this.value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
if (WRITABLE_CONVERTERS.containsKey(reader.getKeyClass()))
keyConverter = WRITABLE_CONVERTERS.get(reader.getKeyClass());
if (WRITABLE_CONVERTERS.containsKey(reader.getValueClass()))
valConverter = WRITABLE_CONVERTERS.get(reader.getValueClass());
}
@Override
public void close() throws IOException {
reader.close();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public Iterator<Pair<K, V>> iterator() {
return this;
}
@Override
public Schema getSchema() {
return schema;
}
private void prepare() throws IOException {
if (ready)
return;
this.done = !reader.next(key);
ready = true;
}
@Override
public boolean hasNext() {
try {
prepare();
return !done;
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
@Override
public Pair<K, V> next() {
try {
return next(null);
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
@Override
public Pair<K, V> next(Pair<K, V> reuse) throws IOException {
prepare();
if (!hasNext())
throw new NoSuchElementException();
Pair<K, V> result = reuse;
if (result == null)
result = new Pair<>(schema);
result.key(keyConverter.convert(key));
reader.getCurrentValue(value);
result.value(valConverter.convert(value));
// swap key and spareKey
Writable k = key;
key = spareKey;
spareKey = k;
ready = false;
return result;
}
@Override
public void sync(long position) throws IOException {
if (position > reader.getPosition())
reader.sync(position);
ready = false;
}
@Override
public boolean pastSync(long position) throws IOException {
return reader.getPosition() >= position && reader.syncSeen();
}
@Override
public long tell() throws IOException {
return reader.getPosition();
}
private static final Map<Type, Schema> WRITABLE_SCHEMAS = new HashMap<>();
static {
WRITABLE_SCHEMAS.put(NullWritable.class, Schema.create(Schema.Type.NULL));
WRITABLE_SCHEMAS.put(BooleanWritable.class, Schema.create(Schema.Type.BOOLEAN));
WRITABLE_SCHEMAS.put(IntWritable.class, Schema.create(Schema.Type.INT));
WRITABLE_SCHEMAS.put(LongWritable.class, Schema.create(Schema.Type.LONG));
WRITABLE_SCHEMAS.put(FloatWritable.class, Schema.create(Schema.Type.FLOAT));
WRITABLE_SCHEMAS.put(DoubleWritable.class, Schema.create(Schema.Type.DOUBLE));
WRITABLE_SCHEMAS.put(BytesWritable.class, Schema.create(Schema.Type.BYTES));
WRITABLE_SCHEMAS.put(Text.class, Schema.create(Schema.Type.STRING));
}
private static class WritableData extends ReflectData {
private static final WritableData INSTANCE = new WritableData();
protected WritableData() {
}
/** Return the singleton instance. */
public static WritableData get() {
return INSTANCE;
}
@Override
public Schema getSchema(java.lang.reflect.Type type) {
if (WRITABLE_SCHEMAS.containsKey(type))
return WRITABLE_SCHEMAS.get(type);
else
return super.getSchema(type);
}
}
private interface Converter<T> {
T convert(Writable o);
}
private static final Map<Type, Converter> WRITABLE_CONVERTERS = new HashMap<>();
static {
WRITABLE_CONVERTERS.put(NullWritable.class, (Converter<Void>) o -> null);
WRITABLE_CONVERTERS.put(BooleanWritable.class, (Converter<Boolean>) o -> ((BooleanWritable) o).get());
WRITABLE_CONVERTERS.put(IntWritable.class, (Converter<Integer>) o -> ((IntWritable) o).get());
WRITABLE_CONVERTERS.put(LongWritable.class, (Converter<Long>) o -> ((LongWritable) o).get());
WRITABLE_CONVERTERS.put(FloatWritable.class, (Converter<Float>) o -> ((FloatWritable) o).get());
WRITABLE_CONVERTERS.put(DoubleWritable.class, (Converter<Double>) o -> ((DoubleWritable) o).get());
WRITABLE_CONVERTERS.put(BytesWritable.class, (Converter<ByteBuffer>) o -> {
BytesWritable b = (BytesWritable) o;
return ByteBuffer.wrap(b.getBytes(), 0, b.getLength());
});
WRITABLE_CONVERTERS.put(Text.class, (Converter<String>) Object::toString);
}
}
| 7,022 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
/** The wrapper of keys for jobs configured with {@link AvroJob} . */
public class AvroKey<T> extends AvroWrapper<T> {
  /** Constructs an {@link AvroKey} that wraps no key. */
  public AvroKey() {
    super(null);
  }
  /**
   * Constructs an {@link AvroKey} wrapping the given key datum.
   *
   * @param datum the key to wrap; may be null
   */
  public AvroKey(T datum) {
    super(datum);
  }
}
| 7,023 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/TaggedInputSplit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link InputSplit} that tags another InputSplit with extra data for use by
* {@link DelegatingInputFormat}s and {@link DelegatingMapper}s.
*/
class TaggedInputSplit implements Configurable, InputSplit {
  private Class<? extends InputSplit> inputSplitClass;
  private InputSplit inputSplit; // the wrapped split; delegated to for length/locations
  private Class<? extends InputFormat> inputFormatClass;
  private Class<? extends AvroMapper> mapperClass;
  private Schema schema; // input schema record readers should use for this split
  private Schema.Parser schemaParser = new Schema.Parser();
  private Configuration conf;
  // No-arg constructor required for Writable deserialization via reflection;
  // the framework calls setConf() and then readFields().
  public TaggedInputSplit() {
    // Default constructor.
  }
  /**
   * Creates a new TaggedInputSplit.
   *
   * @param inputSplit The InputSplit to be tagged
   * @param conf The configuration to use
   * @param inputFormatClass The InputFormat class to use for this job
   * @param mapperClass The Mapper class to use for this job
   */
  public TaggedInputSplit(InputSplit inputSplit, Configuration conf, Class<? extends InputFormat> inputFormatClass,
      Class<? extends AvroMapper> mapperClass, Schema inputSchema) {
    this.inputSplitClass = inputSplit.getClass();
    this.inputSplit = inputSplit;
    this.conf = conf;
    this.inputFormatClass = inputFormatClass;
    this.mapperClass = mapperClass;
    this.schema = inputSchema;
  }
  /**
   * Retrieves the original InputSplit.
   *
   * @return The InputSplit that was tagged
   */
  public InputSplit getInputSplit() {
    return inputSplit;
  }
  /**
   * Retrieves the InputFormat class to use for this split.
   *
   * @return The InputFormat class to use
   */
  public Class<? extends InputFormat> getInputFormatClass() {
    return inputFormatClass;
  }
  /**
   * Retrieves the Mapper class to use for this split.
   *
   * @return The Mapper class to use
   */
  public Class<? extends AvroMapper> getMapperClass() {
    return mapperClass;
  }
  /**
   * Retrieves the Schema to use for this split.
   *
   * @return The schema for record readers to use
   */
  public Schema getSchema() {
    return schema;
  }
  @Override
  public long getLength() throws IOException {
    return inputSplit.getLength();
  }
  @Override
  public String[] getLocations() throws IOException {
    return inputSplit.getLocations();
  }
  // Deserializes in exactly the order written by write():
  // split class, split body, input format class, mapper class, schema JSON.
  // Requires conf to have been injected via setConf() beforehand so the
  // classes can be resolved.
  @SuppressWarnings("unchecked")
  @Override
  public void readFields(DataInput in) throws IOException {
    inputSplitClass = (Class<? extends InputSplit>) readClass(in);
    inputSplit = ReflectionUtils.newInstance(inputSplitClass, conf);
    inputSplit.readFields(in);
    inputFormatClass = (Class<? extends InputFormat>) readClass(in);
    mapperClass = (Class<? extends AvroMapper>) readClass(in);
    String schemaString = Text.readString(in);
    schema = schemaParser.parse(schemaString);
  }
  // Reads a class name written by Text.writeString and resolves it through
  // the configuration's class loader.
  private Class<?> readClass(DataInput in) throws IOException {
    String className = Text.readString(in);
    try {
      return conf.getClassByName(className);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException("readObject can't find class", e);
    }
  }
  // Serialization counterpart of readFields(); field order must match.
  @Override
  public void write(DataOutput out) throws IOException {
    Text.writeString(out, inputSplitClass.getName());
    inputSplit.write(out);
    Text.writeString(out, inputFormatClass.getName());
    Text.writeString(out, mapperClass.getName());
    Text.writeString(out, schema.toString());
  }
  @Override
  public Configuration getConf() {
    return conf;
  }
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }
  @Override
  public String toString() {
    return inputSplit.toString();
  }
}
| 7,024 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroValue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
/** The wrapper of values for jobs configured with {@link AvroJob} . */
public class AvroValue<T> extends AvroWrapper<T> {
  /** Constructs an {@link AvroValue} that wraps no value. */
  public AvroValue() {
    super(null);
  }
  /**
   * Constructs an {@link AvroValue} wrapping the given value datum.
   *
   * @param datum the value to wrap; may be null
   */
  public AvroValue(T datum) {
    super(datum);
  }
}
| 7,025 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/SequenceFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
/** An {@link org.apache.hadoop.mapred.InputFormat} for sequence files. */
public class SequenceFileInputFormat<K, V> extends FileInputFormat<AvroWrapper<Pair<K, V>>, NullWritable> {
  /**
   * Creates a record reader over the given split, reporting the split being
   * processed as the task status.
   */
  @Override
  public RecordReader<AvroWrapper<Pair<K, V>>, NullWritable> getRecordReader(InputSplit split, JobConf job,
      Reporter reporter) throws IOException {
    reporter.setStatus(split.toString());
    final FileSplit fileSplit = (FileSplit) split;
    return new SequenceFileRecordReader<>(job, fileSplit);
  }
}
| 7,026 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMultipleInputs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.SchemaParseException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class supports Avro-MapReduce jobs that have multiple input paths with a
* different {@link Schema} and {@link AvroMapper} for each path.
*
* <p>
* Usage:
* </p>
* <p>
* <strong>Case 1: (ReflectData based inputs)</strong>
* </p>
*
* <pre>
* // Enable ReflectData usage across job.
* AvroJob.setReflect(job);
*
* Schema type1Schema = ReflectData.get().getSchema(Type1Record.class)
* AvroMultipleInputs.addInputPath(job, inputPath1, type1Schema, Type1AvroMapper.class);
* </pre>
*
* Where Type1AvroMapper would be implemented as
*
* <pre>
* class Type1AvroMapper extends AvroMapper<Type1Record, Pair<ComparingKeyRecord, CommonValueRecord>>
* </pre>
*
* <pre>
* Schema type2Schema = ReflectData.get().getSchema(Type2Record.class)
* AvroMultipleInputs.addInputPath(job, inputPath2, type2Schema, Type2AvroMapper.class);
* </pre>
*
* Where Type2AvroMapper would be implemented as
*
* <pre>
* class Type2AvroMapper extends AvroMapper<Type2Record, Pair<ComparingKeyRecord, CommonValueRecord>>
* </pre>
*
* <p>
* <strong>Case 2: (SpecificData based inputs)</strong>
* </p>
*
* <pre>
* Schema type1Schema = Type1Record.SCHEMA$;
* AvroMultipleInputs.addInputPath(job, inputPath1, type1Schema, Type1AvroMapper.class);
* </pre>
*
* Where Type1AvroMapper would be implemented as
*
* <pre>
* class Type1AvroMapper extends AvroMapper<Type1Record, Pair<ComparingKeyRecord, CommonValueRecord>>
* </pre>
*
* <pre>
* Schema type2Schema = Type2Record.SCHEMA$;
* AvroMultipleInputs.addInputPath(job, inputPath2, type2Schema, Type2AvroMapper.class);
* </pre>
*
* Where Type2AvroMapper would be implemented as
*
* <pre>
* class Type2AvroMapper extends AvroMapper<Type2Record, Pair<ComparingKeyRecord, CommonValueRecord>>
* </pre>
*
* <p>
* <strong>Note on InputFormat:</strong> The InputFormat used will always be
* {@link AvroInputFormat} when using this class.
* </p>
* <p>
* <strong>Note on collector outputs:</strong> When using this class, you will
* need to ensure that the mapper implementations involved must all emit the
* same Key type and Value record types, as set by
* {@link AvroJob#setOutputSchema(JobConf, Schema)} or
* {@link AvroJob#setMapOutputSchema(JobConf, Schema)}.
* </p>
*/
public class AvroMultipleInputs {
  private static final Logger LOG = LoggerFactory.getLogger(AvroMultipleInputs.class);
  /** Job-conf key holding comma-separated "path;base64(schemaJson)" entries. */
  private static final String SCHEMA_KEY = "avro.mapreduce.input.multipleinputs.dir.schemas";
  /** Job-conf key holding comma-separated "path;mapperClassName" entries. */
  private static final String MAPPERS_KEY = "avro.mapreduce.input.multipleinputs.dir.mappers";
  /**
   * Add a {@link Path} with a custom {@link Schema} to the list of inputs for the
   * map-reduce job.
   *
   * @param conf The configuration of the job
   * @param path {@link Path} to be added to the list of inputs for the job
   * @param inputSchema {@link Schema} class to use for this path
   */
  private static void addInputPath(JobConf conf, Path path, Schema inputSchema) {
    // Base64-encode the schema JSON so it cannot contain the ';' and ','
    // characters used as delimiters when packing entries into one conf value.
    String schemaMapping = path.toString() + ";" + toBase64(inputSchema.toString());
    String schemas = conf.get(SCHEMA_KEY);
    conf.set(SCHEMA_KEY, schemas == null ? schemaMapping : schemas + "," + schemaMapping);
    conf.setInputFormat(DelegatingInputFormat.class);
  }
  /**
   * Add a {@link Path} with a custom {@link Schema} and {@link AvroMapper} to the
   * list of inputs for the map-reduce job.
   *
   * @param conf The configuration of the job
   * @param path {@link Path} to be added to the list of inputs for the job
   * @param inputSchema {@link Schema} to use for this path
   * @param mapperClass {@link AvroMapper} class to use for this path
   */
  public static void addInputPath(JobConf conf, Path path, Class<? extends AvroMapper> mapperClass,
      Schema inputSchema) {
    addInputPath(conf, path, inputSchema);
    String mapperMapping = path.toString() + ";" + mapperClass.getName();
    LOG.info(mapperMapping);
    String mappers = conf.get(MAPPERS_KEY);
    conf.set(MAPPERS_KEY, mappers == null ? mapperMapping : mappers + "," + mapperMapping);
    conf.setMapperClass(DelegatingMapper.class);
  }
  /**
   * Retrieves a map of {@link Path}s to the {@link AvroMapper} class that should
   * be used for them.
   *
   * @param conf The configuration of the job
   * @see #addInputPath(JobConf, Path, Class, Schema)
   * @return A map of paths-to-mappers for the job
   */
  @SuppressWarnings("unchecked")
  static Map<Path, Class<? extends AvroMapper>> getMapperTypeMap(JobConf conf) {
    if (conf.get(MAPPERS_KEY) == null) {
      return Collections.emptyMap();
    }
    Map<Path, Class<? extends AvroMapper>> m = new HashMap<>();
    String[] pathMappings = conf.get(MAPPERS_KEY).split(",");
    for (String pathMapping : pathMappings) {
      String[] split = pathMapping.split(";");
      Class<? extends AvroMapper> mapClass;
      try {
        mapClass = (Class<? extends AvroMapper>) conf.getClassByName(split[1]);
      } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
      m.put(new Path(split[0]), mapClass);
    }
    return m;
  }
  /**
   * Retrieves a map of {@link Path}s to the {@link Schema} that should be used
   * for them.
   *
   * @param conf The configuration of the job
   * @see #addInputPath(JobConf, Path, Class, Schema)
   * @return A map of paths to schemas for the job
   */
  static Map<Path, Schema> getInputSchemaMap(JobConf conf) {
    if (conf.get(SCHEMA_KEY) == null) {
      return Collections.emptyMap();
    }
    Map<Path, Schema> m = new HashMap<>();
    String[] schemaMappings = conf.get(SCHEMA_KEY).split(",");
    for (String schemaMapping : schemaMappings) {
      String[] split = schemaMapping.split(";");
      String schemaString = fromBase64(split[1]);
      Schema inputSchema;
      try {
        // Use a fresh parser per entry: Schema.Parser remembers every named
        // type it has parsed, so a single shared parser throws
        // SchemaParseException ("Can't redefine") as soon as two input paths
        // use schemas declaring the same record name — including the common
        // case of the very same schema registered for two different paths.
        inputSchema = new Schema.Parser().parse(schemaString);
      } catch (SchemaParseException e) {
        throw new RuntimeException(e);
      }
      m.put(new Path(split[0]), inputSchema);
    }
    return m;
  }
  /** Encodes a string as MIME base64 (UTF-8 bytes); inverse of {@link #fromBase64}. */
  private static String toBase64(String rawString) {
    final byte[] buf = rawString.getBytes(UTF_8);
    return new String(Base64.getMimeEncoder().encode(buf), UTF_8);
  }
  /** Decodes a MIME base64 string produced by {@link #toBase64}. */
  private static String fromBase64(String base64String) {
    final byte[] buf = base64String.getBytes(UTF_8);
    return new String(Base64.getMimeDecoder().decode(buf), UTF_8);
  }
}
| 7,027 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Map;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.util.Progressable;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.file.HadoopCodecFactory;
import static org.apache.avro.file.DataFileConstants.DEFAULT_SYNC_INTERVAL;
import static org.apache.avro.file.DataFileConstants.DEFLATE_CODEC;
import static org.apache.avro.file.DataFileConstants.XZ_CODEC;
import static org.apache.avro.file.DataFileConstants.ZSTANDARD_CODEC;
import static org.apache.avro.file.CodecFactory.DEFAULT_DEFLATE_LEVEL;
import static org.apache.avro.file.CodecFactory.DEFAULT_XZ_LEVEL;
import static org.apache.avro.file.CodecFactory.DEFAULT_ZSTANDARD_LEVEL;
import static org.apache.avro.file.CodecFactory.DEFAULT_ZSTANDARD_BUFFERPOOL;
/**
* An {@link org.apache.hadoop.mapred.OutputFormat} for Avro data files.
* <p/>
* You can specify various options using Job Configuration properties. Look at
* the fields in {@link AvroJob} as well as this class to get an overview of the
* supported options.
*/
public class AvroOutputFormat<T> extends FileOutputFormat<AvroWrapper<T>, NullWritable> {
  /** The file name extension for avro data files. */
  public final static String EXT = ".avro";
  /** The configuration key for Avro deflate level. */
  public static final String DEFLATE_LEVEL_KEY = "avro.mapred.deflate.level";
  /** The configuration key for Avro XZ level. */
  public static final String XZ_LEVEL_KEY = "avro.mapred.xz.level";
  /** The configuration key for Avro ZSTD level. */
  public static final String ZSTD_LEVEL_KEY = "avro.mapred.zstd.level";
  /** The configuration key for Avro ZSTD buffer pool. */
  public static final String ZSTD_BUFFERPOOL_KEY = "avro.mapred.zstd.bufferpool";
  /** The configuration key for Avro sync interval. */
  public static final String SYNC_INTERVAL_KEY = "avro.mapred.sync.interval";
  /** Enable output compression using the deflate codec and specify its level. */
  public static void setDeflateLevel(JobConf job, int level) {
    FileOutputFormat.setCompressOutput(job, true);
    job.setInt(DEFLATE_LEVEL_KEY, level);
  }
  /**
   * Set the sync interval to be used by the underlying {@link DataFileWriter}.
   */
  public static void setSyncInterval(JobConf job, int syncIntervalInBytes) {
    job.setInt(SYNC_INTERVAL_KEY, syncIntervalInBytes);
  }
  /**
   * Applies job-configured codec, sync interval, and user metadata to the
   * writer before the output file is created.
   */
  static <T> void configureDataFileWriter(DataFileWriter<T> writer, JobConf job) throws UnsupportedEncodingException {
    CodecFactory factory = getCodecFactory(job);
    if (factory != null) {
      writer.setCodec(factory);
    }
    writer.setSyncInterval(job.getInt(SYNC_INTERVAL_KEY, DEFAULT_SYNC_INTERVAL));
    // copy metadata from job
    for (Map.Entry<String, String> e : job) {
      if (e.getKey().startsWith(AvroJob.TEXT_PREFIX))
        writer.setMeta(e.getKey().substring(AvroJob.TEXT_PREFIX.length()), e.getValue());
      if (e.getKey().startsWith(AvroJob.BINARY_PREFIX))
        // Binary metadata is stored in the conf as a URL-encoded ISO-8859-1
        // string; decode it back to the original bytes.
        writer.setMeta(e.getKey().substring(AvroJob.BINARY_PREFIX.length()),
            URLDecoder.decode(e.getValue(), StandardCharsets.ISO_8859_1.name()).getBytes(StandardCharsets.ISO_8859_1));
    }
  }
  /**
   * This will select the correct compression codec from the JobConf. The order of
   * selection is as follows:
   * <ul>
   * <li>If mapred.output.compress is true then look for codec otherwise no
   * compression</li>
   * <li>Use avro.output.codec if populated</li>
   * <li>Next use mapred.output.compression.codec if populated</li>
   * <li>If not default to Deflate Codec</li>
   * </ul>
   *
   * @return the selected codec factory, or null when compression is disabled
   */
  static CodecFactory getCodecFactory(JobConf job) {
    CodecFactory factory = null;
    if (FileOutputFormat.getCompressOutput(job)) {
      int deflateLevel = job.getInt(DEFLATE_LEVEL_KEY, DEFAULT_DEFLATE_LEVEL);
      int xzLevel = job.getInt(XZ_LEVEL_KEY, DEFAULT_XZ_LEVEL);
      int zstdLevel = job.getInt(ZSTD_LEVEL_KEY, DEFAULT_ZSTANDARD_LEVEL);
      boolean zstdBufferPool = job.getBoolean(ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL);
      String codecName = job.get(AvroJob.OUTPUT_CODEC);
      if (codecName == null) {
        // No Avro codec configured: fall back to the Hadoop compression codec
        // (translated to its Avro equivalent) or, failing that, deflate.
        String codecClassName = job.get("mapred.output.compression.codec", null);
        String avroCodecName = HadoopCodecFactory.getAvroCodecName(codecClassName);
        if (codecClassName != null && avroCodecName != null) {
          factory = HadoopCodecFactory.fromHadoopString(codecClassName);
          // Record the resolved Avro codec name so later readers of the conf
          // see a consistent setting.
          job.set(AvroJob.OUTPUT_CODEC, avroCodecName);
          return factory;
        } else {
          return CodecFactory.deflateCodec(deflateLevel);
        }
      } else {
        // Explicit Avro codec name: honor the per-codec tuning options.
        if (codecName.equals(DEFLATE_CODEC)) {
          factory = CodecFactory.deflateCodec(deflateLevel);
        } else if (codecName.equals(XZ_CODEC)) {
          factory = CodecFactory.xzCodec(xzLevel);
        } else if (codecName.equals(ZSTANDARD_CODEC)) {
          factory = CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool);
        } else {
          factory = CodecFactory.fromString(codecName);
        }
      }
    }
    return factory;
  }
  /**
   * Creates a writer for one task output file. Map-only jobs write the map
   * output schema; jobs with reducers write the (final) output schema.
   */
  @Override
  public RecordWriter<AvroWrapper<T>, NullWritable> getRecordWriter(FileSystem ignore, JobConf job, String name,
      Progressable prog) throws IOException {
    boolean isMapOnly = job.getNumReduceTasks() == 0;
    Schema schema = isMapOnly ? AvroJob.getMapOutputSchema(job) : AvroJob.getOutputSchema(job);
    GenericData dataModel = AvroJob.createDataModel(job);
    final DataFileWriter<T> writer = new DataFileWriter<T>(dataModel.createDatumWriter(null));
    configureDataFileWriter(writer, job);
    Path path = FileOutputFormat.getTaskOutputPath(job, name + EXT);
    writer.create(schema, path.getFileSystem(job).create(path));
    return new RecordWriter<AvroWrapper<T>, NullWritable>() {
      @Override
      public void write(AvroWrapper<T> wrapper, NullWritable ignore) throws IOException {
        writer.append(wrapper.datum());
      }
      @Override
      public void close(Reporter reporter) throws IOException {
        writer.close();
      }
    };
  }
}
| 7,028 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.avro.file.FileReader;
import org.apache.avro.file.DataFileReader;
/** A {@link RecordReader} for Avro data files. */
public class AvroRecordReader<T> implements RecordReader<AvroWrapper<T>, NullWritable> {
  private FileReader<T> reader;
  private long start; // position of the first sync point at or after the split start
  private long end; // first byte past the split; records beyond the next sync are not read
  /** Opens a reader for the split, using the job's input schema and data model. */
  public AvroRecordReader(JobConf job, FileSplit split) throws IOException {
    this(DataFileReader.openReader(new FsInput(split.getPath(), job),
        AvroJob.createInputDataModel(job).createDatumReader(AvroJob.getInputSchema(job))), split);
  }
  protected AvroRecordReader(FileReader<T> reader, FileSplit split) throws IOException {
    this.reader = reader;
    reader.sync(split.getStart()); // sync to start
    this.start = reader.tell();
    this.end = split.getStart() + split.getLength();
  }
  @Override
  public AvroWrapper<T> createKey() {
    return new AvroWrapper<>(null);
  }
  @Override
  public NullWritable createValue() {
    return NullWritable.get();
  }
  /**
   * Reads the next datum into the wrapper, reusing the wrapper's previous
   * datum where possible. Returns false at end of file or once the reader has
   * passed the split boundary.
   */
  @Override
  public boolean next(AvroWrapper<T> wrapper, NullWritable ignore) throws IOException {
    if (!reader.hasNext() || reader.pastSync(end))
      return false;
    wrapper.datum(reader.next(wrapper.datum()));
    return true;
  }
  @Override
  public float getProgress() throws IOException {
    if (end == start) {
      // Empty split: report no progress rather than dividing by zero.
      return 0.0f;
    } else {
      return Math.min(1.0f, (getPos() - start) / (float) (end - start));
    }
  }
  @Override
  public long getPos() throws IOException {
    return reader.tell();
  }
  @Override
  public void close() throws IOException {
    reader.close();
  }
}
| 7,029 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroAsTextInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
* An {@link org.apache.hadoop.mapred.InputFormat} for Avro data files, which
* converts each datum to string form in the input key. The input value is
* always empty. The string representation is
* <a href="https://www.json.org/">JSON</a>.
* <p>
* This {@link org.apache.hadoop.mapred.InputFormat} is useful for applications
* that wish to process Avro data using tools like MapReduce Streaming.
*
* By default, when pointed at a directory, this will silently skip over any
* files in it that do not have .avro extension. To instead include all files,
* set the avro.mapred.ignore.inputs.without.extension property to false.
*/
public class AvroAsTextInputFormat extends FileInputFormat<Text, Text> {

  /**
   * Lists this job's input files, dropping any that lack the {@code .avro}
   * extension when avro.mapred.ignore.inputs.without.extension is true (the
   * default).
   */
  @Override
  protected FileStatus[] listStatus(JobConf job) throws IOException {
    FileStatus[] listed = super.listStatus(job);
    boolean avroOnly = job.getBoolean(AvroInputFormat.IGNORE_FILES_WITHOUT_EXTENSION_KEY,
        AvroInputFormat.IGNORE_INPUTS_WITHOUT_EXTENSION_DEFAULT);
    if (!avroOnly) {
      return listed;
    }
    List<FileStatus> kept = new ArrayList<>();
    for (FileStatus candidate : listed) {
      if (candidate.getPath().getName().endsWith(AvroOutputFormat.EXT)) {
        kept.add(candidate);
      }
    }
    return kept.toArray(new FileStatus[0]);
  }

  /** Returns a reader that renders each Avro datum as a JSON {@link Text} key. */
  @Override
  public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
    reporter.setStatus(split.toString()); // surface the current split in the task status
    return new AvroAsTextRecordReader(job, (FileSplit) split);
  }
}
| 7,030 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.conf.Configured;
/** A collector for map and reduce output. */
public abstract class AvroCollector<T> extends Configured {
  /**
   * Receives one output datum from a map or reduce function.
   *
   * @param datum the datum to emit
   * @throws IOException if writing to the underlying output sink fails
   */
  public abstract void collect(T datum) throws IOException;
}
| 7,031 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/HadoopReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Bridge between a {@link org.apache.hadoop.mapred.Reducer} and an
* {@link AvroReducer}.
*/
class HadoopReducer<K, V, OUT> extends HadoopReducerBase<K, V, OUT, AvroWrapper<OUT>, NullWritable> {

  /** Instantiates the user's reducer class named by {@link AvroJob#REDUCER}. */
  @Override
  @SuppressWarnings("unchecked")
  protected AvroReducer<K, V, OUT> getReducer(JobConf conf) {
    return ReflectionUtils.newInstance(conf.getClass(AvroJob.REDUCER, AvroReducer.class, AvroReducer.class), conf);
  }

  /**
   * Adapts an {@link OutputCollector} to the {@link AvroCollector} API by
   * wrapping each datum in a reusable {@link AvroWrapper} paired with a
   * {@link NullWritable} value.
   *
   * <p>Declared {@code static} so instances do not retain a hidden reference to
   * the enclosing reducer (a non-static inner class always carries one).
   */
  private static class ReduceCollector<O> extends AvroCollector<O> {
    // Reused across collect() calls to avoid a per-record allocation.
    private final AvroWrapper<O> wrapper = new AvroWrapper<>(null);
    private final OutputCollector<AvroWrapper<O>, NullWritable> out;

    ReduceCollector(OutputCollector<AvroWrapper<O>, NullWritable> out) {
      this.out = out;
    }

    @Override
    public void collect(O datum) throws IOException {
      wrapper.datum(datum);
      out.collect(wrapper, NullWritable.get());
    }
  }

  @Override
  protected AvroCollector<OUT> getCollector(OutputCollector<AvroWrapper<OUT>, NullWritable> collector) {
    return new ReduceCollector<>(collector);
  }
}
| 7,032 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
/**
* An {@link org.apache.hadoop.mapred.InputFormat} for Avro data files.
*
* By default, when pointed at a directory, this will silently skip over any
* files in it that do not have .avro extension. To instead include all files,
* set the avro.mapred.ignore.inputs.without.extension property to false.
*/
public class AvroInputFormat<T> extends FileInputFormat<AvroWrapper<T>, NullWritable> {

  /** Whether to silently ignore input files without the .avro extension */
  public static final String IGNORE_FILES_WITHOUT_EXTENSION_KEY = "avro.mapred.ignore.inputs.without.extension";

  /**
   * Default of whether to silently ignore input files without the .avro
   * extension.
   */
  public static final boolean IGNORE_INPUTS_WITHOUT_EXTENSION_DEFAULT = true;

  /**
   * Lists this job's input files, filtering out any without the {@code .avro}
   * extension unless that behavior has been disabled via
   * {@link #IGNORE_FILES_WITHOUT_EXTENSION_KEY}.
   */
  @Override
  protected FileStatus[] listStatus(JobConf job) throws IOException {
    FileStatus[] listed = super.listStatus(job);
    if (!job.getBoolean(IGNORE_FILES_WITHOUT_EXTENSION_KEY, IGNORE_INPUTS_WITHOUT_EXTENSION_DEFAULT)) {
      return listed;
    }
    List<FileStatus> avroFiles = new ArrayList<>(listed.length);
    for (FileStatus candidate : listed) {
      if (candidate.getPath().getName().endsWith(AvroOutputFormat.EXT)) {
        avroFiles.add(candidate);
      }
    }
    return avroFiles.toArray(new FileStatus[0]);
  }

  /** Returns a reader yielding each datum wrapped in an {@link AvroWrapper}. */
  @Override
  public RecordReader<AvroWrapper<T>, NullWritable> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    reporter.setStatus(split.toString()); // surface the current split in the task status
    return new AvroRecordReader<>(job, (FileSplit) split);
  }
}
| 7,033 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.Reporter;
/**
* A reducer for Avro data.
*
* <p>
* Applications should subclass this class and pass their subclass to
* {@link AvroJob#setReducerClass(JobConf, Class)} and perhaps
* {@link AvroJob#setCombinerClass(JobConf, Class)}. Subclasses override
* {@link #reduce(Object, Iterable, AvroCollector, Reporter)}.
*/
public class AvroReducer<K, V, OUT> extends Configured implements JobConfigurable, Closeable {
  // Lazily created and reused across reduce() calls by the default
  // implementation below; subclasses that override reduce() never touch it.
  private Pair<K, V> outputPair;
  /**
   * Called with all map output values with a given key. By default, pairs key
   * with each value, collecting {@link Pair} instances.
   *
   * @param key       the map output key shared by all {@code values}
   * @param values    all map output values for {@code key}
   * @param collector sink for this reducer's output
   * @param reporter  Hadoop progress/status reporter
   * @throws IOException if collecting output fails
   */
  @SuppressWarnings("unchecked")
  public void reduce(K key, Iterable<V> values, AvroCollector<OUT> collector, Reporter reporter) throws IOException {
    if (outputPair == null)
      outputPair = new Pair<>(AvroJob.getOutputSchema(getConf()));
    for (V value : values) {
      // The same Pair instance is reused for every value; the collector is
      // expected to consume it before the next iteration.
      outputPair.set(key, value);
      collector.collect((OUT) outputPair);
    }
  }
  /** Subclasses can override this as desired. */
  @Override
  public void close() throws IOException {
    // no op
  }
  /** Subclasses can override this as desired. */
  @Override
  public void configure(JobConf jobConf) {
    // no op
  }
}
| 7,034 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.util.Collection;
import java.lang.reflect.Constructor;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.io.UnsupportedEncodingException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.specific.SpecificData;
/** Setters to configure jobs for Avro data. */
public class AvroJob {
private AvroJob() {
} // no public ctor
static final String MAPPER = "avro.mapper";
static final String COMBINER = "avro.combiner";
static final String REDUCER = "avro.reducer";
/** The configuration key for a job's input schema. */
public static final String INPUT_SCHEMA = "avro.input.schema";
/** The configuration key for a job's intermediate schema. */
public static final String MAP_OUTPUT_SCHEMA = "avro.map.output.schema";
/** The configuration key for a job's output schema. */
public static final String OUTPUT_SCHEMA = "avro.output.schema";
/**
* The configuration key for a job's output compression codec. This takes one of
* the strings registered in {@link org.apache.avro.file.CodecFactory}
*/
public static final String OUTPUT_CODEC = "avro.output.codec";
/** The configuration key prefix for a text output metadata. */
public static final String TEXT_PREFIX = "avro.meta.text.";
/** The configuration key prefix for a binary output metadata. */
public static final String BINARY_PREFIX = "avro.meta.binary.";
/** The configuration key for reflection-based input representation. */
public static final String INPUT_IS_REFLECT = "avro.input.is.reflect";
/** The configuration key for reflection-based map output representation. */
public static final String MAP_OUTPUT_IS_REFLECT = "avro.map.output.is.reflect";
/** The configuration key for the data model implementation class. */
private static final String CONF_DATA_MODEL = "avro.serialization.data.model";
/** Configure a job's map input schema. */
public static void setInputSchema(JobConf job, Schema s) {
job.set(INPUT_SCHEMA, s.toString());
configureAvroInput(job);
}
/** Return a job's map input schema. */
public static Schema getInputSchema(Configuration job) {
String schemaString = job.get(INPUT_SCHEMA);
return schemaString != null ? new Schema.Parser().parse(schemaString) : null;
}
/**
* Configure a job's map output schema. The map output schema defaults to the
* output schema and need only be specified when it differs. Thus must be a
* {@link Pair} schema.
*/
public static void setMapOutputSchema(JobConf job, Schema s) {
job.set(MAP_OUTPUT_SCHEMA, s.toString());
configureAvroShuffle(job);
}
/** Return a job's map output key schema. */
public static Schema getMapOutputSchema(Configuration job) {
return new Schema.Parser().parse(job.get(MAP_OUTPUT_SCHEMA, job.get(OUTPUT_SCHEMA)));
}
/**
* Configure a job's output schema. Unless this is a map-only job, this must be
* a {@link Pair} schema.
*/
public static void setOutputSchema(JobConf job, Schema s) {
job.set(OUTPUT_SCHEMA, s.toString());
configureAvroOutput(job);
}
/** Configure a job's output compression codec. */
public static void setOutputCodec(JobConf job, String codec) {
job.set(OUTPUT_CODEC, codec);
}
/** Add metadata to job output files. */
public static void setOutputMeta(JobConf job, String key, String value) {
job.set(TEXT_PREFIX + key, value);
}
/** Add metadata to job output files. */
public static void setOutputMeta(JobConf job, String key, long value) {
job.set(TEXT_PREFIX + key, Long.toString(value));
}
/** Add metadata to job output files. */
public static void setOutputMeta(JobConf job, String key, byte[] value) {
try {
job.set(BINARY_PREFIX + key,
URLEncoder.encode(new String(value, StandardCharsets.ISO_8859_1), StandardCharsets.ISO_8859_1.name()));
} catch (UnsupportedEncodingException e) {
}
}
/** Indicate that a job's input files are in SequenceFile format. */
public static void setInputSequenceFile(JobConf job) {
job.setInputFormat(SequenceFileInputFormat.class);
}
/** Indicate that all a job's data should use the reflect representation. */
public static void setReflect(JobConf job) {
setInputReflect(job);
setMapOutputReflect(job);
}
/** Indicate that a job's input data should use reflect representation. */
public static void setInputReflect(JobConf job) {
job.setBoolean(INPUT_IS_REFLECT, true);
}
/** Indicate that a job's map output data should use reflect representation. */
public static void setMapOutputReflect(JobConf job) {
job.setBoolean(MAP_OUTPUT_IS_REFLECT, true);
}
/** Return a job's output key schema. */
public static Schema getOutputSchema(Configuration job) {
return new Schema.Parser().parse(job.get(OUTPUT_SCHEMA));
}
private static void configureAvroInput(JobConf job) {
if (job.get("mapred.input.format.class") == null)
job.setInputFormat(AvroInputFormat.class);
if (job.getMapperClass() == IdentityMapper.class)
job.setMapperClass(HadoopMapper.class);
configureAvroShuffle(job);
}
private static void configureAvroOutput(JobConf job) {
if (job.get("mapred.output.format.class") == null)
job.setOutputFormat(AvroOutputFormat.class);
if (job.getReducerClass() == IdentityReducer.class)
job.setReducerClass(HadoopReducer.class);
job.setOutputKeyClass(AvroWrapper.class);
configureAvroShuffle(job);
}
private static void configureAvroShuffle(JobConf job) {
job.setOutputKeyComparatorClass(AvroKeyComparator.class);
job.setMapOutputKeyClass(AvroKey.class);
job.setMapOutputValueClass(AvroValue.class);
// add AvroSerialization to io.serializations
Collection<String> serializations = job.getStringCollection("io.serializations");
if (!serializations.contains(AvroSerialization.class.getName())) {
serializations.add(AvroSerialization.class.getName());
job.setStrings("io.serializations", serializations.toArray(new String[0]));
}
}
/** Configure a job's mapper implementation. */
public static void setMapperClass(JobConf job, Class<? extends AvroMapper> c) {
job.set(MAPPER, c.getName());
}
/** Configure a job's combiner implementation. */
public static void setCombinerClass(JobConf job, Class<? extends AvroReducer> c) {
job.set(COMBINER, c.getName());
job.setCombinerClass(HadoopCombiner.class);
}
/** Configure a job's reducer implementation. */
public static void setReducerClass(JobConf job, Class<? extends AvroReducer> c) {
job.set(REDUCER, c.getName());
}
/** Configure a job's data model implementation class. */
public static void setDataModelClass(JobConf job, Class<? extends GenericData> modelClass) {
job.setClass(CONF_DATA_MODEL, modelClass, GenericData.class);
}
/** Return the job's data model implementation class. */
public static Class<? extends GenericData> getDataModelClass(Configuration conf) {
return conf.getClass(CONF_DATA_MODEL, ReflectData.class, GenericData.class);
}
private static GenericData newDataModelInstance(Class<? extends GenericData> modelClass, Configuration conf) {
GenericData dataModel;
try {
Constructor<? extends GenericData> ctor = modelClass.getDeclaredConstructor(ClassLoader.class);
ctor.setAccessible(true);
dataModel = ctor.newInstance(conf.getClassLoader());
} catch (Exception e) {
throw new RuntimeException(e);
}
ReflectionUtils.setConf(dataModel, conf);
return dataModel;
}
public static GenericData createDataModel(Configuration conf) {
return newDataModelInstance(getDataModelClass(conf), conf);
}
public static GenericData createInputDataModel(Configuration conf) {
String className = conf.get(CONF_DATA_MODEL, null);
Class<? extends GenericData> modelClass;
if (className != null) {
modelClass = getDataModelClass(conf);
} else if (conf.getBoolean(INPUT_IS_REFLECT, false)) {
modelClass = ReflectData.class;
} else {
modelClass = SpecificData.class;
}
return newDataModelInstance(modelClass, conf);
}
public static GenericData createMapOutputDataModel(Configuration conf) {
String className = conf.get(CONF_DATA_MODEL, null);
Class<? extends GenericData> modelClass;
if (className != null) {
modelClass = getDataModelClass(conf);
} else if (conf.getBoolean(MAP_OUTPUT_IS_REFLECT, false)) {
modelClass = ReflectData.class;
} else {
modelClass = SpecificData.class;
}
return newDataModelInstance(modelClass, conf);
}
}
| 7,035 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/HadoopReducerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Reducer;
/**
 * Common bridge between Hadoop's {@link Reducer} API and an
 * {@link AvroReducer}: unwraps AvroKey/AvroValue pairs and hands them to the
 * user's reducer. Subclasses supply the concrete reducer and output collector.
 */
abstract class HadoopReducerBase<K, V, OUT, KO, VO> extends MapReduceBase
    implements Reducer<AvroKey<K>, AvroValue<V>, KO, VO> {
  // The user's reducer, instantiated once in configure().
  private AvroReducer<K, V, OUT> reducer;
  // Lazily created on the first reduce() call, when the OutputCollector is
  // first available.
  private AvroCollector<OUT> collector;
  /** Instantiates the user's {@link AvroReducer} from the job configuration. */
  protected abstract AvroReducer<K, V, OUT> getReducer(JobConf conf);
  /** Wraps Hadoop's collector in an {@link AvroCollector} for the reducer. */
  protected abstract AvroCollector<OUT> getCollector(OutputCollector<KO, VO> c);
  @Override
  public void configure(JobConf conf) {
    this.reducer = getReducer(conf);
  }
  /**
   * Adapts the Iterator of AvroValue wrappers Hadoop supplies into the
   * Iterable of plain datums AvroReducer expects. A single instance is reused
   * across reduce() calls (its {@code values} field is re-pointed each time),
   * so it is single-use per call and must not be retained or iterated twice.
   */
  class ReduceIterable implements Iterable<V>, Iterator<V> {
    private Iterator<AvroValue<V>> values;
    @Override
    public boolean hasNext() {
      return values.hasNext();
    }
    @Override
    public V next() {
      // Unwrap the AvroValue to yield the raw datum.
      return values.next().datum();
    }
    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
    @Override
    public Iterator<V> iterator() {
      return this;
    }
  }
  private ReduceIterable reduceIterable = new ReduceIterable();
  @Override
  public final void reduce(AvroKey<K> key, Iterator<AvroValue<V>> values, OutputCollector<KO, VO> out,
      Reporter reporter) throws IOException {
    if (this.collector == null)
      this.collector = getCollector(out);
    reduceIterable.values = values;
    reducer.reduce(key.datum(), reduceIterable, collector, reporter);
  }
  @Override
  public void close() throws IOException {
    // Give the user's reducer a chance to release resources.
    this.reducer.close();
  }
}
| 7,036 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroAsTextRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.FileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
/**
 * Reads Avro data files and presents each datum as a JSON-rendered Text key
 * with an empty value, for consumption by text-oriented tools (e.g. Streaming).
 */
class AvroAsTextRecordReader<T> implements RecordReader<Text, Text> {
  private FileReader<T> reader;
  // Reused across next() calls so the datum object can be recycled.
  private T datum;
  // First byte position of this split after syncing, and the split's end.
  private long start;
  private long end;

  public AvroAsTextRecordReader(JobConf job, FileSplit split) throws IOException {
    this(DataFileReader.openReader(new FsInput(split.getPath(), job), new GenericDatumReader<>()), split);
  }

  protected AvroAsTextRecordReader(FileReader<T> reader, FileSplit split) throws IOException {
    this.reader = reader;
    reader.sync(split.getStart()); // sync to start
    this.start = reader.tell();
    this.end = split.getStart() + split.getLength();
  }

  @Override
  public Text createKey() {
    return new Text();
  }

  @Override
  public Text createValue() {
    return new Text();
  }

  /**
   * Reads the next datum into {@code key} as text; {@code ignore} is unused.
   *
   * @return false once the reader is exhausted or past this split's end
   */
  @Override
  public boolean next(Text key, Text ignore) throws IOException {
    if (!reader.hasNext() || reader.pastSync(end))
      return false;
    datum = reader.next(datum);
    if (datum instanceof ByteBuffer) {
      ByteBuffer b = (ByteBuffer) datum;
      if (b.hasArray()) {
        int offset = b.arrayOffset();
        int start = b.position();
        int length = b.remaining();
        // BUG FIX: Text.set(byte[], int, int) takes a LENGTH as its third
        // argument; the previous code passed the end offset
        // (offset + start + length), over-reading the backing array.
        key.set(b.array(), offset + start, length);
      } else {
        byte[] bytes = new byte[b.remaining()];
        b.duplicate().get(bytes); // duplicate() leaves the cached datum's position untouched
        key.set(bytes);
      }
    } else {
      // Non-binary datums are rendered as JSON via GenericData.toString.
      key.set(GenericData.get().toString(datum));
    }
    return true;
  }

  @Override
  public float getProgress() throws IOException {
    if (end == start) {
      // Degenerate (empty) split; avoid division by zero.
      return 0.0f;
    } else {
      return Math.min(1.0f, (getPos() - start) / (float) (end - start));
    }
  }

  @Override
  public long getPos() throws IOException {
    return reader.tell();
  }

  @Override
  public void close() throws IOException {
    reader.close();
  }
}
| 7,037 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
/** The wrapper of data for jobs configured with {@link AvroJob} . */
public class AvroWrapper<T> {
  // The wrapped datum; may be null (the no-arg constructor wraps null).
  private T datum;

  /** Wrap null. Construct {@link AvroWrapper} wrapping no datum. */
  public AvroWrapper() {
    this(null);
  }

  /** Wrap a datum. */
  public AvroWrapper(T datum) {
    this.datum = datum;
  }

  /** Return the wrapped datum. */
  public T datum() {
    return datum;
  }

  /** Set the wrapped datum. */
  public void datum(T datum) {
    this.datum = datum;
  }

  @Override
  public int hashCode() {
    // A null datum hashes to 0, consistent with equals() below.
    return (datum == null) ? 0 : datum.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    AvroWrapper<?> that = (AvroWrapper<?>) obj;
    if (this.datum == null) {
      return that.datum == null;
    } else
      return datum.equals(that.datum);
  }

  /**
   * Get the wrapped datum as JSON.
   *
   * <p>BUG FIX: returns the string {@code "null"} for an empty wrapper instead
   * of throwing a {@link NullPointerException} — the no-arg constructor
   * explicitly supports wrapping no datum.
   */
  @Override
  public String toString() {
    return String.valueOf(datum);
  }
}
| 7,038 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/MapCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.OutputCollector;
@SuppressWarnings("unchecked")
@SuppressWarnings("unchecked")
class MapCollector<OUT, K, V, KO, VO> extends AvroCollector<OUT> {
  // Wrapper objects are reused across collect() calls to avoid per-record
  // allocation.
  private final AvroWrapper<OUT> wrapper = new AvroWrapper<>(null);
  private final AvroKey<K> keyWrapper = new AvroKey<>(null);
  private final AvroValue<V> valueWrapper = new AvroValue<>(null);
  private final OutputCollector<KO, VO> collector;
  private final boolean isMapOnly;

  public MapCollector(OutputCollector<KO, VO> collector, boolean isMapOnly) {
    this.collector = collector;
    this.isMapOnly = isMapOnly;
  }

  /**
   * Emits one map output datum. In a map-only job the datum itself is the
   * output; otherwise it is a {@link Pair} that is split into a shuffle key
   * and value.
   */
  @Override
  public void collect(OUT datum) throws IOException {
    if (!isMapOnly) {
      // Shuffle path: split the Pair into its key and value halves.
      Pair<K, V> pair = (Pair<K, V>) datum;
      keyWrapper.datum(pair.key());
      valueWrapper.datum(pair.value());
      collector.collect((KO) keyWrapper, (VO) valueWrapper);
      return;
    }
    // Map-only path: the datum goes out whole, with a null value.
    wrapper.datum(datum);
    collector.collect((KO) wrapper, (VO) NullWritable.get());
  }
}
| 7,039 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.Reporter;
/**
* A mapper for Avro data.
*
* <p>
* Applications subclass this class and pass their subclass to
* {@link AvroJob#setMapperClass(JobConf, Class)}, overriding
* {@link #map(Object, AvroCollector, Reporter)}.
*/
public class AvroMapper<IN, OUT> extends Configured implements JobConfigurable, Closeable {
  /**
   * Called with each map input datum. By default, collects inputs.
   *
   * @param datum     one input datum
   * @param collector sink for this mapper's output
   * @param reporter  Hadoop progress/status reporter
   * @throws IOException if collecting output fails
   */
  @SuppressWarnings("unchecked")
  public void map(IN datum, AvroCollector<OUT> collector, Reporter reporter) throws IOException {
    // Identity behavior: the unchecked cast assumes IN and OUT coincide when
    // this default implementation is used.
    collector.collect((OUT) datum);
  }
  /** Subclasses can override this as desired. */
  @Override
  public void close() throws IOException {
    // no op
  }
  /** Subclasses can override this as desired. */
  @Override
  public void configure(JobConf jobConf) {
    // no op
  }
}
| 7,040 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.avro.mapred.AvroInputFormat;
import org.apache.avro.mapred.AvroOutputFormat;
/**
* An {@link org.apache.hadoop.mapred.InputFormat} for tethered Avro input.
*
* By default, when pointed at a directory, this will silently skip over any
* files in it that do not have .avro extension. To instead include all files,
* set the avro.mapred.ignore.inputs.without.extension property to false.
*/
class TetherInputFormat extends FileInputFormat<TetherData, NullWritable> {

  /**
   * Lists this job's input files, dropping any without the {@code .avro}
   * extension when avro.mapred.ignore.inputs.without.extension is true (the
   * default).
   */
  @Override
  protected FileStatus[] listStatus(JobConf job) throws IOException {
    FileStatus[] listed = super.listStatus(job);
    boolean avroOnly = job.getBoolean(AvroInputFormat.IGNORE_FILES_WITHOUT_EXTENSION_KEY,
        AvroInputFormat.IGNORE_INPUTS_WITHOUT_EXTENSION_DEFAULT);
    if (!avroOnly) {
      return listed;
    }
    List<FileStatus> kept = new ArrayList<>();
    for (FileStatus candidate : listed) {
      if (candidate.getPath().getName().endsWith(AvroOutputFormat.EXT)) {
        kept.add(candidate);
      }
    }
    return kept.toArray(new FileStatus[0]);
  }

  /** Hands each split to a {@link TetherRecordReader}. */
  @Override
  public RecordReader<TetherData, NullWritable> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    reporter.setStatus(split.toString()); // surface the current split in the task status
    return new TetherRecordReader(job, (FileSplit) split);
  }
}
| 7,041 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherKeySerialization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.conf.Configured;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
/** A {@link Serialization} for {@link TetherData}. */
class TetherKeySerialization extends Configured implements Serialization<TetherData> {

  private static final DecoderFactory DECODER_FACTORY = DecoderFactory.get();

  public boolean accept(Class<?> c) {
    return TetherData.class.isAssignableFrom(c);
  }

  public Deserializer<TetherData> getDeserializer(Class<TetherData> c) {
    return new TetherDataDeserializer();
  }

  public Serializer<TetherData> getSerializer(Class<TetherData> c) {
    return new TetherDataSerializer();
  }

  /** Reads length-prefixed byte blocks from a stream into {@link TetherData}. */
  private class TetherDataDeserializer implements Deserializer<TetherData> {
    private BinaryDecoder decoder;

    public void open(InputStream in) {
      // Reuse the previous decoder instance when possible.
      decoder = DECODER_FACTORY.directBinaryDecoder(in, decoder);
    }

    public TetherData deserialize(TetherData reuse) throws IOException {
      TetherData result = (reuse == null) ? new TetherData() : reuse;
      // readBytes may reuse the existing buffer if it is large enough.
      result.buffer(decoder.readBytes(result.buffer()));
      return result;
    }

    public void close() throws IOException {
      decoder.inputStream().close();
    }
  }

  /** Writes {@link TetherData} buffers to a stream as length-prefixed blocks. */
  private class TetherDataSerializer implements Serializer<TetherData> {
    private OutputStream stream;
    private BinaryEncoder encoder;

    public void open(OutputStream out) {
      this.stream = out;
      this.encoder = EncoderFactory.get().directBinaryEncoder(out, encoder);
    }

    public void serialize(TetherData datum) throws IOException {
      encoder.writeBytes(datum.buffer());
      encoder.flush(); // Flush shouldn't be required. Might be a bug in AVRO.
    }

    public void close() throws IOException {
      encoder.flush();
      stream.close();
    }
  }
}
| 7,042 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetheredProcess.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.avro.ipc.Transceiver;
import org.apache.avro.ipc.Server;
import org.apache.avro.ipc.SaslSocketServer;
import org.apache.avro.ipc.SaslSocketTransceiver;
import org.apache.avro.ipc.specific.SpecificRequestor;
import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.avro.ipc.jetty.HttpServer;
import org.apache.avro.ipc.HttpTransceiver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manages the lifecycle of a tethered (external) subprocess that implements
 * the map or reduce logic of a tether job.
 * <p>
 * On construction this starts an Avro RPC server (SASL socket or HTTP,
 * depending on the job configuration) through which the subprocess sends its
 * output back, launches the subprocess, and opens an {@link InputProtocol}
 * client used to push input data to it.
 */
class TetheredProcess {

  static final Logger LOG = LoggerFactory.getLogger(TetheredProcess.class);

  // NOTE(review): this field is never assigned or read in this class; it
  // appears to be dead. Kept to avoid touching anything outside this review.
  private JobConf job;

  TetherOutputService outputService;
  Server outputServer;
  Process subprocess;
  Transceiver clientTransceiver;
  InputProtocol inputClient;

  /**
   * Enumeration defines which transport protocol to use to communicate between
   * the map/reduce java daemons and the tethered process.
   */
  public enum Protocol {
    HTTP, SASL, NONE
  };

  // which protocol we are using
  Protocol proto;

  /**
   * Starts the RPC output server, launches the subprocess, and connects the
   * input client to it. On any failure all resources are released before the
   * exception is rethrown.
   */
  public TetheredProcess(JobConf job, OutputCollector<TetherData, NullWritable> collector, Reporter reporter)
      throws Exception {
    try {
      // start server
      this.outputService = new TetherOutputService(collector, reporter);
      proto = TetherJob.getProtocol(job);
      InetSocketAddress iaddress;
      switch (proto) {
      case SASL:
        iaddress = new InetSocketAddress(0); // port 0: let the OS pick a free port
        this.outputServer = new SaslSocketServer(new SpecificResponder(OutputProtocol.class, outputService), iaddress);
        break;
      case HTTP:
        iaddress = new InetSocketAddress(0);
        // set it up for http
        this.outputServer = new HttpServer(new SpecificResponder(OutputProtocol.class, outputService),
            iaddress.getPort());
        break;
      case NONE:
      default:
        throw new RuntimeException("No transport protocol was specified in the job configuration");
      }
      outputServer.start();
      // start sub-process, connecting back to server
      this.subprocess = startSubprocess(job);
      // check if the process has exited -- is there a better way to do this?
      boolean hasexited = false;
      try {
        // exitValue throws an exception if process hasn't exited
        this.subprocess.exitValue();
        hasexited = true;
      } catch (IllegalThreadStateException e) {
        // still running -- this is the expected case
      }
      if (hasexited) {
        LOG.error("Could not start subprocess");
        throw new RuntimeException("Could not start subprocess");
      }
      // open client, connecting to sub-process
      switch (proto) {
      case SASL:
        this.clientTransceiver = new SaslSocketTransceiver(new InetSocketAddress(outputService.inputPort()));
        break;
      case HTTP:
        this.clientTransceiver = new HttpTransceiver(new URL("http://127.0.0.1:" + outputService.inputPort()));
        break;
      default:
        throw new RuntimeException("Error: code to handle this protocol is not implemented");
      }
      this.inputClient = SpecificRequestor.getClient(InputProtocol.class, clientTransceiver);
    } catch (Exception t) {
      close();
      throw t;
    }
  }

  /** Releases all resources: RPC client, subprocess, and output server. */
  public void close() {
    if (clientTransceiver != null)
      try {
        clientTransceiver.close();
      } catch (IOException e) {
      } // ignore
    if (subprocess != null)
      subprocess.destroy();
    if (outputServer != null)
      outputServer.close();
  }

  /**
   * Builds the command line for the tethered executable (optionally fetching
   * it from the distributed cache), wraps it in Hadoop's stdout/stderr
   * capture, and launches it with the output server's port and the chosen
   * protocol in its environment.
   */
  private Process startSubprocess(JobConf job) throws IOException, InterruptedException {
    // get the executable command
    List<String> command = new ArrayList<>();
    String executable = "";
    if (job.getBoolean(TetherJob.TETHER_EXEC_CACHED, false)) {
      // we want to use the cached executable
      Path[] localFiles = DistributedCache.getLocalCacheFiles(job);
      if (localFiles == null) { // until MAPREDUCE-476
        URI[] files = DistributedCache.getCacheFiles(job);
        localFiles = new Path[] { new Path(files[0].toString()) };
      }
      executable = localFiles[0].toString();
      FileUtil.chmod(executable, "a+x");
    } else {
      executable = job.get(TetherJob.TETHER_EXEC);
    }
    command.add(executable);
    // Add the executable arguments. We assume the arguments are separated by
    // newlines so we split the argument string based on newlines and add each
    // token to command We need to do it this way because
    // TaskLog.captureOutAndError will put quote marks around each argument so
    // if we pass a single string containing all arguments we get quoted
    // incorrectly
    String args = job.get(TetherJob.TETHER_EXEC_ARGS);
    // args might be null if TETHER_EXEC_ARGS wasn't set.
    if (args != null) {
      String[] aparams = args.split("\n");
      for (int i = 0; i < aparams.length; i++) {
        aparams[i] = aparams[i].trim();
        if (aparams[i].length() > 0) {
          command.add(aparams[i]);
        }
      }
    }
    if (System.getProperty("hadoop.log.dir") == null && System.getenv("HADOOP_LOG_DIR") != null)
      System.setProperty("hadoop.log.dir", System.getenv("HADOOP_LOG_DIR"));
    // wrap the command in a stdout/stderr capture
    TaskAttemptID taskid = TaskAttemptID.forName(job.get("mapred.task.id"));
    File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(job);
    command = TaskLog.captureOutAndError(null, command, stdout, stderr, logLength, false);
    stdout.getParentFile().mkdirs();
    stderr.getParentFile().mkdirs();
    // add output server's port to env
    Map<String, String> env = new HashMap<>();
    env.put("AVRO_TETHER_OUTPUT_PORT", Integer.toString(outputServer.getPort()));
    // add an environment variable to specify what protocol to use for communication
    env.put("AVRO_TETHER_PROTOCOL", job.get(TetherJob.TETHER_PROTOCOL));
    // Log the full command line. The original code assigned instead of
    // appending inside the loop, so only the last token was ever logged.
    StringBuilder imsg = new StringBuilder();
    for (String s : command) {
      imsg.append(s).append(' ');
    }
    LOG.info("TetheredProcess.startSubprocess: command: " + imsg);
    LOG.info("Tetheredprocess.startSubprocess: stdout logged to: " + stdout.toString());
    LOG.info("Tetheredprocess.startSubprocess: stderr logged to: " + stderr.toString());
    // start child process
    ProcessBuilder builder = new ProcessBuilder(command);
    builder.environment().putAll(env);
    return builder.start();
  }
}
| 7,043 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryData;
import org.apache.avro.mapred.AvroJob;
/**
 * A {@link Partitioner} for tether jobs: routes each binary-encoded key either
 * to the partition explicitly requested by the subprocess (via
 * {@link #setNextPartition(int)}) or, by default, to a hash of the key bytes.
 */
class TetherPartitioner implements Partitioner<TetherData, NullWritable> {
  // Partition explicitly requested by the subprocess for the next key on this
  // thread; null means "hash the key". NOTE(review): the cached value is never
  // cleared after use, so it stays in effect until the next setNextPartition
  // call -- presumably every partitioned output sets it first (see
  // TetherOutputService.outputPartitioned); confirm no unpartitioned output
  // is interleaved on the same thread.
  private static final ThreadLocal<Integer> CACHE = new ThreadLocal<>();
  // Schema of the map-output key, used to hash the binary-encoded key bytes.
  private Schema schema;
  @Override
  public void configure(JobConf job) {
    schema = AvroJob.getMapOutputSchema(job);
  }
  /** Records the partition to use for the next key emitted on this thread. */
  static void setNextPartition(int newValue) {
    CACHE.set(newValue);
  }
  @Override
  public int getPartition(TetherData key, NullWritable value, int numPartitions) {
    Integer result = CACHE.get();
    if (result != null) // return cached value
      return result;
    ByteBuffer b = key.buffer();
    int p = b.position();
    // Hash the Avro binary-encoded key according to its schema so equal keys
    // land in the same partition.
    int hashCode = BinaryData.hashCode(b.array(), p, b.limit() - p, schema);
    if (hashCode < 0)
      hashCode = -hashCode; // ensure a non-negative value before the modulo
    return hashCode % numPartitions;
  }
}
| 7,044 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.nio.ByteBuffer;
/** A wrapper for a ByteBuffer containing binary-encoded data. */
class TetherData {

  // Number of records held in the buffer; meaningful for task input only.
  private int count = 1;

  // The binary-encoded payload; may be null until assigned.
  private ByteBuffer buffer;

  /** Creates an empty wrapper with no buffer. */
  public TetherData() {
  }

  /** Creates a wrapper around the given buffer. */
  public TetherData(ByteBuffer buffer) {
    this.buffer = buffer;
  }

  /** Return the count of records in the buffer. Used for task input only. */
  public int count() {
    return this.count;
  }

  /** Set the count of records in the buffer. Used for task input only. */
  public void count(int count) {
    this.count = count;
  }

  /** Return the buffer. */
  public ByteBuffer buffer() {
    return this.buffer;
  }

  /** Set the buffer. */
  public void buffer(ByteBuffer buffer) {
    this.buffer = buffer;
  }
}
| 7,045 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.util.Progressable;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroOutputFormat;
/** An {@link org.apache.hadoop.mapred.OutputFormat} for Avro data files. */
class TetherOutputFormat extends FileOutputFormat<TetherData, NullWritable> {

  /** Enable output compression using the deflate codec and specify its level. */
  public static void setDeflateLevel(JobConf job, int level) {
    FileOutputFormat.setCompressOutput(job, true);
    job.setInt(AvroOutputFormat.DEFLATE_LEVEL_KEY, level);
  }

  /**
   * Creates a writer that appends pre-encoded record blocks (as produced by
   * the tethered subprocess) to an Avro data file in the task output path.
   */
  @Override
  public RecordWriter<TetherData, NullWritable> getRecordWriter(FileSystem ignore, JobConf job, String name,
      Progressable prog) throws IOException {
    Schema schema = AvroJob.getOutputSchema(job);
    // Parameterized with Object (the original used raw types, which required
    // @SuppressWarnings); the datum type is irrelevant since only pre-encoded
    // blocks are appended.
    final DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
    if (FileOutputFormat.getCompressOutput(job)) {
      int level = job.getInt(AvroOutputFormat.DEFLATE_LEVEL_KEY, CodecFactory.DEFAULT_DEFLATE_LEVEL);
      writer.setCodec(CodecFactory.deflateCodec(level));
    }
    Path path = FileOutputFormat.getTaskOutputPath(job, name + AvroOutputFormat.EXT);
    writer.create(schema, path.getFileSystem(job).create(path));
    return new RecordWriter<TetherData, NullWritable>() {
      @Override
      public void write(TetherData datum, NullWritable ignore) throws IOException {
        // The buffer already holds Avro-binary-encoded records.
        writer.appendEncoded(datum.buffer());
      }

      @Override
      public void close(Reporter reporter) throws IOException {
        writer.close();
      }
    };
  }
}
| 7,046 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.FsInput;
/**
 * Reads Avro data files block-by-block for tether jobs: each
 * {@link TetherData} returned by {@link #next} holds one file block's worth of
 * binary-encoded records plus the record count for that block.
 */
class TetherRecordReader implements RecordReader<TetherData, NullWritable> {

  private FsInput in;
  // Parameterized with Object (the original field used a raw DataFileReader);
  // the datum type is irrelevant since only raw blocks are read.
  private DataFileReader<Object> reader;
  private long start;
  private long end;

  public TetherRecordReader(JobConf job, FileSplit split) throws IOException {
    this.in = new FsInput(split.getPath(), job);
    this.reader = new DataFileReader<>(in, new GenericDatumReader<>());
    reader.sync(split.getStart()); // sync to start
    this.start = in.tell();
    this.end = split.getStart() + split.getLength();
    // Publish the file's schema so the subprocess can be configured with it.
    job.set(AvroJob.INPUT_SCHEMA, reader.getSchema().toString());
  }

  /** Returns the schema of the file being read. */
  public Schema getSchema() {
    return reader.getSchema();
  }

  @Override
  public TetherData createKey() {
    return new TetherData();
  }

  @Override
  public NullWritable createValue() {
    return NullWritable.get();
  }

  /**
   * Fills {@code data} with the next file block and its record count; returns
   * false once the split is exhausted.
   */
  @Override
  public boolean next(TetherData data, NullWritable ignore) throws IOException {
    if (!reader.hasNext() || reader.pastSync(end))
      return false;
    data.buffer(reader.nextBlock());
    data.count((int) reader.getBlockCount());
    return true;
  }

  @Override
  public float getProgress() throws IOException {
    if (end == start) {
      return 0.0f; // degenerate (empty) split
    } else {
      return Math.min(1.0f, (in.tell() - start) / (float) (end - start));
    }
  }

  @Override
  public long getPos() throws IOException {
    return in.tell();
  }

  @Override
  public void close() throws IOException {
    reader.close();
  }
}
| 7,047 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherKeyComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.conf.Configuration;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryData;
import org.apache.avro.mapred.AvroJob;
/** The {@link RawComparator} used by jobs configured with {@link TetherJob}. */
class TetherKeyComparator extends Configured implements RawComparator<TetherData> {
  // Schema of the map-output key, used for Avro binary comparison.
  private Schema schema;
  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf != null)
      schema = AvroJob.getMapOutputSchema(conf);
  }
  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    // The serialized form is length-prefixed with a varint long, so skip past
    // the prefix before comparing the Avro-encoded key bytes.
    int diff = BinaryData.compare(b1, BinaryData.skipLong(b1, s1), l1, b2, BinaryData.skipLong(b2, s2), l2, schema);
    // Deliberately never return 0: equal keys are kept as distinct entries
    // rather than grouped. NOTE(review): this violates the usual comparator
    // symmetry contract for equal inputs -- presumably intentional for tether
    // jobs; do not "fix" without confirming the grouping semantics.
    return diff == 0 ? -1 : diff;
  }
  @Override
  public int compare(TetherData x, TetherData y) {
    ByteBuffer b1 = x.buffer(), b2 = y.buffer();
    int diff = BinaryData.compare(b1.array(), b1.position(), b2.array(), b2.position(), schema);
    // See above: equality is never reported.
    return diff == 0 ? -1 : diff;
  }
}
| 7,048 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.avro.mapred.AvroJob;
/**
 * A {@link Reducer} that feeds reduce input to a tethered subprocess and shuts
 * it down cleanly (or aborts it on failure) when the input is exhausted.
 */
class TetherReducer implements Reducer<TetherData, NullWritable, TetherData, NullWritable> {

  private JobConf conf;
  private TetheredProcess tethered;
  private boolean failed;

  @Override
  public void configure(JobConf job) {
    this.conf = job;
  }

  @Override
  public void reduce(TetherData datum, Iterator<NullWritable> ignore,
      OutputCollector<TetherData, NullWritable> collector, Reporter reporter) throws IOException {
    try {
      if (tethered == null) {
        // Lazily launch the subprocess on the first call and tell it the
        // map-output (its input) and job-output schemas.
        tethered = new TetheredProcess(conf, collector, reporter);
        tethered.inputClient.configure(TaskType.REDUCE, AvroJob.getMapOutputSchema(conf).toString(),
            AvroJob.getOutputSchema(conf).toString());
      }
      tethered.inputClient.input(datum.buffer(), datum.count());
    } catch (IOException e) {
      failed = true;
      throw e;
    } catch (Exception e) {
      failed = true;
      throw new IOException(e);
    }
  }

  /**
   * Handle the end of the input by closing down the application.
   */
  @Override
  public void close() throws IOException {
    if (tethered == null) {
      return; // reduce was never called; nothing to shut down
    }
    try {
      if (failed) {
        tethered.inputClient.abort();
      } else {
        tethered.inputClient.complete();
      }
      tethered.outputService.waitForFinish();
    } catch (InterruptedException e) {
      throw new IOException(e);
    } finally {
      tethered.close();
    }
  }
}
| 7,049 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherMapRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapRunner;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Counters.Counter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.avro.mapred.AvroJob;
/**
 * A {@link MapRunner} that forwards map input blocks to a tethered subprocess
 * and waits for it to finish, aborting it on any failure.
 */
class TetherMapRunner extends MapRunner<TetherData, NullWritable, TetherData, NullWritable> {

  private static final Logger LOG = LoggerFactory.getLogger(TetherMapRunner.class);

  private JobConf job;
  private TetheredProcess process;

  /** Saves the job configuration for use when the subprocess is started. */
  public void configure(JobConf job) {
    this.job = job;
  }

  @SuppressWarnings("unchecked")
  public void run(RecordReader<TetherData, NullWritable> recordReader,
      OutputCollector<TetherData, NullWritable> collector, Reporter reporter) throws IOException {
    try {
      // start tethered process
      process = new TetheredProcess(job, collector, reporter);

      // configure it
      LOG.info("send configure to subprocess for map task");
      process.inputClient.configure(TaskType.MAP, job.get(AvroJob.INPUT_SCHEMA),
          AvroJob.getMapOutputSchema(job).toString());

      LOG.info("send partitions to subprocess for map task");
      process.inputClient.partitions(job.getNumReduceTasks());

      // run map
      Counter inputRecordCounter = reporter.getCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS");
      TetherData data = new TetherData();
      while (recordReader.next(data, NullWritable.get())) {
        process.inputClient.input(data.buffer(), data.count());
        // Each block carries count records; add count - 1 here, presumably
        // because one is counted per next() elsewhere -- TODO confirm.
        inputRecordCounter.increment(data.count() - 1);
        if (process.outputService.isFinished())
          break;
      }
      LOG.info("send complete to subprocess for map task");
      process.inputClient.complete();

      // wait for completion
      if (process.outputService.waitForFinish())
        throw new IOException("Task failed: " + process.outputService.error());

    } catch (Throwable t) { // send abort
      LOG.warn("Task failed", t);
      // process is null when the TetheredProcess constructor itself failed;
      // the original code dereferenced it unconditionally here and could mask
      // the real failure with a NullPointerException.
      if (process != null) {
        process.inputClient.abort();
      }
      throw new IOException("Task failed: " + t, t);
    } finally { // clean up
      if (process != null)
        process.close();
    }
  }
}
| 7,050 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherOutputService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements the {@link OutputProtocol} RPC interface: receives output data,
 * status messages, counter updates, and completion/failure notifications from
 * the tethered subprocess and relays them to the Hadoop framework.
 */
class TetherOutputService implements OutputProtocol {
  private Reporter reporter;
  private OutputCollector<TetherData, NullWritable> collector;
  private int inputPort; // port the subprocess listens on for input; 0 until reported
  private boolean complete; // true once the subprocess reports completion
  private String error; // non-null once a failure has been recorded
  private static final Logger LOG = LoggerFactory.getLogger(TetherOutputService.class);
  // Timeout in milliseconds when waiting for messages from the subprocess.
  // (The value is passed directly to Object.wait, which takes milliseconds;
  // the original comment incorrectly said "seconds".)
  // what is a good value?
  public static final long TIMEOUT = 10 * 1000;

  public TetherOutputService(OutputCollector<TetherData, NullWritable> collector, Reporter reporter) {
    this.reporter = reporter;
    this.collector = collector;
  }

  @Override
  public synchronized void configure(int inputPort) {
    LOG.info("got input port from child: inputport=" + inputPort);
    this.inputPort = inputPort;
    notify();
  }

  /**
   * Returns the subprocess's input port, waiting up to {@link #TIMEOUT}
   * milliseconds for the subprocess to report it.
   *
   * @throws Exception if the subprocess does not report a port in time
   */
  public synchronized int inputPort() throws Exception {
    if (inputPort == 0) {
      LOG.info("waiting for input port from child");
      wait(TIMEOUT);
    }
    if (inputPort == 0) {
      LOG.error(
          "Parent process timed out waiting for subprocess to send input port. Check the job log files for more info.");
      throw new Exception("Parent process timed out waiting for subprocess to send input port");
    }
    return inputPort;
  }

  @Override
  public void output(ByteBuffer datum) {
    try {
      collector.collect(new TetherData(datum), NullWritable.get());
    } catch (Throwable e) {
      LOG.warn("Error: " + e, e);
      synchronized (this) {
        error = e.toString();
        // Wake any thread blocked in waitForFinish(); the original code set
        // the error without notifying, which could leave it waiting forever.
        notify();
      }
    }
  }

  @Override
  public void outputPartitioned(int partition, ByteBuffer datum) {
    // Record the subprocess-chosen partition for this datum, then emit it.
    TetherPartitioner.setNextPartition(partition);
    output(datum);
  }

  @Override
  public void status(String message) {
    reporter.setStatus(message.toString());
  }

  @Override
  public void count(String group, String name, long amount) {
    reporter.getCounter(group.toString(), name.toString()).increment(amount);
  }

  @Override
  public synchronized void fail(String message) {
    LOG.warn("Failing: " + message);
    error = message;
    notify();
  }

  @Override
  public synchronized void complete() {
    LOG.info("got task complete");
    complete = true;
    notify();
  }

  /** True once the task has either completed or failed. */
  public synchronized boolean isFinished() {
    return complete || (error != null);
  }

  /** Returns the recorded error message, or null if none. */
  public String error() {
    return error;
  }

  /**
   * Blocks until the task finishes.
   *
   * @return true if the task failed
   */
  public synchronized boolean waitForFinish() throws InterruptedException {
    while (!isFinished())
      wait();
    return error != null;
  }
}
| 7,051 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred.tether;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
/**
 * Constructs and submits tether jobs. This may be used as an API-based method
 * to launch tether jobs.
 */
@SuppressWarnings("deprecation")
public class TetherJob extends Configured {

  /** Configuration key holding the URI of the tethered executable. */
  public static final String TETHER_EXEC = "avro.tether.executable";

  /** Configuration key holding newline-separated arguments for the executable. */
  public static final String TETHER_EXEC_ARGS = "avro.tether.executable_args";

  /** Configuration key: "true" if the executable should be shipped via the distributed cache. */
  public static final String TETHER_EXEC_CACHED = "avro.tether.executable_cached";

  /** Configuration key selecting the parent/subprocess transport ("http" or "sasl"). */
  public static final String TETHER_PROTOCOL = "avro.tether.protocol";

  /** Get the URI of the application's executable. */
  public static URI getExecutable(JobConf job) {
    try {
      // Use the TETHER_EXEC constant rather than a duplicated string literal
      // so reads and writes of this setting cannot drift apart.
      return new URI(job.get(TETHER_EXEC));
    } catch (URISyntaxException e) {
      throw new RuntimeException(e);
    }
  }

  /** Set the URI for the application's executable. Normally this in HDFS. */
  public static void setExecutable(JobConf job, File executable) {
    setExecutable(job, executable, Collections.emptyList(), false);
  }

  /**
   * Set the URI for the application's executable (i.e the program to run in a
   * subprocess and provides the mapper/reducer).
   *
   * @param job        - Job
   * @param executable - The URI of the executable
   * @param args       - List of additional arguments; Null if no arguments
   * @param cached     - If true, the executable URI is cached using
   *                   DistributedCache - if false its not cached. I.e if the file
   *                   is already stored on each local file system or if its on a
   *                   NFS share
   */
  public static void setExecutable(JobConf job, File executable, List<String> args, boolean cached) {
    job.set(TETHER_EXEC, executable.toString());
    if (args != null) {
      // Arguments are stored newline-separated; each one is terminated by '\n'.
      StringBuilder sb = new StringBuilder();
      for (String a : args) {
        sb.append(a);
        sb.append('\n');
      }
      job.set(TETHER_EXEC_ARGS, sb.toString());
    }
    job.set(TETHER_EXEC_CACHED, Boolean.toString(cached));
  }

  /**
   * Extract from the job configuration file an instance of the TRANSPROTO
   * enumeration to represent the protocol to use for the communication
   *
   * @param job the job configuration
   * @return - Get the currently used protocol
   */
  public static TetheredProcess.Protocol getProtocol(JobConf job) {
    // Read the setting once instead of re-querying the configuration for
    // every comparison.
    String protocol = job.get(TETHER_PROTOCOL);
    if (protocol == null) {
      return TetheredProcess.Protocol.NONE;
    }
    switch (protocol) {
    case "http":
      return TetheredProcess.Protocol.HTTP;
    case "sasl":
      return TetheredProcess.Protocol.SASL;
    default:
      throw new RuntimeException("Unknown value for protocol: " + protocol);
    }
  }

  /**
   * Submit a job to the map/reduce cluster. All of the necessary modifications to
   * the job to run under tether are made to the configuration.
   */
  public static RunningJob runJob(JobConf job) throws IOException {
    setupTetherJob(job);
    return JobClient.runJob(job);
  }

  /** Submit a job to the Map-Reduce framework. */
  public static RunningJob submitJob(JobConf conf) throws IOException {
    setupTetherJob(conf);
    return new JobClient(conf).submitJob(conf);
  }

  /**
   * Determines which transport protocol (e.g http or sasl) used to communicate
   * between the parent and subprocess
   *
   * @param job   - job configuration
   * @param proto - String identifying the protocol currently http or sasl
   * @throws IOException if the protocol is neither "http" nor "sasl"
   */
  public static void setProtocol(JobConf job, String proto) throws IOException {
    // Locale.ROOT avoids locale-sensitive lowercasing surprises (e.g. the
    // Turkish dotless i) when callers pass "HTTP" or "SASL".
    proto = proto.trim().toLowerCase(java.util.Locale.ROOT);
    if (!(proto.equals("http") || proto.equals("sasl"))) {
      throw new IOException("protocol must be 'http' or 'sasl'");
    }
    job.set(TETHER_PROTOCOL, proto);
  }

  /**
   * Rewrites the job configuration so the job runs under the tether framework:
   * installs the tether map runner, partitioner, reducer, input/output formats
   * and serializations, and optionally distributes the executable.
   */
  private static void setupTetherJob(JobConf job) throws IOException {
    job.setMapRunnerClass(TetherMapRunner.class);
    job.setPartitionerClass(TetherPartitioner.class);
    job.setReducerClass(TetherReducer.class);

    job.setInputFormat(TetherInputFormat.class);
    job.setOutputFormat(TetherOutputFormat.class);

    job.setOutputKeyClass(TetherData.class);
    job.setOutputKeyComparatorClass(TetherKeyComparator.class);
    job.setMapOutputValueClass(NullWritable.class);

    // set the map output key class to TetherData
    job.setMapOutputKeyClass(TetherData.class);

    // Default the transport protocol to sasl if the job did not choose one.
    if (job.getStrings(TETHER_PROTOCOL) == null) {
      job.set(TETHER_PROTOCOL, "sasl");
    }

    // add TetherKeySerialization to io.serializations
    Collection<String> serializations = job.getStringCollection("io.serializations");
    if (!serializations.contains(TetherKeySerialization.class.getName())) {
      serializations.add(TetherKeySerialization.class.getName());
      job.setStrings("io.serializations", serializations.toArray(new String[0]));
    }

    // determine whether the executable should be added to the cache.
    if (job.getBoolean(TETHER_EXEC_CACHED, false)) {
      DistributedCache.addCacheFile(getExecutable(job), job);
    }
  }
}
| 7,052 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/util/AvroCharSequenceComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.util;
import java.util.Comparator;
/**
 * Compares Avro string data (data with schema <i>"string"</i>).
 *
 * <p>
 * The only case where comparing Avro objects does not work using their natural
 * order is when the schema is <i>"string"</i>. The Avro string schema maps to
 * the Java <code>CharSequence</code> interface, which does not define
 * <code>equals</code>, <code>hashCode</code>, or <code>compareTo</code>.
 * </p>
 *
 * <p>
 * Using this comparator enables comparisons between <code>String</code> and
 * <code>Utf8</code> objects that are both valid when working with Avro strings.
 * </p>
 *
 * @param <T> The type of object to compare.
 */
public class AvroCharSequenceComparator<T> implements Comparator<T> {
  /** A singleton instance. */
  public static final AvroCharSequenceComparator<CharSequence> INSTANCE = new AvroCharSequenceComparator<>();

  /** {@inheritDoc} */
  @Override
  public int compare(T o1, T o2) {
    if (!(o1 instanceof CharSequence) || !(o2 instanceof CharSequence)) {
      throw new RuntimeException("Attempted use of AvroCharSequenceComparator on non-CharSequence objects: "
          + o1.getClass().getName() + " and " + o2.getClass().getName());
    }
    return compareCharSequence((CharSequence) o1, (CharSequence) o2);
  }

  /**
   * Lexicographically compares the CharSequences <code>o1</code> and
   * <code>o2</code>, character by character.
   *
   * @param o1 The left charsequence.
   * @param o2 The right charsequence.
   * @return a negative integer, zero, or a positive integer if the first argument
   *         is less than, equal to, or greater than the second, respectively.
   */
  private int compareCharSequence(CharSequence o1, CharSequence o2) {
    // Scan only the shared prefix; the first differing character decides.
    int prefixLength = Math.min(o1.length(), o2.length());
    for (int i = 0; i < prefixLength; i++) {
      int charComparison = Character.compare(o1.charAt(i), o2.charAt(i));
      if (charComparison != 0) {
        return charComparison;
      }
    }
    // The shared prefix matches entirely; the shorter sequence sorts first.
    return Integer.compare(o1.length(), o2.length());
  }
}
| 7,053 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/file/SortedKeyValueFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.file;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.hadoop.util.AvroCharSequenceComparator;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A SortedKeyValueFile is an indexed Avro container file of KeyValue records
* sorted by key.
*
* <p>
* The SortedKeyValueFile is a directory with two files, named 'data' and
* 'index'. The 'data' file is an ordinary Avro container file with records.
* Each record has exactly two fields, 'key' and 'value'. The keys are sorted
* lexicographically. The 'index' file is a small Avro container file mapping
* keys in the 'data' file to their byte positions. The index file is intended
* to fit in memory, so it should remain small. There is one entry in the index
* file for each data block in the Avro container file.
* </p>
*
* <p>
* SortedKeyValueFile is to Avro container file as MapFile is to SequenceFile.
* </p>
*/
public class SortedKeyValueFile {
  private static final Logger LOG = LoggerFactory.getLogger(SortedKeyValueFile.class);

  /** The name of the data file within the SortedKeyValueFile directory. */
  public static final String DATA_FILENAME = "data";

  /** The name of the index file within the SortedKeyValueFile directory. */
  public static final String INDEX_FILENAME = "index";

  /**
   * Reads a SortedKeyValueFile by loading the key index into memory.
   *
   * <p>
   * When doing a lookup, this reader finds the correct block in the data file
   * using the key index. It performs a single disk seek to the block and loads
   * the entire block into memory. The block is scanned until the key is found or
   * is determined not to exist.
   * </p>
   *
   * @param <K> The key type.
   * @param <V> The value type.
   */
  public static class Reader<K, V> implements Closeable, Iterable<AvroKeyValue<K, V>> {
    /** The index from key to its byte offset into the data file. */
    private final NavigableMap<K, Long> mIndex;

    /** The reader for the data file. */
    private final DataFileReader<GenericRecord> mDataFileReader;

    /** The key schema for the data file. */
    private final Schema mKeySchema;

    /** The model for the data. */
    private GenericData model;

    /** A class to encapsulate the options of a Reader. */
    public static class Options {
      /** The configuration. */
      private Configuration mConf;

      /** The path to the SortedKeyValueFile to read. */
      private Path mPath;

      /** The reader schema for the key. */
      private Schema mKeySchema;

      /** The reader schema for the value. */
      private Schema mValueSchema;

      /** The model for the data. */
      private GenericData model = SpecificData.get();

      /**
       * Sets the configuration.
       *
       * @param conf The configuration.
       * @return This options instance.
       */
      public Options withConfiguration(Configuration conf) {
        mConf = conf;
        return this;
      }

      /**
       * Gets the configuration.
       *
       * @return The configuration.
       */
      public Configuration getConfiguration() {
        return mConf;
      }

      /**
       * Sets the input path.
       *
       * @param path The input path.
       * @return This options instance.
       */
      public Options withPath(Path path) {
        mPath = path;
        return this;
      }

      /**
       * Gets the input path.
       *
       * @return The input path.
       */
      public Path getPath() {
        return mPath;
      }

      /**
       * Sets the reader schema for the key.
       *
       * @param keySchema The reader schema for the key.
       * @return This options instance.
       */
      public Options withKeySchema(Schema keySchema) {
        mKeySchema = keySchema;
        return this;
      }

      /**
       * Gets the reader schema for the key.
       *
       * @return The reader schema for the key.
       */
      public Schema getKeySchema() {
        return mKeySchema;
      }

      /**
       * Sets the reader schema for the value.
       *
       * @param valueSchema The reader schema for the value.
       * @return This options instance.
       */
      public Options withValueSchema(Schema valueSchema) {
        mValueSchema = valueSchema;
        return this;
      }

      /**
       * Gets the reader schema for the value.
       *
       * @return The reader schema for the value.
       */
      public Schema getValueSchema() {
        return mValueSchema;
      }

      /** Set the data model. */
      public Options withDataModel(GenericData model) {
        this.model = model;
        return this;
      }

      /** Return the data model. */
      public GenericData getDataModel() {
        return model;
      }
    }

    /**
     * Constructs a reader.
     *
     * @param options The options.
     * @throws IOException If there is an error.
     */
    public Reader(Options options) throws IOException {
      mKeySchema = options.getKeySchema();
      this.model = options.getDataModel();

      // Load the whole index file into memory.
      Path indexFilePath = new Path(options.getPath(), INDEX_FILENAME);
      LOG.debug("Loading the index from {}", indexFilePath);
      mIndex = loadIndexFile(options.getConfiguration(), indexFilePath, mKeySchema);

      // Open the data file.
      Path dataFilePath = new Path(options.getPath(), DATA_FILENAME);
      LOG.debug("Loading the data file {}", dataFilePath);
      Schema recordSchema = AvroKeyValue.getSchema(mKeySchema, options.getValueSchema());
      DatumReader<GenericRecord> datumReader = model.createDatumReader(recordSchema);
      mDataFileReader = new DataFileReader<>(new FsInput(dataFilePath, options.getConfiguration()), datumReader);
    }

    /**
     * Gets the first value associated with a given key, or null if it is not found.
     *
     * <p>
     * This method will move the current position in the file to the record
     * immediately following the requested key.
     * </p>
     *
     * @param key The key to look up.
     * @return The value associated with the key, or null if not found.
     * @throws IOException If there is an error.
     */
    public V get(K key) throws IOException {
      // Look up the entry in the index.
      LOG.debug("Looking up key {} in the index", key);
      Map.Entry<K, Long> indexEntry = mIndex.floorEntry(key);
      if (null == indexEntry) {
        LOG.debug("Key {} was not found in the index (it is before the first entry)", key);
        return null;
      }
      LOG.debug("Key was found in the index, seeking to syncpoint {}", indexEntry.getValue());

      // Seek to the data block that would contain the entry.
      mDataFileReader.seek(indexEntry.getValue());

      // Scan from this position of the file until we find it or pass it.
      for (AvroKeyValue<K, V> record : this) {
        int comparison = model.compare(record.getKey(), key, mKeySchema);
        if (0 == comparison) {
          // We've found it!
          LOG.debug("Found record for key {}", key);
          return record.getValue();
        }
        if (comparison > 0) {
          // We've passed it.
          LOG.debug("Searched beyond the point where key {} would appear in the file", key);
          return null;
        }
      }

      // We've reached the end of the file.
      LOG.debug("Searched to the end of the file but did not find key {}", key);
      return null;
    }

    /**
     * Returns an iterator starting at the current position in the file.
     *
     * <p>
     * Use the get() method to move the current position.
     * </p>
     *
     * <p>
     * Note that this iterator is shared with other clients of the file; it does not
     * contain a separate pointer into the file.
     * </p>
     *
     * @return An iterator.
     */
    @Override
    public Iterator<AvroKeyValue<K, V>> iterator() {
      return new AvroKeyValue.Iterator<>(mDataFileReader.iterator());
    }

    /** {@inheritDoc} */
    @Override
    public void close() throws IOException {
      mDataFileReader.close();
    }

    /**
     * Loads an index file into an in-memory map, from key to file offset in bytes.
     *
     * @param conf      The configuration.
     * @param path      The path to the index file.
     * @param keySchema The reader schema for the key.
     * @return The index as a sorted in-memory map.
     * @throws IOException If there is an error.
     */
    private <K> NavigableMap<K, Long> loadIndexFile(Configuration conf, Path path, Schema keySchema)
        throws IOException {
      DatumReader<GenericRecord> datumReader = model
          .createDatumReader(AvroKeyValue.getSchema(keySchema, Schema.create(Schema.Type.LONG)));

      // Because Avro STRING types are mapped to the Java CharSequence class that
      // does not mandate the implementation of Comparable, we need to specify a
      // special CharSequence comparator if the key type is a string. This hack
      // only fixes the problem for primitive string types. If, for example, you
      // tried to use a record type as the key, any string fields inside of it
      // would not be compared correctly against java.lang.Strings.
      // (Choosing the comparator up front also avoids allocating a throwaway
      // TreeMap for string-keyed indexes.)
      NavigableMap<K, Long> index;
      if (Schema.create(Schema.Type.STRING).equals(keySchema)) {
        index = new TreeMap<>(new AvroCharSequenceComparator<>());
      } else {
        index = new TreeMap<>();
      }

      try (DataFileReader<GenericRecord> fileReader = new DataFileReader<>(new FsInput(path, conf), datumReader)) {
        for (GenericRecord genericRecord : fileReader) {
          AvroKeyValue<K, Long> indexRecord = new AvroKeyValue<>(genericRecord);
          index.put(indexRecord.getKey(), indexRecord.getValue());
        }
      }
      return index;
    }
  }

  /**
   * Writes a SortedKeyValueFile.
   *
   * @param <K> The key type.
   * @param <V> The value type.
   */
  public static class Writer<K, V> implements Closeable {
    /** The key schema. */
    private final Schema mKeySchema;

    /** The value schema. */
    private final Schema mValueSchema;

    /** The schema of the data file records. */
    private final Schema mRecordSchema;

    /** The schema of the index file records. */
    private final Schema mIndexSchema;

    /** The model for the data. */
    private GenericData model;

    /** The writer for the data file. */
    private final DataFileWriter<GenericRecord> mDataFileWriter;

    /** The writer for the index file. */
    private final DataFileWriter<GenericRecord> mIndexFileWriter;

    /**
     * We store an indexed key for every mIndexInterval records written to the data
     * file.
     */
    private final int mIndexInterval;

    /** The number of records written to the file so far. */
    private long mRecordsWritten;

    /** The most recent key that was appended to the file, or null. */
    private K mPreviousKey;

    /**
     * A class to encapsulate the various options of a SortedKeyValueFile.Writer.
     */
    public static class Options {
      /** The key schema. */
      private Schema mKeySchema;

      /** The value schema. */
      private Schema mValueSchema;

      /** The configuration. */
      private Configuration mConf;

      /** The path to the output file. */
      private Path mPath;

      /** The number of records between indexed entries. */
      private int mIndexInterval = 128;

      /** The model for the data. */
      private GenericData model = SpecificData.get();

      /** The compression codec for the data. */
      private CodecFactory codec = CodecFactory.nullCodec();

      /**
       * Sets the key schema.
       *
       * @param keySchema The key schema.
       * @return This options instance.
       */
      public Options withKeySchema(Schema keySchema) {
        mKeySchema = keySchema;
        return this;
      }

      /**
       * Gets the key schema.
       *
       * @return The key schema.
       */
      public Schema getKeySchema() {
        return mKeySchema;
      }

      /**
       * Sets the value schema.
       *
       * @param valueSchema The value schema.
       * @return This options instance.
       */
      public Options withValueSchema(Schema valueSchema) {
        mValueSchema = valueSchema;
        return this;
      }

      /**
       * Gets the value schema.
       *
       * @return The value schema.
       */
      public Schema getValueSchema() {
        return mValueSchema;
      }

      /**
       * Sets the configuration.
       *
       * @param conf The configuration.
       * @return This options instance.
       */
      public Options withConfiguration(Configuration conf) {
        mConf = conf;
        return this;
      }

      /**
       * Gets the configuration.
       *
       * @return The configuration.
       */
      public Configuration getConfiguration() {
        return mConf;
      }

      /**
       * Sets the output path.
       *
       * @param path The output path.
       * @return This options instance.
       */
      public Options withPath(Path path) {
        mPath = path;
        return this;
      }

      /**
       * Gets the output path.
       *
       * @return The output path.
       */
      public Path getPath() {
        return mPath;
      }

      /**
       * Sets the index interval.
       *
       * <p>
       * If the index inverval is N, then every N records will be indexed into the
       * index file.
       * </p>
       *
       * @param indexInterval The index interval.
       * @return This options instance.
       */
      public Options withIndexInterval(int indexInterval) {
        mIndexInterval = indexInterval;
        return this;
      }

      /**
       * Gets the index interval.
       *
       * @return The index interval.
       */
      public int getIndexInterval() {
        return mIndexInterval;
      }

      /** Set the data model. */
      public Options withDataModel(GenericData model) {
        this.model = model;
        return this;
      }

      /** Return the data model. */
      public GenericData getDataModel() {
        return model;
      }

      /** Set the compression codec. */
      public Options withCodec(String codec) {
        this.codec = CodecFactory.fromString(codec);
        return this;
      }

      /** Set the compression codec. */
      public Options withCodec(CodecFactory codec) {
        this.codec = codec;
        return this;
      }

      /** Return the compression codec. */
      public CodecFactory getCodec() {
        return this.codec;
      }
    }

    /**
     * Creates a writer for a new file.
     *
     * @param options The options.
     * @throws IOException If there is an error.
     */
    public Writer(Options options) throws IOException {
      this.model = options.getDataModel();

      if (null == options.getConfiguration()) {
        throw new IllegalArgumentException("Configuration may not be null");
      }

      FileSystem fileSystem = options.getPath().getFileSystem(options.getConfiguration());

      // Save the key and value schemas.
      mKeySchema = options.getKeySchema();
      if (null == mKeySchema) {
        throw new IllegalArgumentException("Key schema may not be null");
      }
      mValueSchema = options.getValueSchema();
      if (null == mValueSchema) {
        throw new IllegalArgumentException("Value schema may not be null");
      }

      // Save the index interval.
      mIndexInterval = options.getIndexInterval();

      // Create the directory.
      if (!fileSystem.mkdirs(options.getPath())) {
        throw new IOException("Unable to create directory for SortedKeyValueFile: " + options.getPath());
      }
      LOG.debug("Created directory {}", options.getPath());

      // Open a writer for the data file.
      Path dataFilePath = new Path(options.getPath(), DATA_FILENAME);
      LOG.debug("Creating writer for avro data file: {}", dataFilePath);
      mRecordSchema = AvroKeyValue.getSchema(mKeySchema, mValueSchema);
      DatumWriter<GenericRecord> datumWriter = model.createDatumWriter(mRecordSchema);
      OutputStream dataOutputStream = fileSystem.create(dataFilePath);
      // Set the auto-sync interval sufficiently large, since we will manually
      // sync every mIndexInterval records.
      mDataFileWriter = new DataFileWriter<>(datumWriter).setSyncInterval(1 << 20).setCodec(options.getCodec())
          .create(mRecordSchema, dataOutputStream);

      // Open a writer for the index file.
      Path indexFilePath = new Path(options.getPath(), INDEX_FILENAME);
      LOG.debug("Creating writer for avro index file: {}", indexFilePath);
      mIndexSchema = AvroKeyValue.getSchema(mKeySchema, Schema.create(Schema.Type.LONG));
      DatumWriter<GenericRecord> indexWriter = model.createDatumWriter(mIndexSchema);
      try {
        OutputStream indexOutputStream = fileSystem.create(indexFilePath);
        mIndexFileWriter = new DataFileWriter<>(indexWriter).create(mIndexSchema, indexOutputStream);
      } catch (IOException | RuntimeException e) {
        // Don't leak the already-open data file writer if the index file
        // cannot be created.
        try {
          mDataFileWriter.close();
        } catch (IOException suppressed) {
          e.addSuppressed(suppressed);
        }
        throw e;
      }
    }

    /**
     * Appends a record to the SortedKeyValueFile.
     *
     * @param key   The key.
     * @param value The value.
     * @throws IOException If there is an error.
     */
    public void append(K key, V value) throws IOException {
      // Make sure the keys are inserted in sorted order.
      if (null != mPreviousKey && model.compare(key, mPreviousKey, mKeySchema) < 0) {
        throw new IllegalArgumentException("Records must be inserted in sorted key order." + " Attempted to insert key "
            + key + " after " + mPreviousKey + ".");
      }
      // Deep-copy the key: the caller may mutate or reuse the object it passed in.
      mPreviousKey = model.deepCopy(mKeySchema, key);

      // Construct the data record.
      AvroKeyValue<K, V> dataRecord = new AvroKeyValue<>(new GenericData.Record(mRecordSchema));
      dataRecord.setKey(key);
      dataRecord.setValue(value);

      // Index it if necessary.
      if (0 == mRecordsWritten++ % mIndexInterval) {
        // Force a sync to the data file writer, which closes the current data block (if
        // nonempty) and reports the current position in the file.
        long position = mDataFileWriter.sync();

        // Construct the record to put in the index.
        AvroKeyValue<K, Long> indexRecord = new AvroKeyValue<>(new GenericData.Record(mIndexSchema));
        indexRecord.setKey(key);
        indexRecord.setValue(position);
        mIndexFileWriter.append(indexRecord.get());
      }

      // Write it to the data file.
      mDataFileWriter.append(dataRecord.get());
    }

    /** {@inheritDoc} */
    @Override
    public void close() throws IOException {
      try {
        mIndexFileWriter.close();
      } finally {
        // Always close the data writer, even if closing the index writer throws.
        mDataFileWriter.close();
      }
    }
  }
}
| 7,054 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/file/HadoopCodecFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.hadoop.file;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.file.CodecFactory;
/**
 * Encapsulates the ability to specify and configure an avro compression codec
 * from a given hadoop codec defined with the configuration parameter:
 * mapred.output.compression.codec
 *
 * Currently there are four codecs registered by default:
 * <ul>
 * <li>{@code org.apache.hadoop.io.compress.DeflateCodec} will map to
 * {@code deflate}</li>
 * <li>{@code org.apache.hadoop.io.compress.SnappyCodec} will map to
 * {@code snappy}</li>
 * <li>{@code org.apache.hadoop.io.compress.BZip2Codec} will map to
 * {@code bzip2}</li>
 * <li>{@code org.apache.hadoop.io.compress.GZipCodec} will map to
 * {@code deflate}</li>
 * </ul>
 */
public class HadoopCodecFactory {

  /** Maps Hadoop compression codec class names to Avro codec names. */
  private static final Map<String, String> HADOOP_AVRO_NAME_MAP = new HashMap<>();

  static {
    HADOOP_AVRO_NAME_MAP.put("org.apache.hadoop.io.compress.DeflateCodec", "deflate");
    HADOOP_AVRO_NAME_MAP.put("org.apache.hadoop.io.compress.SnappyCodec", "snappy");
    HADOOP_AVRO_NAME_MAP.put("org.apache.hadoop.io.compress.BZip2Codec", "bzip2");
    HADOOP_AVRO_NAME_MAP.put("org.apache.hadoop.io.compress.GZipCodec", "deflate");
  }

  /**
   * Maps a hadoop codec name into a CodecFactory.
   *
   * Currently there are four hadoop codecs registered:
   * <ul>
   * <li>{@code org.apache.hadoop.io.compress.DeflateCodec} will map to
   * {@code deflate}</li>
   * <li>{@code org.apache.hadoop.io.compress.SnappyCodec} will map to
   * {@code snappy}</li>
   * <li>{@code org.apache.hadoop.io.compress.BZip2Codec} will map to
   * {@code bzip2}</li>
   * <li>{@code org.apache.hadoop.io.compress.GZipCodec} will map to
   * {@code deflate}</li>
   * </ul>
   *
   * @param hadoopCodecClass the fully-qualified Hadoop codec class name
   * @return the matching Avro codec factory, or null if the Hadoop codec class
   *         is not registered
   * @throws AvroRuntimeException if the mapped Avro codec cannot be created
   */
  public static CodecFactory fromHadoopString(String hadoopCodecClass) {
    String avroCodec = HADOOP_AVRO_NAME_MAP.get(hadoopCodecClass);
    if (avroCodec == null) {
      // Unknown Hadoop codec: preserve the historical "no codec" behavior.
      return null;
    }
    try {
      return CodecFactory.fromString(avroCodec);
    } catch (Exception e) {
      throw new AvroRuntimeException("Unrecognized hadoop codec: " + hadoopCodecClass, e);
    }
  }

  /**
   * Returns the Avro codec name registered for a Hadoop codec class name, or
   * null if none is registered.
   */
  public static String getAvroCodecName(String hadoopCodecClass) {
    return HADOOP_AVRO_NAME_MAP.get(hadoopCodecClass);
  }
}
| 7,055 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroKeyComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.BinaryData;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.RawComparator;
/**
* The {@link org.apache.hadoop.io.RawComparator} used by jobs configured with
* {@link org.apache.avro.mapreduce.AvroJob}.
*
* <p>
* Compares AvroKeys output from the map phase for sorting.
* </p>
*/
public class AvroKeyComparator<T> extends Configured implements RawComparator<AvroKey<T>> {
/** The schema of the Avro data in the key to compare. */
private Schema mSchema;
// Data model (generic/specific/reflect) used for the deserialized compare().
private GenericData mDataModel;
/** {@inheritDoc} */
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
// conf can be null during framework initialization; defer setup until a
// real configuration is supplied.
if (null != conf) {
// The MapReduce framework will be using this comparator to sort AvroKey objects
// output from the map phase, so use the schema defined for the map output key
// and the data model non-raw compare() implementation.
mSchema = AvroJob.getMapOutputKeySchema(conf);
mDataModel = AvroSerialization.createDataModel(conf);
}
}
/** {@inheritDoc} */
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
// Raw compare over serialized Avro data. The length arguments are unused;
// BinaryData.compare presumably walks the bytes according to mSchema —
// NOTE(review): confirm against the BinaryData API documentation.
return BinaryData.compare(b1, s1, b2, s2, mSchema);
}
/** {@inheritDoc} */
@Override
public int compare(AvroKey<T> x, AvroKey<T> y) {
// Deserialized compare, delegated to the configured data model.
return mDataModel.compare(x.datum(), y.datum(), mSchema);
}
}
| 7,056 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroDatumConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import org.apache.avro.Schema;
/**
* Converts a Java object into an Avro datum.
*
* @param <INPUT> The type of the input Java object to convert.
* @param <OUTPUT> The type of the Avro datum to convert to.
*/
public abstract class AvroDatumConverter<INPUT, OUTPUT> {
  /**
   * Converts an input Java object into an Avro datum.
   *
   * @param input The Java object to convert.
   * @return The Avro datum corresponding to the input object.
   */
  public abstract OUTPUT convert(INPUT input);

  /**
   * Gets the writer schema that should be used to serialize the output Avro
   * datum.
   *
   * @return The writer schema for the output Avro datum.
   */
  public abstract Schema getWriterSchema();
}
| 7,057 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroKeyDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroWrapper;
/**
* Deserializes AvroKey objects within Hadoop.
*
* @param <D> The java type of the avro data to deserialize.
*
* @see AvroDeserializer
*/
public class AvroKeyDeserializer<D> extends AvroDeserializer<AvroWrapper<D>, D> {
  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize.
   * @param classLoader  The class loader handed to the reflect data model for
   *                     resolving the classes of the deserialized datums.
   */
  public AvroKeyDeserializer(Schema writerSchema, Schema readerSchema, ClassLoader classLoader) {
    super(writerSchema, readerSchema, classLoader);
  }

  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize.
   * @param datumReader  The Avro datum reader to use for deserialization.
   */
  public AvroKeyDeserializer(Schema writerSchema, Schema readerSchema, DatumReader<D> datumReader) {
    super(writerSchema, readerSchema, datumReader);
  }

  /**
   * Creates a new empty <code>AvroKey</code> instance.
   *
   * @return a new empty AvroKey.
   */
  @Override
  protected AvroWrapper<D> createAvroWrapper() {
    return new AvroKey<>(null);
  }
}
| 7,058 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSequenceFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
/**
* A wrapper around a Hadoop {@link org.apache.hadoop.io.SequenceFile} that also
* supports reading and writing Avro data.
*
* <p>
* The vanilla Hadoop <code>SequenceFile</code> contains a <i>header</i>
* followed by a sequence of <i>records</i>. A <i>record</i> consists of a
* <i>key</i> and a <i>value</i>. The <i>key</i> and <i>value</i> must either:
* </p>
*
* <ul>
* <li>implement the <code>Writable</code> interface, or</li>
* <li>be accepted by a <code>Serialization</code> registered with the
* <code>SerializationFactory</code>.</li>
* </ul>
*
* <p>
* Since Avro data are Plain Old Java Objects (e.g., <code>Integer</code> for
* data with schema <i>"int"</i>), they do not implement <i>Writable</i>.
* Furthermore, a {@link org.apache.hadoop.io.serializer.Serialization}
* implementation cannot determine whether an object instance of type
* <code>CharSequence</code> that also implements <code>Writable</code> should
* be serialized using Avro or WritableSerialization.
* </p>
*
* <p>
* The solution implemented in <code>AvroSequenceFile</code> is to:
* </p>
*
* <ul>
* <li>wrap Avro key data in an <code>AvroKey</code> object,</li>
* <li>wrap Avro value data in an <code>AvroValue</code> object,</li>
* <li>configure and register <code>AvroSerialization</code> with the
* <code>SerializationFactory</code>, which will accept only objects that are
* instances of either <code>AvroKey</code> or <code>AvroValue</code>, and</li>
* <li>store the Avro key and value schemas in the SequenceFile
* <i>header</i>.</li>
* </ul>
*/
public class AvroSequenceFile {
private static final Logger LOG = LoggerFactory.getLogger(AvroSequenceFile.class);
/** The SequenceFile.Metadata field for the Avro key writer schema. */
public static final Text METADATA_FIELD_KEY_SCHEMA = new Text("avro.key.schema");
/** The SequenceFile.Metadata field for the Avro value writer schema. */
public static final Text METADATA_FIELD_VALUE_SCHEMA = new Text("avro.value.schema");
/** Constructor disabled for this container class. */
private AvroSequenceFile() {
}
/**
* Creates a writer from a set of options.
*
* <p>
* Since there are different implementations of <code>Writer</code> depending on
* the compression type, this method constructs the appropriate subclass
* depending on the compression type given in the <code>options</code>.
* </p>
*
* @param options The options for the writer.
* @return A new writer instance.
* @throws IOException If the writer cannot be created.
*/
public static SequenceFile.Writer createWriter(Writer.Options options) throws IOException {
return SequenceFile.createWriter(options.getFileSystem(), options.getConfigurationWithAvroSerialization(),
options.getOutputPath(), options.getKeyClass(), options.getValueClass(), options.getBufferSizeBytes(),
options.getReplicationFactor(), options.getBlockSizeBytes(), options.getCompressionType(),
options.getCompressionCodec(), options.getProgressable(), options.getMetadataWithAvroSchemas());
}
/**
* A writer for an uncompressed SequenceFile that supports Avro data.
*/
public static class Writer extends SequenceFile.Writer {
/**
* A helper class to encapsulate the options that can be used to construct a
* Writer.
*/
public static class Options {
/**
* A magic value representing the default for buffer size, block size, and
* replication factor.
*/
private static final short DEFAULT = -1;
private FileSystem mFileSystem;
private Configuration mConf;
private Path mOutputPath;
private Class<?> mKeyClass;
private Schema mKeyWriterSchema;
private Class<?> mValueClass;
private Schema mValueWriterSchema;
private int mBufferSizeBytes;
private short mReplicationFactor;
private long mBlockSizeBytes;
private Progressable mProgressable;
private CompressionType mCompressionType;
private CompressionCodec mCompressionCodec;
private Metadata mMetadata;
/**
* Creates a new <code>Options</code> instance with default values.
*/
public Options() {
mBufferSizeBytes = DEFAULT;
mReplicationFactor = DEFAULT;
mBlockSizeBytes = DEFAULT;
mCompressionType = CompressionType.NONE;
mMetadata = new Metadata();
}
/**
* Sets the filesystem the SequenceFile should be written to.
*
* @param fileSystem The filesystem.
* @return This options instance.
*/
public Options withFileSystem(FileSystem fileSystem) {
if (null == fileSystem) {
throw new IllegalArgumentException("Filesystem may not be null");
}
mFileSystem = fileSystem;
return this;
}
/**
* Sets the Hadoop configuration.
*
* @param conf The configuration.
* @return This options instance.
*/
public Options withConfiguration(Configuration conf) {
if (null == conf) {
throw new IllegalArgumentException("Configuration may not be null");
}
mConf = conf;
return this;
}
/**
* Sets the output path for the SequenceFile.
*
* @param outputPath The output path.
* @return This options instance.
*/
public Options withOutputPath(Path outputPath) {
if (null == outputPath) {
throw new IllegalArgumentException("Output path may not be null");
}
mOutputPath = outputPath;
return this;
}
/**
* Sets the class of the key records to be written.
*
* <p>
* If the keys will be Avro data, use
* {@link #withKeySchema(org.apache.avro.Schema)} to specify the writer schema.
* The key class will be automatically set to
* {@link org.apache.avro.mapred.AvroKey}.
* </p>
*
* @param keyClass The key class.
* @return This options instance.
*/
public Options withKeyClass(Class<?> keyClass) {
if (null == keyClass) {
throw new IllegalArgumentException("Key class may not be null");
}
mKeyClass = keyClass;
return this;
}
/**
* Sets the writer schema of the key records when using Avro data.
*
* <p>
* The key class will automatically be set to
* {@link org.apache.avro.mapred.AvroKey}, so there is no need to call
* {@link #withKeyClass(Class)} when using this method.
* </p>
*
* @param keyWriterSchema The writer schema for the keys.
* @return This options instance.
*/
public Options withKeySchema(Schema keyWriterSchema) {
if (null == keyWriterSchema) {
throw new IllegalArgumentException("Key schema may not be null");
}
withKeyClass(AvroKey.class);
mKeyWriterSchema = keyWriterSchema;
return this;
}
/**
* Sets the class of the value records to be written.
*
* <p>
* If the values will be Avro data, use
* {@link #withValueSchema(org.apache.avro.Schema)} to specify the writer
* schema. The value class will be automatically set to
* {@link org.apache.avro.mapred.AvroValue}.
* </p>
*
* @param valueClass The value class.
* @return This options instance.
*/
public Options withValueClass(Class<?> valueClass) {
if (null == valueClass) {
throw new IllegalArgumentException("Value class may not be null");
}
mValueClass = valueClass;
return this;
}
/**
* Sets the writer schema of the value records when using Avro data.
*
* <p>
* The value class will automatically be set to
* {@link org.apache.avro.mapred.AvroValue}, so there is no need to call
* {@link #withValueClass(Class)} when using this method.
* </p>
*
* @param valueWriterSchema The writer schema for the values.
* @return This options instance.
*/
public Options withValueSchema(Schema valueWriterSchema) {
if (null == valueWriterSchema) {
throw new IllegalArgumentException("Value schema may not be null");
}
withValueClass(AvroValue.class);
mValueWriterSchema = valueWriterSchema;
return this;
}
/**
* Sets the write buffer size in bytes.
*
* @param bytes The desired buffer size.
* @return This options instance.
*/
public Options withBufferSizeBytes(int bytes) {
if (bytes < 0) {
throw new IllegalArgumentException("Buffer size may not be negative");
}
mBufferSizeBytes = bytes;
return this;
}
/**
* Sets the desired replication factor for the file.
*
* @param replicationFactor The replication factor.
* @return This options instance.
*/
public Options withReplicationFactor(short replicationFactor) {
if (replicationFactor <= 0) {
throw new IllegalArgumentException("Replication factor must be positive");
}
mReplicationFactor = replicationFactor;
return this;
}
/**
* Sets the desired size of the file blocks.
*
* @param bytes The desired block size in bytes.
* @return This options instance.
*/
public Options withBlockSizeBytes(long bytes) {
if (bytes <= 0) {
throw new IllegalArgumentException("Block size must be positive");
}
mBlockSizeBytes = bytes;
return this;
}
/**
* Sets an object to report progress to.
*
* @param progressable A progressable object to track progress.
* @return This options instance.
*/
public Options withProgressable(Progressable progressable) {
mProgressable = progressable;
return this;
}
/**
* Sets the type of compression.
*
* @param compressionType The type of compression for the output file.
* @return This options instance.
*/
public Options withCompressionType(CompressionType compressionType) {
mCompressionType = compressionType;
return this;
}
/**
* Sets the compression codec to use if it is enabled.
*
* @param compressionCodec The compression codec.
* @return This options instance.
*/
public Options withCompressionCodec(CompressionCodec compressionCodec) {
mCompressionCodec = compressionCodec;
return this;
}
/**
* Sets the metadata that should be stored in the file <i>header</i>.
*
* @param metadata The file metadata.
* @return This options instance.
*/
public Options withMetadata(Metadata metadata) {
if (null == metadata) {
throw new IllegalArgumentException("Metadata may not be null");
}
mMetadata = metadata;
return this;
}
/**
* Gets the filesystem the SequenceFile should be written to.
*
* @return The file system to write to.
*/
public FileSystem getFileSystem() {
if (null == mFileSystem) {
throw new RuntimeException("Must call Options.withFileSystem()");
}
return mFileSystem;
}
/**
* Gets the Hadoop configuration.
*
* @return The Hadoop configuration.
*/
public Configuration getConfiguration() {
return mConf;
}
/**
* Gets the Hadoop configuration with Avro serialization registered.
*
* @return The Hadoop configuration.
*/
      public Configuration getConfigurationWithAvroSerialization() {
        Configuration conf = getConfiguration();
        if (null == conf) {
          throw new RuntimeException("Must call Options.withConfiguration()");
        }

        // Work on a copy so the caller's Configuration is not mutated.
        Configuration confWithAvro = new Configuration(conf);
        if (null != mKeyWriterSchema) {
          AvroSerialization.setKeyWriterSchema(confWithAvro, mKeyWriterSchema);
        }
        if (null != mValueWriterSchema) {
          AvroSerialization.setValueWriterSchema(confWithAvro, mValueWriterSchema);
        }
        AvroSerialization.addToConfiguration(confWithAvro);
        return confWithAvro;
      }
/**
* Gets the output path for the sequence file.
*
* @return The output path.
*/
public Path getOutputPath() {
if (null == mOutputPath) {
throw new RuntimeException("Must call Options.withOutputPath()");
}
return mOutputPath;
}
/**
* Gets the class of the key records.
*
* @return The key class.
*/
public Class<?> getKeyClass() {
if (null == mKeyClass) {
throw new RuntimeException("Must call Options.withKeyClass() or Options.withKeySchema()");
}
return mKeyClass;
}
/**
* Gets the class of the value records.
*
* @return The value class.
*/
public Class<?> getValueClass() {
if (null == mValueClass) {
throw new RuntimeException("Must call Options.withValueClass() or Options.withValueSchema()");
}
return mValueClass;
}
/**
* Gets the desired size of the buffer used when flushing records to disk.
*
* @return The buffer size in bytes.
*/
public int getBufferSizeBytes() {
if (DEFAULT == mBufferSizeBytes) {
return getConfiguration().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
}
return mBufferSizeBytes;
}
/**
* Gets the desired number of replicas to store for each block of the file.
*
* @return The replication factor for the blocks of the file.
*/
public short getReplicationFactor() {
if (DEFAULT == mReplicationFactor) {
return getFileSystem().getDefaultReplication();
}
return mReplicationFactor;
}
/**
* Gets the desired size of the file blocks.
*
* @return The size of a file block in bytes.
*/
public long getBlockSizeBytes() {
if (DEFAULT == mBlockSizeBytes) {
return getFileSystem().getDefaultBlockSize();
}
return mBlockSizeBytes;
}
/**
* Gets the object to report progress to.
*
* @return A progressable object to track progress.
*/
public Progressable getProgressable() {
return mProgressable;
}
/**
* Gets the type of compression.
*
* @return The compression type.
*/
public CompressionType getCompressionType() {
return mCompressionType;
}
/**
* Gets the compression codec.
*
* @return The compression codec.
*/
public CompressionCodec getCompressionCodec() {
return mCompressionCodec;
}
/**
* Gets the SequenceFile metadata to store in the <i>header</i>.
*
* @return The metadata header.
*/
public Metadata getMetadata() {
return mMetadata;
}
/**
* Gets the metadata to store in the file header, which includes any necessary
* Avro writer schemas.
*
* @return The metadata header with Avro writer schemas if Avro data is being
* written.
*/
      private Metadata getMetadataWithAvroSchemas() {
        // mMetadata was initialized in the constructor, and cannot be set to null.
        assert null != mMetadata;

        // Note: this mutates the shared mMetadata instance (also visible through
        // getMetadata()) by adding the schema entries in place.
        if (null != mKeyWriterSchema) {
          mMetadata.set(METADATA_FIELD_KEY_SCHEMA, new Text(mKeyWriterSchema.toString()));
        }
        if (null != mValueWriterSchema) {
          mMetadata.set(METADATA_FIELD_VALUE_SCHEMA, new Text(mValueWriterSchema.toString()));
        }
        return mMetadata;
      }
}
/**
* Creates a new <code>Writer</code> to a SequenceFile that supports Avro data.
*
* @param options The writer options.
* @throws IOException If the writer cannot be initialized.
*/
    public Writer(Options options) throws IOException {
      // This constructor ignores the compression options and always builds an
      // uncompressed writer (see the class javadoc); use
      // AvroSequenceFile.createWriter(Options) to honor the compression settings.
      super(options.getFileSystem(), options.getConfigurationWithAvroSerialization(), options.getOutputPath(),
          options.getKeyClass(), options.getValueClass(), options.getBufferSizeBytes(), options.getReplicationFactor(),
          options.getBlockSizeBytes(), options.getProgressable(), options.getMetadataWithAvroSchemas());
    }
}
/**
* A reader for SequenceFiles that may contain Avro data.
*/
public static class Reader extends SequenceFile.Reader {
/**
* A helper class to encapsulate the options that can be used to construct a
* Reader.
*/
public static class Options {
private FileSystem mFileSystem;
private Path mInputPath;
private Configuration mConf;
private Schema mKeyReaderSchema;
private Schema mValueReaderSchema;
/**
* Sets the filesystem the SequenceFile should be read from.
*
* @param fileSystem The filesystem.
* @return This options instance.
*/
public Options withFileSystem(FileSystem fileSystem) {
if (null == fileSystem) {
throw new IllegalArgumentException("Filesystem may not be null");
}
mFileSystem = fileSystem;
return this;
}
/**
* Sets the input path for the SequenceFile.
*
* @param inputPath The input path.
* @return This options instance.
*/
public Options withInputPath(Path inputPath) {
if (null == inputPath) {
throw new IllegalArgumentException("Input path may not be null");
}
mInputPath = inputPath;
return this;
}
/**
* Sets the Hadoop configuration.
*
* @param conf The configuration.
* @return This options instance.
*/
public Options withConfiguration(Configuration conf) {
if (null == conf) {
throw new IllegalArgumentException("Configuration may not be null");
}
mConf = conf;
return this;
}
/**
* Sets the reader schema of the key records when using Avro data.
*
* <p>
* If not set, the writer schema will be used as the reader schema.
* </p>
*
* @param keyReaderSchema The reader schema for the keys.
* @return This options instance.
*/
public Options withKeySchema(Schema keyReaderSchema) {
mKeyReaderSchema = keyReaderSchema;
return this;
}
/**
* Sets the reader schema of the value records when using Avro data.
*
* <p>
* If not set, the writer schema will be used as the reader schema.
* </p>
*
* @param valueReaderSchema The reader schema for the values.
* @return This options instance.
*/
public Options withValueSchema(Schema valueReaderSchema) {
mValueReaderSchema = valueReaderSchema;
return this;
}
/**
       * Gets the filesystem the SequenceFile should be read from.
*
* @return The file system to read from.
*/
public FileSystem getFileSystem() {
if (null == mFileSystem) {
throw new RuntimeException("Must call Options.withFileSystem()");
}
return mFileSystem;
}
/**
* Gets the input path for the sequence file.
*
* @return The input path.
*/
public Path getInputPath() {
if (null == mInputPath) {
throw new RuntimeException("Must call Options.withInputPath()");
}
return mInputPath;
}
/**
* Gets the Hadoop configuration.
*
* @return The Hadoop configuration.
*/
public Configuration getConfiguration() {
return mConf;
}
/**
* Gets the Hadoop configuration with Avro serialization registered.
*
* @return The Hadoop configuration.
* @throws IOException If there is an error configuring Avro serialization.
*/
      public Configuration getConfigurationWithAvroSerialization() throws IOException {
        Configuration conf = getConfiguration();
        if (null == conf) {
          throw new RuntimeException("Must call Options.withConfiguration()");
        }

        // Configure schemas and add Avro serialization to the configuration.
        // Work on a copy so the caller's Configuration is not mutated.
        Configuration confWithAvro = new Configuration(conf);
        AvroSerialization.addToConfiguration(confWithAvro);

        // Read the metadata header from the SequenceFile to get the writer schemas.
        Metadata metadata = AvroSequenceFile.getMetadata(getFileSystem(), getInputPath(), confWithAvro);

        // Set the key schema if present in the metadata.
        Text keySchemaText = metadata.get(METADATA_FIELD_KEY_SCHEMA);
        if (null != keySchemaText) {
          LOG.debug("Using key writer schema from SequenceFile metadata: {}", keySchemaText);
          AvroSerialization.setKeyWriterSchema(confWithAvro, new Schema.Parser().parse(keySchemaText.toString()));
          // The reader schema is only applied when the file header carries a key
          // writer schema; otherwise withKeySchema() is silently ignored.
          if (null != mKeyReaderSchema) {
            AvroSerialization.setKeyReaderSchema(confWithAvro, mKeyReaderSchema);
          }
        }

        // Set the value schema if present in the metadata.
        Text valueSchemaText = metadata.get(METADATA_FIELD_VALUE_SCHEMA);
        if (null != valueSchemaText) {
          LOG.debug("Using value writer schema from SequenceFile metadata: {}", valueSchemaText);
          AvroSerialization.setValueWriterSchema(confWithAvro, new Schema.Parser().parse(valueSchemaText.toString()));
          // Same caveat as above: applied only when a value writer schema exists.
          if (null != mValueReaderSchema) {
            AvroSerialization.setValueReaderSchema(confWithAvro, mValueReaderSchema);
          }
        }
        return confWithAvro;
      }
}
/**
* Creates a new <code>Reader</code> from a SequenceFile that supports Avro
* data.
*
* @param options The reader options.
* @throws IOException If the reader cannot be initialized.
*/
    public Reader(Options options) throws IOException {
      // The configuration returned below already carries the writer schemas read
      // from the file header, plus Avro serialization registration.
      super(options.getFileSystem(), options.getInputPath(), options.getConfigurationWithAvroSerialization());
    }
}
/**
* Open and read just the metadata header from a SequenceFile.
*
* @param fs The FileSystem the SequenceFile is on.
* @param path The path to the file.
* @param conf The Hadoop configuration.
* @return The metadata header.
* @throws IOException If the metadata cannot be read from the file.
*/
  private static Metadata getMetadata(FileSystem fs, Path path, Configuration conf) throws IOException {
    // Open a plain SequenceFile.Reader just long enough to pull the header
    // metadata; try-with-resources guarantees the reader is closed.
    try (SequenceFile.Reader metadataReader = new SequenceFile.Reader(fs, path, conf)) {
      return metadataReader.getMetadata();
    }
  }
}
| 7,059 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.hadoop.io.serializer.Serializer;
/**
* Serializes AvroWrapper objects within Hadoop.
*
* <p>
* Keys and values containing Avro types are more efficiently serialized outside
* of the WritableSerialization model, so they are wrapped in
* {@link org.apache.avro.mapred.AvroWrapper} objects and serialization is
* handled by this class.
* </p>
*
* <p>
* MapReduce jobs that use AvroWrapper objects as keys or values need to be
* configured with {@link AvroSerialization}. Use
* {@link org.apache.avro.mapreduce.AvroJob} to help with Job configuration.
* </p>
*
* @param <T> The Java type of the Avro data.
*/
public class AvroSerializer<T> implements Serializer<AvroWrapper<T>> {
  /** A factory for creating Avro datum encoders. */
  private static final EncoderFactory ENCODER_FACTORY = new EncoderFactory();

  /** The writer schema for the data to serialize. */
  private final Schema mWriterSchema;

  /** The Avro datum writer for serializing. */
  private final DatumWriter<T> mAvroDatumWriter;

  /** The Avro encoder for serializing. */
  private BinaryEncoder mAvroEncoder;

  /** The output stream for serializing. */
  private OutputStream mOutputStream;

  /**
   * Constructor.
   *
   * @param writerSchema The writer schema for the Avro data being serialized.
   */
  public AvroSerializer(Schema writerSchema) {
    if (null == writerSchema) {
      throw new IllegalArgumentException("Writer schema may not be null");
    }
    mWriterSchema = writerSchema;
    mAvroDatumWriter = new ReflectDatumWriter<>(writerSchema);
  }

  /**
   * Constructor.
   *
   * @param writerSchema The writer schema for the Avro data being serialized.
   * @param datumWriter  The datum writer to use for serialization.
   */
  public AvroSerializer(Schema writerSchema, DatumWriter<T> datumWriter) {
    if (null == writerSchema) {
      throw new IllegalArgumentException("Writer schema may not be null");
    }
    mWriterSchema = writerSchema;
    mAvroDatumWriter = datumWriter;
  }

  /**
   * Gets the writer schema being used for serialization.
   *
   * @return The writer schema.
   */
  public Schema getWriterSchema() {
    return mWriterSchema;
  }

  /** {@inheritDoc} */
  @Override
  public void open(OutputStream outputStream) throws IOException {
    mOutputStream = outputStream;
    // Passing the previous encoder allows the factory to reuse it where possible.
    mAvroEncoder = ENCODER_FACTORY.binaryEncoder(outputStream, mAvroEncoder);
  }

  /** {@inheritDoc} */
  @Override
  public void serialize(AvroWrapper<T> avroWrapper) throws IOException {
    mAvroDatumWriter.write(avroWrapper.datum(), mAvroEncoder);
    // This would be a lot faster if the Serializer interface had a flush() method
    // and the Hadoop framework called it when needed. For now, we'll have to
    // flush on every record.
    mAvroEncoder.flush();
  }

  /** {@inheritDoc} */
  @Override
  public void close() throws IOException {
    mOutputStream.close();
  }
}
| 7,060 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.hadoop.io.serializer.Deserializer;
/**
* Deserializes AvroWrapper objects within Hadoop.
*
* <p>
* Keys and values containing Avro types are more efficiently serialized outside
 * of the WritableSerialization model, so they are wrapped in
* {@link org.apache.avro.mapred.AvroWrapper} objects and deserialization is
* handled by this class.
* </p>
*
* <p>
* MapReduce jobs that use AvroWrapper objects as keys or values need to be
* configured with {@link AvroSerialization}. Use
* {@link org.apache.avro.mapreduce.AvroJob} to help with Job configuration.
* </p>
*
* @param <T> The type of Avro wrapper.
* @param <D> The Java type of the Avro data being wrapped.
*/
public abstract class AvroDeserializer<T extends AvroWrapper<D>, D> implements Deserializer<T> {
  /** The Avro writer schema for deserializing. */
  private final Schema mWriterSchema;

  /** The Avro reader schema for deserializing. */
  private final Schema mReaderSchema;

  /** The Avro datum reader for deserializing. */
  final DatumReader<D> mAvroDatumReader;

  /** An Avro binary decoder for deserializing. */
  private BinaryDecoder mAvroDecoder;

  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize (may
   *                     be null, in which case the writer schema is used).
   * @param classLoader  The class loader handed to the reflect data model for
   *                     resolving datum classes.
   */
  protected AvroDeserializer(Schema writerSchema, Schema readerSchema, ClassLoader classLoader) {
    mWriterSchema = writerSchema;
    mReaderSchema = null != readerSchema ? readerSchema : writerSchema;
    mAvroDatumReader = new ReflectDatumReader<>(mWriterSchema, mReaderSchema, new ReflectData(classLoader));
  }

  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize (may
   *                     be null, in which case the writer schema is used).
   * @param datumReader  The Avro datum reader to use for deserialization.
   */
  protected AvroDeserializer(Schema writerSchema, Schema readerSchema, DatumReader<D> datumReader) {
    mWriterSchema = writerSchema;
    mReaderSchema = null != readerSchema ? readerSchema : writerSchema;
    mAvroDatumReader = datumReader;
  }

  /**
   * Gets the writer schema used for deserializing.
   *
   * @return The writer schema.
   */
  public Schema getWriterSchema() {
    return mWriterSchema;
  }

  /**
   * Gets the reader schema used for deserializing.
   *
   * @return The reader schema.
   */
  public Schema getReaderSchema() {
    return mReaderSchema;
  }

  /** {@inheritDoc} */
  @Override
  public void open(InputStream inputStream) throws IOException {
    // Passing the previous decoder allows the factory to reuse it where possible.
    mAvroDecoder = DecoderFactory.get().directBinaryDecoder(inputStream, mAvroDecoder);
  }

  /** {@inheritDoc} */
  @Override
  public T deserialize(T avroWrapperToReuse) throws IOException {
    // Create a new Avro wrapper if there isn't one to reuse.
    if (null == avroWrapperToReuse) {
      avroWrapperToReuse = createAvroWrapper();
    }

    // Deserialize the Avro datum from the input stream, reusing the wrapper's
    // existing datum as scratch space when present.
    avroWrapperToReuse.datum(mAvroDatumReader.read(avroWrapperToReuse.datum(), mAvroDecoder));
    return avroWrapperToReuse;
  }

  /** {@inheritDoc} */
  @Override
  public void close() throws IOException {
    // Closing the decoder's input stream closes the stream passed to open().
    mAvroDecoder.inputStream().close();
  }

  /**
   * Creates a new empty <code>T</code> (extends AvroWrapper) instance.
   *
   * @return A new empty <code>T</code> instance.
   */
  protected abstract T createAvroWrapper();
}
| 7,061 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSerialization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.lang.reflect.Constructor;
import java.util.Collection;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.reflect.ReflectData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.util.ReflectionUtils;
/**
* The {@link org.apache.hadoop.io.serializer.Serialization} used by jobs
* configured with {@link org.apache.avro.mapreduce.AvroJob}.
*
* @param <T> The Java type of the Avro data to serialize.
*/
public class AvroSerialization<T> extends Configured implements Serialization<AvroWrapper<T>> {
  /**
   * Conf key for the writer schema of the AvroKey datum being
   * serialized/deserialized.
   */
  private static final String CONF_KEY_WRITER_SCHEMA = "avro.serialization.key.writer.schema";
  /**
   * Conf key for the reader schema of the AvroKey datum being
   * serialized/deserialized.
   */
  private static final String CONF_KEY_READER_SCHEMA = "avro.serialization.key.reader.schema";
  /**
   * Conf key for the writer schema of the AvroValue datum being
   * serialized/deserialized.
   */
  private static final String CONF_VALUE_WRITER_SCHEMA = "avro.serialization.value.writer.schema";
  /**
   * Conf key for the reader schema of the AvroValue datum being
   * serialized/deserialized.
   */
  private static final String CONF_VALUE_READER_SCHEMA = "avro.serialization.value.reader.schema";
  /** Conf key for the data model implementation class. */
  private static final String CONF_DATA_MODEL = "avro.serialization.data.model";
  /**
   * {@inheritDoc}
   *
   * Accepts only the Avro wrapper types ({@link AvroKey} and {@link AvroValue}).
   */
  @Override
  public boolean accept(Class<?> c) {
    return AvroKey.class.isAssignableFrom(c) || AvroValue.class.isAssignableFrom(c);
  }
  /**
   * Gets an object capable of deserializing the output from a Mapper.
   *
   * @param c The class to get a deserializer for.
   * @return A deserializer for objects of class <code>c</code>.
   */
  @Override
  public Deserializer<AvroWrapper<T>> getDeserializer(Class<AvroWrapper<T>> c) {
    Configuration conf = getConf();
    GenericData dataModel = createDataModel(conf);
    if (AvroKey.class.isAssignableFrom(c)) {
      Schema writerSchema = getKeyWriterSchema(conf);
      Schema readerSchema = getKeyReaderSchema(conf);
      // Only pass the reader schema when one was configured; otherwise the data
      // model reads with the writer schema alone.
      DatumReader<T> datumReader = (readerSchema != null) ? dataModel.createDatumReader(writerSchema, readerSchema)
          : dataModel.createDatumReader(writerSchema);
      return new AvroKeyDeserializer<>(writerSchema, readerSchema, datumReader);
    } else if (AvroValue.class.isAssignableFrom(c)) {
      Schema writerSchema = getValueWriterSchema(conf);
      Schema readerSchema = getValueReaderSchema(conf);
      // Same fallback as for keys: reader schema is optional.
      DatumReader<T> datumReader = (readerSchema != null) ? dataModel.createDatumReader(writerSchema, readerSchema)
          : dataModel.createDatumReader(writerSchema);
      return new AvroValueDeserializer<>(writerSchema, readerSchema, datumReader);
    } else {
      throw new IllegalStateException("Only AvroKey and AvroValue are supported.");
    }
  }
  /**
   * Gets an object capable of serializing output from a Mapper.
   *
   * @param c The class to get a serializer for.
   * @return A serializer for objects of class <code>c</code>.
   */
  @Override
  public Serializer<AvroWrapper<T>> getSerializer(Class<AvroWrapper<T>> c) {
    Configuration conf = getConf();
    // Serialization always uses the configured writer schema for the wrapper
    // type (key or value).
    Schema schema;
    if (AvroKey.class.isAssignableFrom(c)) {
      schema = getKeyWriterSchema(conf);
    } else if (AvroValue.class.isAssignableFrom(c)) {
      schema = getValueWriterSchema(conf);
    } else {
      throw new IllegalStateException("Only AvroKey and AvroValue are supported.");
    }
    GenericData dataModel = createDataModel(conf);
    DatumWriter<T> datumWriter = dataModel.createDatumWriter(schema);
    return new AvroSerializer<>(schema, datumWriter);
  }
  /**
   * Adds the AvroSerialization scheme to the configuration, so
   * SerializationFactory instances constructed from the given configuration will
   * be aware of it.
   *
   * @param conf The configuration to add AvroSerialization to.
   */
  public static void addToConfiguration(Configuration conf) {
    Collection<String> serializations = conf.getStringCollection("io.serializations");
    // Register only once to avoid duplicate entries in "io.serializations".
    if (!serializations.contains(AvroSerialization.class.getName())) {
      serializations.add(AvroSerialization.class.getName());
      conf.setStrings("io.serializations", serializations.toArray(new String[0]));
    }
  }
  /**
   * Sets the writer schema of the AvroKey datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @param schema The Avro key schema (must not be null).
   */
  public static void setKeyWriterSchema(Configuration conf, Schema schema) {
    if (null == schema) {
      throw new IllegalArgumentException("Writer schema may not be null");
    }
    conf.set(CONF_KEY_WRITER_SCHEMA, schema.toString());
  }
  /**
   * Sets the reader schema of the AvroKey datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @param schema The Avro key schema.
   */
  public static void setKeyReaderSchema(Configuration conf, Schema schema) {
    conf.set(CONF_KEY_READER_SCHEMA, schema.toString());
  }
  /**
   * Sets the writer schema of the AvroValue datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @param schema The Avro value schema (must not be null).
   */
  public static void setValueWriterSchema(Configuration conf, Schema schema) {
    if (null == schema) {
      throw new IllegalArgumentException("Writer schema may not be null");
    }
    conf.set(CONF_VALUE_WRITER_SCHEMA, schema.toString());
  }
  /**
   * Sets the reader schema of the AvroValue datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @param schema The Avro value schema.
   */
  public static void setValueReaderSchema(Configuration conf, Schema schema) {
    conf.set(CONF_VALUE_READER_SCHEMA, schema.toString());
  }
  /**
   * Sets the data model class for de/serialization.
   *
   * @param conf The configuration.
   * @param modelClass The data model class.
   */
  public static void setDataModelClass(Configuration conf, Class<? extends GenericData> modelClass) {
    conf.setClass(CONF_DATA_MODEL, modelClass, GenericData.class);
  }
  /**
   * Gets the writer schema of the AvroKey datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @return The Avro key writer schema, or null if none was set.
   */
  public static Schema getKeyWriterSchema(Configuration conf) {
    String json = conf.get(CONF_KEY_WRITER_SCHEMA);
    return null == json ? null : new Schema.Parser().parse(json);
  }
  /**
   * Gets the reader schema of the AvroKey datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @return The Avro key reader schema, or null if none was set.
   */
  public static Schema getKeyReaderSchema(Configuration conf) {
    String json = conf.get(CONF_KEY_READER_SCHEMA);
    return null == json ? null : new Schema.Parser().parse(json);
  }
  /**
   * Gets the writer schema of the AvroValue datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @return The Avro value writer schema, or null if none was set.
   */
  public static Schema getValueWriterSchema(Configuration conf) {
    String json = conf.get(CONF_VALUE_WRITER_SCHEMA);
    return null == json ? null : new Schema.Parser().parse(json);
  }
  /**
   * Gets the reader schema of the AvroValue datum that is being
   * serialized/deserialized.
   *
   * @param conf The configuration.
   * @return The Avro value reader schema, or null if none was set.
   */
  public static Schema getValueReaderSchema(Configuration conf) {
    String json = conf.get(CONF_VALUE_READER_SCHEMA);
    return null == json ? null : new Schema.Parser().parse(json);
  }
  /**
   * Gets the data model class for de/serialization.
   *
   * @param conf The configuration.
   * @return The configured data model class, defaulting to {@link ReflectData}.
   */
  public static Class<? extends GenericData> getDataModelClass(Configuration conf) {
    return conf.getClass(CONF_DATA_MODEL, ReflectData.class, GenericData.class);
  }
  // Instantiates the data model through its (ClassLoader) constructor so the
  // model resolves user classes with the job's class loader; the constructor
  // may be non-public, hence setAccessible(true).
  private static GenericData newDataModelInstance(Class<? extends GenericData> modelClass, Configuration conf) {
    GenericData dataModel;
    try {
      Constructor<? extends GenericData> ctor = modelClass.getDeclaredConstructor(ClassLoader.class);
      ctor.setAccessible(true);
      dataModel = ctor.newInstance(conf.getClassLoader());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    // Let the model pick up job settings if it implements Configurable.
    ReflectionUtils.setConf(dataModel, conf);
    return dataModel;
  }
  /**
   * Gets an instance of data model implementation, defaulting to
   * {@link ReflectData} if not explicitly specified.
   *
   * @param conf The job configuration.
   * @return Instance of the job data model implementation.
   */
  public static GenericData createDataModel(Configuration conf) {
    Class<? extends GenericData> modelClass = getDataModelClass(conf);
    return newDataModelInstance(modelClass, conf);
  }
}
| 7,062 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroValueDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.AvroWrapper;
/**
 * Deserializes AvroValue objects within Hadoop.
 *
 * @param <D> The java type of the avro data to deserialize.
 *
 * @see AvroDeserializer
 */
public class AvroValueDeserializer<D> extends AvroDeserializer<AvroWrapper<D>, D> {
  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize (may
   *                     be null, in which case the writer schema is used).
   * @param classLoader  The class loader used by the reflective data model.
   */
  public AvroValueDeserializer(Schema writerSchema, Schema readerSchema, ClassLoader classLoader) {
    super(writerSchema, readerSchema, classLoader);
  }
  /**
   * Constructor.
   *
   * @param writerSchema The Avro writer schema for the data to deserialize.
   * @param readerSchema The Avro reader schema for the data to deserialize (may
   *                     be null, in which case the writer schema is used).
   * @param datumReader  The Avro datum reader to use for deserialization.
   */
  public AvroValueDeserializer(Schema writerSchema, Schema readerSchema, DatumReader<D> datumReader) {
    super(writerSchema, readerSchema, datumReader);
  }
  /**
   * Creates a new empty <code>AvroValue</code> instance.
   *
   * @return a new empty AvroValue.
   */
  @Override
  protected AvroWrapper<D> createAvroWrapper() {
    return new AvroValue<>(null);
  }
}
| 7,063 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroDatumConverterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
/**
* Constructs converters that turn objects (usually from the output of a MR job)
* into Avro data that can be serialized.
*
* <p>
* Currently, only the following types have implemented converters:
* <ul>
* <li>AvroKey</li>
* <li>AvroValue</li>
* <li>BooleanWritable</li>
* <li>BytesWritable</li>
* <li>ByteWritable</li>
* <li>DoubleWritable</li>
* <li>FloatWritable</li>
* <li>IntWritable</li>
* <li>LongWritable</li>
* <li>NullWritable</li>
* <li>Text</li>
* </ul>
* </p>
*/
public class AvroDatumConverterFactory extends Configured {
  /**
   * Creates a new <code>AvroDatumConverterFactory</code> instance.
   *
   * @param conf The job configuration.
   */
  public AvroDatumConverterFactory(Configuration conf) {
    super(conf);
  }

  /**
   * Creates a converter that turns objects of type <code>inputClass</code> into
   * Avro data.
   *
   * @param <IN>       The input (Writable or AvroWrapper) type to convert.
   * @param <OUT>      The Avro datum type the converter produces.
   * @param inputClass The type of input data to convert.
   * @return A converter that turns objects of type <code>inputClass</code> into
   *         Avro data.
   * @throws IllegalStateException         If the required output schema was not
   *                                       configured on the job.
   * @throws UnsupportedOperationException If no converter exists for
   *                                       <code>inputClass</code>.
   */
  @SuppressWarnings("unchecked")
  public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) {
    // In a map-only job the map output schemas take precedence over the job
    // output schemas.
    boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0;
    if (AvroKey.class.isAssignableFrom(inputClass)) {
      Schema schema;
      if (isMapOnly) {
        schema = AvroJob.getMapOutputKeySchema(getConf());
        if (null == schema) {
          schema = AvroJob.getOutputKeySchema(getConf());
        }
      } else {
        schema = AvroJob.getOutputKeySchema(getConf());
      }
      if (null == schema) {
        throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema().");
      }
      return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
    }
    if (AvroValue.class.isAssignableFrom(inputClass)) {
      Schema schema;
      if (isMapOnly) {
        schema = AvroJob.getMapOutputValueSchema(getConf());
        if (null == schema) {
          schema = AvroJob.getOutputValueSchema(getConf());
        }
      } else {
        schema = AvroJob.getOutputValueSchema(getConf());
      }
      if (null == schema) {
        throw new IllegalStateException(
            "Writer schema for output value was not set. Use AvroJob.setOutputValueSchema().");
      }
      return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
    }
    if (BooleanWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter();
    }
    if (BytesWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter();
    }
    if (ByteWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter();
    }
    if (DoubleWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter();
    }
    if (FloatWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter();
    }
    if (IntWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new IntWritableConverter();
    }
    if (LongWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new LongWritableConverter();
    }
    if (NullWritable.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new NullWritableConverter();
    }
    if (Text.class.isAssignableFrom(inputClass)) {
      return (AvroDatumConverter<IN, OUT>) new TextConverter();
    }
    throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName());
  }

  /** Converts AvroWrappers into their wrapped Avro data. */
  public static class AvroWrapperConverter extends AvroDatumConverter<AvroWrapper<?>, Object> {
    private final Schema mSchema;

    /**
     * Constructor.
     *
     * @param schema The writer schema of the wrapped Avro data.
     */
    public AvroWrapperConverter(Schema schema) {
      mSchema = schema;
    }

    /** {@inheritDoc} */
    @Override
    public Object convert(AvroWrapper<?> input) {
      return input.datum();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts BooleanWritables into Booleans. */
  public static class BooleanWritableConverter extends AvroDatumConverter<BooleanWritable, Boolean> {
    private final Schema mSchema;

    /** Constructor. */
    public BooleanWritableConverter() {
      mSchema = Schema.create(Schema.Type.BOOLEAN);
    }

    /** {@inheritDoc} */
    @Override
    public Boolean convert(BooleanWritable input) {
      return input.get();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts BytesWritables into ByteBuffers. */
  public static class BytesWritableConverter extends AvroDatumConverter<BytesWritable, ByteBuffer> {
    private final Schema mSchema;

    /** Constructor. */
    public BytesWritableConverter() {
      mSchema = Schema.create(Schema.Type.BYTES);
    }

    /** {@inheritDoc} */
    @Override
    public ByteBuffer convert(BytesWritable input) {
      // BytesWritable's backing array may be longer than the valid data
      // (getBytes() is only valid up to getLength()), so wrap only the valid
      // region to avoid serializing trailing garbage bytes.
      return ByteBuffer.wrap(input.getBytes(), 0, input.getLength());
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts ByteWritables into GenericFixed of size 1. */
  public static class ByteWritableConverter extends AvroDatumConverter<ByteWritable, GenericFixed> {
    private final Schema mSchema;

    /** Constructor. */
    public ByteWritableConverter() {
      mSchema = Schema.createFixed("Byte", "A single byte", "org.apache.avro.mapreduce", 1);
    }

    /** {@inheritDoc} */
    @Override
    public GenericFixed convert(ByteWritable input) {
      return new GenericData.Fixed(mSchema, new byte[] { input.get() });
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts DoubleWritables into Doubles. */
  public static class DoubleWritableConverter extends AvroDatumConverter<DoubleWritable, Double> {
    private final Schema mSchema;

    /** Constructor. */
    public DoubleWritableConverter() {
      mSchema = Schema.create(Schema.Type.DOUBLE);
    }

    /** {@inheritDoc} */
    @Override
    public Double convert(DoubleWritable input) {
      return input.get();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts FloatWritables into Floats. */
  public static class FloatWritableConverter extends AvroDatumConverter<FloatWritable, Float> {
    private final Schema mSchema;

    /** Constructor. */
    public FloatWritableConverter() {
      mSchema = Schema.create(Schema.Type.FLOAT);
    }

    /** {@inheritDoc} */
    @Override
    public Float convert(FloatWritable input) {
      return input.get();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts IntWritables into Ints. */
  public static class IntWritableConverter extends AvroDatumConverter<IntWritable, Integer> {
    private final Schema mSchema;

    /** Constructor. */
    public IntWritableConverter() {
      mSchema = Schema.create(Schema.Type.INT);
    }

    /** {@inheritDoc} */
    @Override
    public Integer convert(IntWritable input) {
      return input.get();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts LongWritables into Longs. */
  public static class LongWritableConverter extends AvroDatumConverter<LongWritable, Long> {
    private final Schema mSchema;

    /** Constructor. */
    public LongWritableConverter() {
      mSchema = Schema.create(Schema.Type.LONG);
    }

    /** {@inheritDoc} */
    @Override
    public Long convert(LongWritable input) {
      return input.get();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts NullWritables into Nulls. */
  public static class NullWritableConverter extends AvroDatumConverter<NullWritable, Object> {
    private final Schema mSchema;

    /** Constructor. */
    public NullWritableConverter() {
      mSchema = Schema.create(Schema.Type.NULL);
    }

    /** {@inheritDoc} */
    @Override
    public Object convert(NullWritable input) {
      return null;
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }

  /** Converts Text into CharSequences. */
  public static class TextConverter extends AvroDatumConverter<Text, CharSequence> {
    private final Schema mSchema;

    /** Constructor. */
    public TextConverter() {
      mSchema = Schema.create(Schema.Type.STRING);
    }

    /** {@inheritDoc} */
    @Override
    public CharSequence convert(Text input) {
      return input.toString();
    }

    /** {@inheritDoc} */
    @Override
    public Schema getWriterSchema() {
      return mSchema;
    }
  }
}
| 7,064 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroKeyValue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.hadoop.io;
import java.util.Arrays;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
/**
* A helper object for working with the Avro generic records that are used to
* store key/value pairs in an Avro container file.
*
* @param <K> The java type for the key.
* @param <V> The java type for the value.
*/
public class AvroKeyValue<K, V> {
  /** The name of the key value pair generic record. */
  public static final String KEY_VALUE_PAIR_RECORD_NAME = "KeyValuePair";
  /** The namespace of the key value pair generic record. */
  public static final String KEY_VALUE_PAIR_RECORD_NAMESPACE = "org.apache.avro.mapreduce";
  /** The name of the generic record field containing the key. */
  public static final String KEY_FIELD = "key";
  /** The name of the generic record field containing the value. */
  public static final String VALUE_FIELD = "value";

  /** The underlying two-field generic record holding the key/value pair. */
  private final GenericRecord mRecord;

  /**
   * Wraps a GenericRecord that is a key value pair.
   */
  public AvroKeyValue(GenericRecord keyValueRecord) {
    mRecord = keyValueRecord;
  }

  /**
   * Gets the wrapped key/value GenericRecord.
   *
   * @return The key/value Avro generic record.
   */
  public GenericRecord get() {
    return mRecord;
  }

  /**
   * Read the key.
   *
   * @return The key from the key/value generic record.
   */
  @SuppressWarnings("unchecked")
  public K getKey() {
    return (K) mRecord.get(KEY_FIELD);
  }

  /**
   * Read the value.
   *
   * @return The value from the key/value generic record.
   */
  @SuppressWarnings("unchecked")
  public V getValue() {
    return (V) mRecord.get(VALUE_FIELD);
  }

  /**
   * Sets the key.
   *
   * @param key The key.
   */
  public void setKey(K key) {
    mRecord.put(KEY_FIELD, key);
  }

  /**
   * Sets the value.
   *
   * @param value The value.
   */
  public void setValue(V value) {
    mRecord.put(VALUE_FIELD, value);
  }

  /**
   * Creates a KeyValuePair generic record schema.
   *
   * @param keySchema   The schema of the 'key' field.
   * @param valueSchema The schema of the 'value' field.
   * @return A schema for a generic record with two fields: 'key' and 'value'.
   */
  public static Schema getSchema(Schema keySchema, Schema valueSchema) {
    Schema.Field keyField = new Schema.Field(KEY_FIELD, keySchema, "The key", null);
    Schema.Field valueField = new Schema.Field(VALUE_FIELD, valueSchema, "The value", null);
    Schema pairSchema = Schema.createRecord(KEY_VALUE_PAIR_RECORD_NAME, "A key/value pair",
        KEY_VALUE_PAIR_RECORD_NAMESPACE, false);
    pairSchema.setFields(Arrays.asList(keyField, valueField));
    return pairSchema;
  }

  /**
   * A wrapper for iterators over GenericRecords that are known to be KeyValue
   * records.
   *
   * @param <K> The key type.
   * @param <V> The value type.
   */
  public static class Iterator<K, V> implements java.util.Iterator<AvroKeyValue<K, V>> {
    /** The underlying iterator over generic records. */
    private final java.util.Iterator<? extends GenericRecord> mWrapped;

    /**
     * Constructs an iterator over key-value map entries out of a generic iterator.
     *
     * @param genericIterator An iterator over some generic record entries.
     */
    public Iterator(java.util.Iterator<? extends GenericRecord> genericIterator) {
      mWrapped = genericIterator;
    }

    /** {@inheritDoc} */
    @Override
    public boolean hasNext() {
      return mWrapped.hasNext();
    }

    /** {@inheritDoc} */
    @Override
    public AvroKeyValue<K, V> next() {
      GenericRecord entry = mWrapped.next();
      // Propagate null entries unchanged instead of wrapping them.
      return (entry == null) ? null : new AvroKeyValue<>(entry);
    }

    /** {@inheritDoc} */
    @Override
    public void remove() {
      mWrapped.remove();
    }
  }
}
| 7,065 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyValueRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroDatumConverter;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Writes key/value pairs to an Avro container file.
*
* <p>
* Each entry in the Avro container file will be a generic record with two
* fields, named 'key' and 'value'. The input types may be basic Writable
* objects like Text or IntWritable, or they may be AvroWrapper subclasses
* (AvroKey or AvroValue). Writable objects will be converted to their
* corresponding Avro types when written to the generic record key/value pair.
* </p>
*
* @param <K> The type of key to write.
* @param <V> The type of value to write.
*/
public class AvroKeyValueRecordWriter<K, V> extends RecordWriter<K, V> implements Syncable {
  /** A writer for the Avro container file. */
  private final DataFileWriter<GenericRecord> mAvroFileWriter;
  /**
   * The writer schema for the generic record entries of the Avro container file.
   */
  private final Schema mKeyValuePairSchema;
  /** A reusable Avro generic record for writing key/value pairs to the file. */
  private final AvroKeyValue<Object, Object> mOutputRecord;
  /** A helper object that converts the input key to an Avro datum. */
  private final AvroDatumConverter<K, ?> mKeyConverter;
  /** A helper object that converts the input value to an Avro datum. */
  private final AvroDatumConverter<V, ?> mValueConverter;

  /**
   * Constructor.
   *
   * @param keyConverter     A key to Avro datum converter.
   * @param valueConverter   A value to Avro datum converter.
   * @param dataModel        The data model for key and value.
   * @param compressionCodec A compression codec factory for the Avro container
   *                         file.
   * @param outputStream     The output stream to write the Avro container file
   *                         to.
   * @param syncInterval     The sync interval for the Avro container file.
   * @throws IOException If the record writer cannot be opened.
   */
  public AvroKeyValueRecordWriter(AvroDatumConverter<K, ?> keyConverter, AvroDatumConverter<V, ?> valueConverter,
      GenericData dataModel, CodecFactory compressionCodec, OutputStream outputStream, int syncInterval)
      throws IOException {
    // Create the generic record schema for the key/value pair.
    mKeyValuePairSchema = AvroKeyValue.getSchema(keyConverter.getWriterSchema(), valueConverter.getWriterSchema());

    // Create an Avro container file and a writer to it.
    // Use the diamond operator for consistency with the rest of this class.
    mAvroFileWriter = new DataFileWriter<>(dataModel.createDatumWriter(mKeyValuePairSchema));
    mAvroFileWriter.setCodec(compressionCodec);
    mAvroFileWriter.setSyncInterval(syncInterval);
    mAvroFileWriter.create(mKeyValuePairSchema, outputStream);

    // Keep a reference to the converters.
    mKeyConverter = keyConverter;
    mValueConverter = valueConverter;

    // Create a reusable output record.
    mOutputRecord = new AvroKeyValue<>(new GenericData.Record(mKeyValuePairSchema));
  }

  /**
   * Constructor. Uses the default sync interval for the Avro container file.
   *
   * @param keyConverter     A key to Avro datum converter.
   * @param valueConverter   A value to Avro datum converter.
   * @param dataModel        The data model for key and value.
   * @param compressionCodec A compression codec factory for the Avro container
   *                         file.
   * @param outputStream     The output stream to write the Avro container file
   *                         to.
   * @throws IOException If the record writer cannot be opened.
   */
  public AvroKeyValueRecordWriter(AvroDatumConverter<K, ?> keyConverter, AvroDatumConverter<V, ?> valueConverter,
      GenericData dataModel, CodecFactory compressionCodec, OutputStream outputStream) throws IOException {
    this(keyConverter, valueConverter, dataModel, compressionCodec, outputStream,
        DataFileConstants.DEFAULT_SYNC_INTERVAL);
  }

  /**
   * Gets the writer schema for the key/value pair generic record.
   *
   * @return The writer schema used for entries of the Avro container file.
   */
  public Schema getWriterSchema() {
    return mKeyValuePairSchema;
  }

  /**
   * {@inheritDoc}
   *
   * Converts the key and value to Avro datums, stores them in the reusable
   * key/value record, and appends that record to the container file.
   */
  @Override
  public void write(K key, V value) throws IOException {
    mOutputRecord.setKey(mKeyConverter.convert(key));
    mOutputRecord.setValue(mValueConverter.convert(value));
    mAvroFileWriter.append(mOutputRecord.get());
  }

  /** {@inheritDoc} */
  @Override
  public void close(TaskAttemptContext context) throws IOException {
    mAvroFileWriter.close();
  }

  /**
   * {@inheritDoc}
   *
   * Delegates to the underlying {@code DataFileWriter#sync()} and returns its
   * result.
   */
  @Override
  public long sync() throws IOException {
    return mAvroFileWriter.sync();
  }
}
| 7,066 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyValueOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.io.AvroDatumConverter;
import org.apache.avro.hadoop.io.AvroDatumConverterFactory;
import org.apache.avro.hadoop.io.AvroSerialization;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* FileOutputFormat for writing Avro container files of key/value pairs.
*
* <p>
* Since Avro container files can only contain records (not key/value pairs),
* this output format puts the key and value into an Avro generic record with
* two fields, named 'key' and 'value'.
* </p>
*
* <p>
* The keys and values given to this output format may be Avro objects wrapped
* in <code>AvroKey</code> or <code>AvroValue</code> objects. The basic Writable
* types are also supported (e.g., IntWritable, Text); they will be converted to
* their corresponding Avro types.
* </p>
*
* @param <K> The type of key. If an Avro type, it must be wrapped in an
* <code>AvroKey</code>.
* @param <V> The type of value. If an Avro type, it must be wrapped in an
* <code>AvroValue</code>.
*/
public class AvroKeyValueOutputFormat<K, V> extends AvroOutputFormatBase<K, V> {
  /** {@inheritDoc} */
  @Override
  @SuppressWarnings("unchecked")
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException {
    Configuration configuration = context.getConfiguration();

    // Build converters that map the job's declared key/value classes to Avro datums.
    AvroDatumConverterFactory factory = new AvroDatumConverterFactory(configuration);
    AvroDatumConverter<K, ?> keyConverter = factory.create((Class<K>) context.getOutputKeyClass());
    AvroDatumConverter<V, ?> valueConverter = factory.create((Class<V>) context.getOutputValueClass());

    GenericData model = AvroSerialization.createDataModel(configuration);
    OutputStream outputStream = getAvroFileOutputStream(context);
    try {
      return new AvroKeyValueRecordWriter<>(keyConverter, valueConverter, model, getCompressionCodec(context),
          outputStream, getSyncInterval(context));
    } catch (IOException e) {
      // Do not leak the stream when the writer cannot be constructed.
      outputStream.close();
      throw e;
    }
  }
}
| 7,067 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroSequenceFileInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.avro.hadoop.io.AvroSequenceFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
/**
* An input format for reading from AvroSequenceFiles (sequence files that
* support Avro data).
*
* @param <K> The input key type.
* @param <V> The input value type.
*/
public class AvroSequenceFileInputFormat<K, V> extends SequenceFileInputFormat<K, V> {
/** {@inheritDoc} */
@Override
public RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext context) throws IOException {
return new AvroSequenceFileRecordReader();
}
/**
 * Reads records from a SequenceFile that supports Avro data.
 *
 * <p>
 * This class is based on Hadoop's SequenceFileRecordReader, modified to
 * construct an AvroSequenceFile.Reader instead of a SequenceFile.Reader.
 * </p>
 */
protected class AvroSequenceFileRecordReader extends RecordReader<K, V> {
// Reader over the underlying (Avro-capable) sequence file.
private SequenceFile.Reader mReader;
// Byte position where this split's records begin, after syncing.
private long mStart;
// Byte position where this split ends.
private long mEnd;
// False once the reader has passed the end of the split.
private boolean mHasMoreData;
// Key/value of the record most recently read; null when exhausted.
private K mCurrentKey;
private V mCurrentValue;
/** {@inheritDoc} */
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
FileSplit fileSplit = (FileSplit) split;
Configuration conf = context.getConfiguration();
Path path = fileSplit.getPath();
FileSystem fs = path.getFileSystem(conf);
// Configure the SequenceFile reader.
AvroSequenceFile.Reader.Options options = new AvroSequenceFile.Reader.Options().withFileSystem(fs)
.withInputPath(path).withConfiguration(conf);
// Reader schemas are optional; when unset the writer schemas are used.
Schema keySchema = AvroJob.getInputKeySchema(conf);
if (null != keySchema) {
options.withKeySchema(keySchema);
}
Schema valueSchema = AvroJob.getInputValueSchema(conf);
if (null != valueSchema) {
options.withValueSchema(valueSchema);
}
mReader = new AvroSequenceFile.Reader(options);
mEnd = fileSplit.getStart() + fileSplit.getLength();
if (fileSplit.getStart() > mReader.getPosition()) {
// Sync to the beginning of the input split.
mReader.sync(fileSplit.getStart());
}
// mStart is the synced position, which may differ from the split's start.
mStart = mReader.getPosition();
mHasMoreData = mStart < mEnd;
}
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!mHasMoreData) {
return false;
}
long pos = mReader.getPosition();
mCurrentKey = (K) mReader.next(mCurrentKey);
// Stop after the first sync marker past the end of the split: records up to
// that marker belong to this split, the rest to the next one.
if (null == mCurrentKey || (pos >= mEnd && mReader.syncSeen())) {
mHasMoreData = false;
mCurrentKey = null;
mCurrentValue = null;
} else {
mCurrentValue = (V) mReader.getCurrentValue(mCurrentValue);
}
return mHasMoreData;
}
/** {@inheritDoc} */
@Override
public K getCurrentKey() {
return mCurrentKey;
}
/** {@inheritDoc} */
@Override
public V getCurrentValue() {
return mCurrentValue;
}
/** {@inheritDoc} */
@Override
public float getProgress() throws IOException {
if (mEnd == mStart) {
// Empty split: report zero progress rather than dividing by zero.
return 0.0f;
} else {
return Math.min(1.0f, (mReader.getPosition() - mStart) / (float) (mEnd - mStart));
}
}
/** {@inheritDoc} */
@Override
public synchronized void close() throws IOException {
mReader.close();
}
}
}
| 7,068 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyValueRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
/**
* Reads Avro generic records from an Avro container file, where the records
* contain two fields: 'key' and 'value'.
*
* <p>
* The contents of the 'key' field will be parsed into an AvroKey object. The
* contents of the 'value' field will be parsed into an AvroValue object.
* </p>
*
* @param <K> The type of the Avro key to read.
* @param <V> The type of the Avro value to read.
*/
public class AvroKeyValueRecordReader<K, V> extends AvroRecordReaderBase<AvroKey<K>, AvroValue<V>, GenericRecord> {
  /** Reusable wrapper holding the key of the record the reader is on. */
  private final AvroKey<K> mCurrentKey;

  /** Reusable wrapper holding the value of the record the reader is on. */
  private final AvroValue<V> mCurrentValue;

  /**
   * Constructor.
   *
   * @param keyReaderSchema The reader schema for the key within the generic
   *                        record.
   * @param valueReaderSchema The reader schema for the value within the generic
   *                          record.
   */
  public AvroKeyValueRecordReader(Schema keyReaderSchema, Schema valueReaderSchema) {
    super(AvroKeyValue.getSchema(keyReaderSchema, valueReaderSchema));
    mCurrentKey = new AvroKey<>(null);
    mCurrentValue = new AvroValue<>(null);
  }

  /** {@inheritDoc} */
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    if (!super.nextKeyValue()) {
      // Input exhausted: clear the wrappers so stale datums are not exposed.
      mCurrentKey.datum(null);
      mCurrentValue.datum(null);
      return false;
    }
    // Split the current 'key'/'value' generic record into the two wrappers.
    AvroKeyValue<K, V> keyValuePair = new AvroKeyValue<>(getCurrentRecord());
    mCurrentKey.datum(keyValuePair.getKey());
    mCurrentValue.datum(keyValuePair.getValue());
    return true;
  }

  /** {@inheritDoc} */
  @Override
  public AvroKey<K> getCurrentKey() throws IOException, InterruptedException {
    return mCurrentKey;
  }

  /** {@inheritDoc} */
  @Override
  public AvroValue<V> getCurrentValue() throws IOException, InterruptedException {
    return mCurrentValue;
  }
}
| 7,069 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyValueInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A MapReduce InputFormat that reads from Avro container files of key/value
* generic records.
*
* <p>
 * Avro container files that contain generic records with the two fields 'key'
* and 'value' are expected. The contents of the 'key' field will be used as the
* job input key, and the contents of the 'value' field will be used as the job
* output value.
* </p>
*
* @param <K> The type of the Avro key to read.
* @param <V> The type of the Avro value to read.
*/
public class AvroKeyValueInputFormat<K, V> extends FileInputFormat<AvroKey<K>, AvroValue<V>> {
  private static final Logger LOG = LoggerFactory.getLogger(AvroKeyValueInputFormat.class);

  /** {@inheritDoc} */
  @Override
  public RecordReader<AvroKey<K>, AvroValue<V>> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    // Both reader schemas are optional; a null schema is passed through to the
    // record reader, which then reads with the writer schema.
    Schema keyReaderSchema = AvroJob.getInputKeySchema(context.getConfiguration());
    if (keyReaderSchema == null) {
      LOG.warn("Key reader schema was not set. Use AvroJob.setInputKeySchema() if desired.");
      LOG.info("Using a key reader schema equal to the writer schema.");
    }
    Schema valueReaderSchema = AvroJob.getInputValueSchema(context.getConfiguration());
    if (valueReaderSchema == null) {
      LOG.warn("Value reader schema was not set. Use AvroJob.setInputValueSchema() if desired.");
      LOG.info("Using a value reader schema equal to the writer schema.");
    }
    return new AvroKeyValueRecordReader<>(keyReaderSchema, valueReaderSchema);
  }
}
| 7,070 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A MapReduce InputFormat that can handle Avro container files.
*
* <p>
* Keys are AvroKey wrapper objects that contain the Avro data. Since Avro
* container files store only records (not key/value pairs), the value from this
* InputFormat is a NullWritable.
* </p>
*/
public class AvroKeyInputFormat<T> extends FileInputFormat<AvroKey<T>, NullWritable> {
  private static final Logger LOG = LoggerFactory.getLogger(AvroKeyInputFormat.class);

  /** {@inheritDoc} */
  @Override
  public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    // The reader schema is optional; a null schema is passed through to the
    // record reader, which then reads with the writer schema.
    Schema readerSchema = AvroJob.getInputKeySchema(context.getConfiguration());
    if (readerSchema == null) {
      LOG.warn("Reader schema was not set. Use AvroJob.setInputKeySchema() if desired.");
      LOG.info("Using a reader schema equal to the writer schema.");
    }
    return new AvroKeyRecordReader<>(readerSchema);
  }
}
| 7,071 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroMultipleOutputs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* The AvroMultipleOutputs class simplifies writing Avro output data to multiple
* outputs
*
* <p>
* Case one: writing to additional outputs other than the job default output.
*
* Each additional output, or named output, may be configured with its own
* <code>Schema</code> and <code>OutputFormat</code>.
* </p>
* <p>
* Case two: to write data to different files provided by user
* </p>
*
* <p>
* AvroMultipleOutputs supports counters, by default they are disabled. The
* counters group is the {@link AvroMultipleOutputs} class name. The names of
* the counters are the same as the output name. These count the number of
* records written to each output name.
* </p>
*
* Usage pattern for job submission:
*
* <pre>
*
* Job job = Job.getInstance();
*
* FileInputFormat.setInputPath(job, inDir);
* FileOutputFormat.setOutputPath(job, outDir);
*
* job.setMapperClass(MyAvroMapper.class);
* job.setReducerClass(MyAvroReducer.class);
* ...
*
* Schema schema;
* ...
* // Defines additional single output 'avro1' for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro1", AvroKeyValueOutputFormat.class,
 * keyschema, valueSchema); // valueSchema can be set to null if only the key
 *                          // is to be written to the file in the RecordWriter
*
* // Defines additional output 'avro2' with different schema for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro2",
* AvroKeyOutputFormat.class,
* schema,null);
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
* <p>
* Usage in Reducer:
*
* <pre>
*
* public class MyAvroReducer extends
* Reducer<K, V, T, NullWritable> {
* private MultipleOutputs amos;
*
*
* public void setup(Context context) {
* ...
* amos = new AvroMultipleOutputs(context);
* }
*
* public void reduce(K, Iterator<V> values,Context context)
* throws IOException {
* ...
* amos.write("avro1",datum,NullWritable.get());
* amos.write("avro2",datum,NullWritable.get());
* amos.getCollector("avro3",datum); // here the value is taken as NullWritable
* ...
* }
*
* public void cleanup(Context context) throws IOException {
* amos.close();
* ...
* }
*
* }
* </pre>
*/
public class AvroMultipleOutputs {
/** Configuration key holding the space-separated list of named outputs. */
private static final String MULTIPLE_OUTPUTS = "avro.mapreduce.multipleoutputs";
/** Prefix of every per-named-output configuration key. */
private static final String MO_PREFIX = "avro.mapreduce.multipleoutputs.namedOutput.";
/** Key suffix under which a named output's OutputFormat class is stored. */
private static final String FORMAT = ".format";
/** Configuration key that toggles the per-named-output counters. */
private static final String COUNTERS_ENABLED = "avro.mapreduce.multipleoutputs.counters";
/**
 * Counters group used by the counters of MultipleOutputs.
 */
private static final String COUNTERS_GROUP = AvroMultipleOutputs.class.getName();
/**
 * Cache for the taskContexts, keyed by named output name.
 */
private Map<String, TaskAttemptContext> taskContexts = new HashMap<>();
/**
 * Checks that a named output name is a valid token: non-empty and made up of
 * ASCII letters and digits only.
 *
 * @param namedOutput named output Name
 * @throws IllegalArgumentException if the output name is not valid.
 */
private static void checkTokenName(String namedOutput) {
  if (namedOutput == null || namedOutput.isEmpty()) {
    throw new IllegalArgumentException("Name cannot be NULL or empty");
  }
  for (char ch : namedOutput.toCharArray()) {
    // Only ASCII alphanumerics are allowed; anything else is rejected.
    boolean alphanumeric = (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9');
    if (!alphanumeric) {
      throw new IllegalArgumentException("Name cannot have a '" + ch + "' char");
    }
  }
}
/**
 * Checks if output name is valid.
 *
 * name cannot be the name used for the default output
 *
 * @param outputPath base output Name
 * @throws IllegalArgumentException if the output name is not valid.
 */
private static void checkBaseOutputPath(String outputPath) {
// 'part' is reserved: it is the base name used for the job's default output files.
if (outputPath.equals("part")) {
throw new IllegalArgumentException("output name cannot be 'part'");
}
}
/**
 * Checks if a named output name is valid.
 *
 * @param job the job context whose configuration lists the defined named
 *            outputs
 * @param namedOutput named output Name
 * @param alreadyDefined when true, the name must NOT already be defined
 *                       (registration path); when false, it must already be
 *                       defined (write path)
 * @throws IllegalArgumentException if the output name is not valid.
 */
private static void checkNamedOutputName(JobContext job, String namedOutput, boolean alreadyDefined) {
  checkTokenName(namedOutput);
  checkBaseOutputPath(namedOutput);
  List<String> definedChannels = getNamedOutputsList(job);
  if (alreadyDefined && definedChannels.contains(namedOutput)) {
    // Message previously read "already alreadyDefined" (duplicated word).
    throw new IllegalArgumentException("Named output '" + namedOutput + "' already defined");
  } else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
    throw new IllegalArgumentException("Named output '" + namedOutput + "' not defined");
  }
}
// Returns list of channel names.
// Note: StringTokenizer skips empty tokens, so the leading space that
// addNamedOutput() prepends to the config value is harmless here.
private static List<String> getNamedOutputsList(JobContext job) {
List<String> names = new ArrayList<>();
StringTokenizer st = new StringTokenizer(job.getConfiguration().get(MULTIPLE_OUTPUTS, ""), " ");
while (st.hasMoreTokens()) {
names.add(st.nextToken());
}
return names;
}
// Returns the named output OutputFormat.
// Resolves the class registered under MO_PREFIX + namedOutput + FORMAT, or
// null when no format was configured for this named output.
@SuppressWarnings("unchecked")
private static Class<? extends OutputFormat<?, ?>> getNamedOutputFormatClass(JobContext job, String namedOutput) {
return (Class<? extends OutputFormat<?, ?>>) job.getConfiguration().getClass(MO_PREFIX + namedOutput + FORMAT, null,
OutputFormat.class);
}
/**
 * Adds a named output for the job, with no value schema (for output formats
 * that write only a key).
 * <p/>
 *
 * @param job job to add the named output
 * @param namedOutput named output name, it has to be a word, letters and
 *                    numbers only, cannot be the word 'part' as that is
 *                    reserved for the default output.
 * @param outputFormatClass OutputFormat class.
 * @param keySchema Schema for the Key
 */
@SuppressWarnings("unchecked")
public static void addNamedOutput(Job job, String namedOutput, Class<? extends OutputFormat> outputFormatClass,
Schema keySchema) {
addNamedOutput(job, namedOutput, outputFormatClass, keySchema, null);
}
/**
 * Adds a named output for the job.
 * <p/>
 * Note: {@code keySchema} must not be null (its JSON form is stored in the
 * configuration); {@code valueSchema} is stored only when non-null.
 *
 * @param job job to add the named output
 * @param namedOutput named output name, it has to be a word, letters and
 *                    numbers only, cannot be the word 'part' as that is
 *                    reserved for the default output.
 * @param outputFormatClass OutputFormat class.
 * @param keySchema Schema for the Key
 * @param valueSchema Schema for the Value (used in case of
 *                    AvroKeyValueOutputFormat or null)
 */
@SuppressWarnings("unchecked")
public static void addNamedOutput(Job job, String namedOutput, Class<? extends OutputFormat> outputFormatClass,
Schema keySchema, Schema valueSchema) {
// Rejects invalid names and duplicates.
checkNamedOutputName(job, namedOutput, true);
Configuration conf = job.getConfiguration();
// Append the name to the space-separated list of named outputs.
conf.set(MULTIPLE_OUTPUTS, conf.get(MULTIPLE_OUTPUTS, "") + " " + namedOutput);
conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass, OutputFormat.class);
conf.set(MO_PREFIX + namedOutput + ".keyschema", keySchema.toString());
if (valueSchema != null) {
conf.set(MO_PREFIX + namedOutput + ".valueschema", valueSchema.toString());
}
}
/**
 * Enables or disables counters for the named outputs.
 *
 * The counters group is the {@link AvroMultipleOutputs} class name. The names
 * of the counters are the same as the named outputs. These counters count the
 * number records written to each output name. By default these counters are
 * disabled.
 *
 * @param job job to enable counters
 * @param enabled indicates if the counters will be enabled or not.
 * @see #getCountersEnabled(JobContext)
 */
public static void setCountersEnabled(Job job, boolean enabled) {
job.getConfiguration().setBoolean(COUNTERS_ENABLED, enabled);
}
/**
 * Returns if the counters for the named outputs are enabled or not. By default
 * these counters are disabled.
 *
 * @param job the job
 * @return TRUE if the counters are enabled, FALSE if they are disabled.
 * @see #setCountersEnabled(Job, boolean)
 */
public static boolean getCountersEnabled(JobContext job) {
return job.getConfiguration().getBoolean(COUNTERS_ENABLED, false);
}
/**
 * A RecordWriter decorator that increments a named counter on every write and
 * otherwise delegates to the wrapped writer.
 */
@SuppressWarnings("unchecked")
private static class RecordWriterWithCounter extends RecordWriter {
  private final RecordWriter delegate;
  private final String counterName;
  private final TaskInputOutputContext context;

  public RecordWriterWithCounter(RecordWriter writer, String counterName, TaskInputOutputContext context) {
    this.delegate = writer;
    this.counterName = counterName;
    this.context = context;
  }

  @SuppressWarnings({ "unchecked" })
  @Override
  public void write(Object key, Object value) throws IOException, InterruptedException {
    // Bump the per-named-output counter, then perform the real write.
    context.getCounter(COUNTERS_GROUP, counterName).increment(1);
    delegate.write(key, value);
  }

  @Override
  public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    delegate.close(context);
  }
}
// instance code, to be used from Mapper/Reducer code
/** The task context through which all writes and counters flow. */
private TaskInputOutputContext<?, ?, ?, ?> context;
/** Immutable snapshot of the named outputs defined for the job. */
private Set<String> namedOutputs;
/** Cache of record writers, keyed by base output file name. */
private Map<String, RecordWriter<?, ?>> recordWriters;
/** Whether per-named-output counters are enabled (see COUNTERS_ENABLED). */
private boolean countersEnabled;
/**
 * Creates and initializes multiple outputs support, it should be instantiated
 * in the Mapper/Reducer setup method.
 *
 * @param context the TaskInputOutputContext object
 */
public AvroMultipleOutputs(TaskInputOutputContext<?, ?, ?, ?> context) {
this.context = context;
// Snapshot the named outputs defined in the configuration; the set stays
// immutable for the lifetime of this instance.
namedOutputs = Collections.unmodifiableSet(new HashSet<>(AvroMultipleOutputs.getNamedOutputsList(context)));
recordWriters = new HashMap<>();
countersEnabled = getCountersEnabled(context);
}
/**
 * Write key and value to the namedOutput; the value written is NullWritable.
 *
 * Output path is a unique file generated for the namedOutput. For example,
 * {namedOutput}-(m|r)-{part-number}
 *
 * @param namedOutput the named output name
 * @param key the key , value is NullWritable
 */
@SuppressWarnings("unchecked")
public void write(String namedOutput, Object key) throws IOException, InterruptedException {
write(namedOutput, key, NullWritable.get(), namedOutput);
}
/**
 * Write key and value to the namedOutput.
 *
 * Output path is a unique file generated for the namedOutput. For example,
 * {namedOutput}-(m|r)-{part-number}
 *
 * @param namedOutput the named output name
 * @param key the key
 * @param value the value
 */
@SuppressWarnings("unchecked")
public void write(String namedOutput, Object key, Object value) throws IOException, InterruptedException {
// The named output itself is used as the base output path.
write(namedOutput, key, value, namedOutput);
}
/**
 * Write key and value to baseOutputPath using the namedOutput.
 *
 * @param namedOutput the named output name
 * @param key the key
 * @param value the value
 * @param baseOutputPath base-output path to write the record to. Note:
 *                       Framework will generate unique filename for the
 *                       baseOutputPath
 */
@SuppressWarnings("unchecked")
public void write(String namedOutput, Object key, Object value, String baseOutputPath)
throws IOException, InterruptedException {
// Throws if the name is invalid or not a defined named output.
checkNamedOutputName(context, namedOutput, false);
checkBaseOutputPath(baseOutputPath);
// Defensive re-check against the set cached at construction time.
if (!namedOutputs.contains(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" + namedOutput + "'");
}
TaskAttemptContext taskContext = getContext(namedOutput);
getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/**
 * Write key value to an output file name.
 *
 * Gets the record writer from job's output format. Job's output format should
 * be a FileOutputFormat. The schemas already configured on the job are used.
 *
 * @param key the key
 * @param value the value
 * @param baseOutputPath base-output path to write the record to. Note:
 *                       Framework will generate unique filename for the
 *                       baseOutputPath
 */
public void write(Object key, Object value, String baseOutputPath) throws IOException, InterruptedException {
// No explicit schemas: the job's configured output schemas apply.
write(key, value, null, null, baseOutputPath);
}
/**
 * Write key value to an output file name.
 *
 * Gets the record writer from job's output format. Job's output format should
 * be a FileOutputFormat.
 *
 * @param key the key
 * @param value the value
 * @param keySchema keySchema to use
 * @param valSchema ValueSchema to use
 * @param baseOutputPath base-output path to write the record to. Note:
 *                       Framework will generate unique filename for the
 *                       baseOutputPath
 */
@SuppressWarnings("unchecked")
public void write(Object key, Object value, Schema keySchema, Schema valSchema, String baseOutputPath)
throws IOException, InterruptedException {
checkBaseOutputPath(baseOutputPath);
// Build a fresh Job so the given schemas can be applied without mutating
// the running task's own configuration.
Job job = Job.getInstance(context.getConfiguration());
setSchema(job, keySchema, valSchema);
TaskAttemptContext taskContext = createTaskAttemptContext(job.getConfiguration(), context.getTaskAttemptID());
getRecordWriter(taskContext, baseOutputPath).write(key, value);
}
/**
 * Gets the record writer from job's output format. Job's output format should
 * be a FileOutputFormat. If the record writer implements Syncable then returns
 * the current position as a value that may be passed to
 * DataFileReader.seek(long) otherwise returns -1. Forces the end of the current
 * block, emitting a synchronization marker.
 *
 * @param namedOutput the namedOutput
 * @param baseOutputPath base-output path to write the record to. Note:
 *                       Framework will generate unique filename for the
 *                       baseOutputPath
 * @return the writer's position after syncing, or -1 if the writer does not
 *         implement {@link Syncable}
 */
@SuppressWarnings("unchecked")
public long sync(String namedOutput, String baseOutputPath) throws IOException, InterruptedException {
checkNamedOutputName(context, namedOutput, false);
checkBaseOutputPath(baseOutputPath);
if (!namedOutputs.contains(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" + namedOutput + "'");
}
TaskAttemptContext taskContext = getContext(namedOutput);
RecordWriter recordWriter = getRecordWriter(taskContext, baseOutputPath);
// Only writers that expose sync support can report a position.
long position = -1;
if (recordWriter instanceof Syncable) {
Syncable syncableWriter = (Syncable) recordWriter;
position = syncableWriter.sync();
}
return position;
}
// by being synchronized MultipleOutputTask can be use with a
// MultithreadedMapper.
// Returns the cached record writer for baseFileName, creating (and caching)
// it via the task context's configured OutputFormat on first use.
@SuppressWarnings("unchecked")
private synchronized RecordWriter getRecordWriter(TaskAttemptContext taskContext, String baseFileName)
throws IOException, InterruptedException {
// look for record-writer in the cache
RecordWriter writer = recordWriters.get(baseFileName);
// If not in cache, create a new one
if (writer == null) {
// get the record writer from context output format
// FileOutputFormat.setOutputName(taskContext, baseFileName);
// NOTE(review): presumably the output format reads this key to derive the
// output file name -- confirm against the output format implementations.
taskContext.getConfiguration().set("avro.mo.config.namedOutput", baseFileName);
try {
writer = ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), taskContext.getConfiguration())
.getRecordWriter(taskContext);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
// if counters are enabled, wrap the writer with context
// to increment counters
if (countersEnabled) {
writer = new RecordWriterWithCounter(writer, baseFileName, context);
}
// add the record-writer to the cache
recordWriters.put(baseFileName, writer);
}
return writer;
}
/**
 * Applies the given key/value schemas to the job, targeting the map output
 * when the job has no reduce phase and the final job output otherwise.
 * Either schema may be null, in which case it is left unconfigured.
 */
private void setSchema(Job job, Schema keySchema, Schema valSchema) {
  final boolean mapOnly = job.getNumReduceTasks() == 0;
  if (keySchema != null) {
    if (mapOnly) {
      AvroJob.setMapOutputKeySchema(job, keySchema);
    } else {
      AvroJob.setOutputKeySchema(job, keySchema);
    }
  }
  if (valSchema != null) {
    if (mapOnly) {
      AvroJob.setMapOutputValueSchema(job, valSchema);
    } else {
      AvroJob.setOutputValueSchema(job, valSchema);
    }
  }
}
// Create a taskAttemptContext for the named output with
// output format and output key/value types put in the context.
// Contexts are cached per named output so repeated writes reuse them.
@SuppressWarnings("deprecation")
private TaskAttemptContext getContext(String nameOutput) throws IOException {
  TaskAttemptContext taskContext = taskContexts.get(nameOutput);
  if (taskContext != null) {
    return taskContext;
  }
  // The following trick leverages the instantiation of a record writer via
  // the job thus supporting arbitrary output formats.
  Job job = new Job(context.getConfiguration());
  job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput));
  // Parse the per-named-output schemas, if configured. Uses Schema.Parser
  // instead of the deprecated Schema.parse(String), and reads each config
  // key only once.
  Schema keySchema = null, valSchema = null;
  String keySchemaJson = job.getConfiguration().get(MO_PREFIX + nameOutput + ".keyschema", null);
  if (keySchemaJson != null) {
    keySchema = new Schema.Parser().parse(keySchemaJson);
  }
  String valSchemaJson = job.getConfiguration().get(MO_PREFIX + nameOutput + ".valueschema", null);
  if (valSchemaJson != null) {
    valSchema = new Schema.Parser().parse(valSchemaJson);
  }
  setSchema(job, keySchema, valSchema);
  taskContext = createTaskAttemptContext(job.getConfiguration(), context.getTaskAttemptID());
  taskContexts.put(nameOutput, taskContext);
  return taskContext;
}
private TaskAttemptContext createTaskAttemptContext(Configuration conf, TaskAttemptID taskId) {
  // The concrete TaskAttemptContext type changed incompatibly between Hadoop
  // 1.0 and 2.0, so the instance must be created reflectively against
  // whichever class is actually on the classpath.
  try {
    final Constructor<?> ctor = getTaskAttemptContextClass().getConstructor(Configuration.class, TaskAttemptID.class);
    return (TaskAttemptContext) ctor.newInstance(conf, taskId);
  } catch (Exception e) {
    throw new IllegalStateException(e);
  }
}
private Class<?> getTaskAttemptContextClass() {
  // The context types changed incompatibly between Hadoop 1.0 and 2.0 (see
  // createTaskAttemptContext). Try the Hadoop 2 implementation class first
  // and fall back to the Hadoop 1 class name when it is absent.
  try {
    return Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
  } catch (Exception e) {
    try {
      return Class.forName("org.apache.hadoop.mapreduce.TaskAttemptContext");
    } catch (Exception ex) {
      // Neither variant is on the classpath; nothing sensible can be done.
      throw new IllegalStateException(ex);
    }
  }
}
/**
 * Closes all the opened outputs.
 *
 * This should be called from cleanup method of map/reduce task. If overridden
 * subclasses must invoke <code>super.close()</code> at the end of their
 * <code>close()</code>
 *
 * Every writer is given the chance to close even if an earlier one fails; the
 * first exception encountered is rethrown after all writers have been
 * attempted, so a single bad writer no longer leaks the remaining ones.
 */
@SuppressWarnings("unchecked")
public void close() throws IOException, InterruptedException {
  IOException firstIo = null;
  InterruptedException firstInterrupt = null;
  for (RecordWriter writer : recordWriters.values()) {
    try {
      writer.close(context);
    } catch (IOException e) {
      if (firstIo == null) {
        firstIo = e;
      }
    } catch (InterruptedException e) {
      if (firstInterrupt == null) {
        firstInterrupt = e;
      }
    }
  }
  if (firstIo != null) {
    throw firstIo;
  }
  if (firstInterrupt != null) {
    // Restore the interrupt flag before propagating, per standard practice.
    Thread.currentThread().interrupt();
    throw firstInterrupt;
  }
}
}
| 7,072 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroSequenceFileOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.hadoop.io.AvroSequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * A sequence file output format that knows how to write AvroKeys and AvroValues
 * in addition to Writables.
 *
 * @param <K> The job output key type (may be a Writable, AvroKey).
 * @param <V> The job output value type (may be a Writable, AvroValue).
 */
public class AvroSequenceFileOutputFormat<K, V> extends FileOutputFormat<K, V> {
  /** {@inheritDoc} */
  @Override
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
    final Configuration conf = context.getConfiguration();

    // Work out the compression settings; the default is no compression.
    CompressionType compressionType = CompressionType.NONE;
    CompressionCodec codec = null;
    if (getCompressOutput(context)) {
      compressionType = getOutputCompressionType(conf);
      final Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
    }

    // Resolve the target output file and its file system.
    final Path outputFile = getDefaultWorkFile(context, "");
    final FileSystem fs = outputFile.getFileSystem(conf);

    // Assemble the writer options, including optional Avro key/value schemas.
    final AvroSequenceFile.Writer.Options options = new AvroSequenceFile.Writer.Options().withFileSystem(fs)
        .withConfiguration(conf).withOutputPath(outputFile).withKeyClass(context.getOutputKeyClass())
        .withValueClass(context.getOutputValueClass()).withProgressable(context).withCompressionType(compressionType)
        .withCompressionCodec(codec);
    final Schema keySchema = AvroJob.getOutputKeySchema(conf);
    if (keySchema != null) {
      options.withKeySchema(keySchema);
    }
    final Schema valueSchema = AvroJob.getOutputValueSchema(conf);
    if (valueSchema != null) {
      options.withValueSchema(valueSchema);
    }

    final SequenceFile.Writer out = AvroSequenceFile.createWriter(options);

    // Adapt the SequenceFile writer to the mapreduce RecordWriter contract.
    return new RecordWriter<K, V>() {
      @Override
      public void write(K key, V value) throws IOException {
        out.append(key, value);
      }

      @Override
      public void close(TaskAttemptContext context) throws IOException {
        out.close();
      }
    };
  }

  /**
   * Sets the type of compression for the output sequence file.
   *
   * @param job The job configuration.
   * @param compressionType The compression type for the target sequence file.
   */
  public static void setOutputCompressionType(Job job, CompressionType compressionType) {
    setCompressOutput(job, true);
    job.getConfiguration().set(FileOutputFormat.COMPRESS_TYPE, compressionType.name());
  }

  /**
   * Gets type of compression for the output sequence file.
   *
   * @param conf The job configuration.
   * @return The compression type (falls back to the SequenceFile default when
   *         none is configured).
   */
  public static CompressionType getOutputCompressionType(Configuration conf) {
    final String typeName = conf.get(FileOutputFormat.COMPRESS_TYPE);
    return typeName == null ? SequenceFile.getDefaultCompressionType(conf) : CompressionType.valueOf(typeName);
  }
}
| 7,073 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroRecordReaderBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.io.AvroSerialization;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Abstract base class for <code>RecordReader</code>s that read Avro container
 * files.
 *
 * @param <K> The type of key the record reader should generate.
 * @param <V> The type of value the record reader should generate.
 * @param <T> The type of the entries within the Avro container file being read.
 */
public abstract class AvroRecordReaderBase<K, V, T> extends RecordReader<K, V> {
  private static final Logger LOG = LoggerFactory.getLogger(AvroRecordReaderBase.class);

  /** The reader schema for the records within the input Avro container file. */
  private final Schema mReaderSchema;

  /** The current record from the Avro container file being read. */
  private T mCurrentRecord;

  /** A reader for the Avro container file containing the current input split. */
  private DataFileReader<T> mAvroFileReader;

  /**
   * The byte offset into the Avro container file where the first block that fits
   * completely within the current input split begins.
   */
  private long mStartPosition;

  /**
   * The byte offset into the Avro container file where the current input split
   * ends.
   */
  private long mEndPosition;

  /**
   * Constructor.
   *
   * @param readerSchema The reader schema for the records of the Avro container
   *                     file.
   */
  protected AvroRecordReaderBase(Schema readerSchema) {
    mReaderSchema = readerSchema;
    mCurrentRecord = null;
  }

  /** {@inheritDoc} */
  @Override
  public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException, InterruptedException {
    if (!(inputSplit instanceof FileSplit)) {
      throw new IllegalArgumentException("Only compatible with FileSplits.");
    }
    FileSplit fileSplit = (FileSplit) inputSplit;

    // Open a seekable input stream to the Avro container file.
    SeekableInput seekableFileInput = createSeekableInput(context.getConfiguration(), fileSplit.getPath());
    try {
      // Wrap the seekable input stream in an Avro DataFileReader.
      Configuration conf = context.getConfiguration();
      GenericData dataModel = AvroSerialization.createDataModel(conf);
      DatumReader<T> datumReader = dataModel.createDatumReader(mReaderSchema);
      mAvroFileReader = createAvroFileReader(seekableFileInput, datumReader);

      // Initialize the start and end offsets into the file based on the
      // boundaries of the input split we're responsible for: we read the first
      // block that begins at or after the split start, up to but not including
      // the first block that starts after the split end.
      mAvroFileReader.sync(fileSplit.getStart());
      mStartPosition = mAvroFileReader.previousSync();
      // The split end isn't necessarily on a block boundary, so progress
      // reported against it is approximate.
      mEndPosition = fileSplit.getStart() + fileSplit.getLength();
    } catch (IOException | RuntimeException e) {
      // Don't leak the underlying stream if reader construction or the initial
      // sync fails. Closing the DataFileReader also closes its input.
      if (mAvroFileReader != null) {
        mAvroFileReader.close();
        mAvroFileReader = null;
      } else {
        seekableFileInput.close();
      }
      throw e;
    }
  }

  /** {@inheritDoc} */
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    assert null != mAvroFileReader;
    if (mAvroFileReader.hasNext() && !mAvroFileReader.pastSync(mEndPosition)) {
      mCurrentRecord = mAvroFileReader.next(mCurrentRecord);
      return true;
    }
    return false;
  }

  /** {@inheritDoc} */
  @Override
  public float getProgress() throws IOException, InterruptedException {
    assert null != mAvroFileReader;
    if (mEndPosition == mStartPosition) {
      // Trivial empty input split.
      return 0.0f;
    }
    long bytesRead = mAvroFileReader.previousSync() - mStartPosition;
    long bytesTotal = mEndPosition - mStartPosition;
    // Parameterized logging avoids string concatenation when DEBUG is off.
    LOG.debug("Progress: bytesRead={}, bytesTotal={}", bytesRead, bytesTotal);
    return Math.min(1.0f, (float) bytesRead / (float) bytesTotal);
  }

  /** {@inheritDoc} */
  @Override
  public void close() throws IOException {
    if (null != mAvroFileReader) {
      try {
        mAvroFileReader.close();
      } finally {
        mAvroFileReader = null;
      }
    }
  }

  /**
   * Gets the current record read from the Avro container file.
   *
   * <p>
   * Calling <code>nextKeyValue()</code> moves this to the next record.
   * </p>
   *
   * @return The current Avro record (may be null if no record has been read).
   */
  protected T getCurrentRecord() {
    return mCurrentRecord;
  }

  /**
   * Creates a seekable input stream to an Avro container file.
   *
   * @param conf The hadoop configuration.
   * @param path The path to the avro container file.
   * @return A seekable stream over the file contents.
   * @throws IOException If there is an error reading from the path.
   */
  protected SeekableInput createSeekableInput(Configuration conf, Path path) throws IOException {
    return new FsInput(path, conf);
  }

  /**
   * Creates an Avro container file reader from a seekable input stream.
   *
   * @param input The input containing the Avro container file.
   * @param datumReader The reader to use for the individual records in the Avro
   *                    container file.
   * @return A reader over the container file's records.
   * @throws IOException If there is an error reading from the input stream.
   */
  protected DataFileReader<T> createAvroFileReader(SeekableInput input, DatumReader<T> datumReader) throws IOException {
    return new DataFileReader<>(input, datumReader);
  }
}
| 7,074 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/Syncable.java | package org.apache.avro.mapreduce;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
public interface Syncable {
  /**
   * Return the current position as a value that may be passed to
   * DataFileReader.seek(long). Forces the end of the current block, emitting a
   * synchronization marker.
   *
   * @return the position of the emitted synchronization marker, suitable as an
   *         argument to DataFileReader.seek(long).
   * @throws IOException - if an error occurred while attempting to sync.
   */
  long sync() throws IOException;
}
| 7,075 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroOutputFormatBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;
import org.apache.avro.hadoop.file.HadoopCodecFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import static org.apache.avro.file.CodecFactory.DEFAULT_ZSTANDARD_BUFFERPOOL;
import static org.apache.avro.file.CodecFactory.DEFAULT_ZSTANDARD_LEVEL;
/**
 * Abstract base class for output formats that write Avro container files.
 *
 * @param <K> The type of key to write.
 * @param <V> The type of value to write.
 */
public abstract class AvroOutputFormatBase<K, V> extends FileOutputFormat<K, V> {
  /**
   * Gets the configured compression codec from the task context.
   *
   * Precedence: an explicit Avro codec name under AvroJob.CONF_OUTPUT_CODEC
   * wins; otherwise the Hadoop codec from "mapred.output.compression.codec" is
   * translated to its Avro equivalent (and written back to
   * AvroJob.CONF_OUTPUT_CODEC as a side effect); failing both, deflate is used.
   * When output compression is disabled entirely, the null codec is returned.
   *
   * @param context The task attempt context.
   * @return The compression codec to use for the output Avro container file.
   */
  protected static CodecFactory getCompressionCodec(TaskAttemptContext context) {
    if (FileOutputFormat.getCompressOutput(context)) {
      // Default to deflate compression.
      // Read every tunable level up front; only the one matching the chosen
      // codec is actually used below.
      int deflateLevel = context.getConfiguration().getInt(org.apache.avro.mapred.AvroOutputFormat.DEFLATE_LEVEL_KEY,
          CodecFactory.DEFAULT_DEFLATE_LEVEL);
      int xzLevel = context.getConfiguration().getInt(org.apache.avro.mapred.AvroOutputFormat.XZ_LEVEL_KEY,
          CodecFactory.DEFAULT_XZ_LEVEL);
      int zstdLevel = context.getConfiguration().getInt(org.apache.avro.mapred.AvroOutputFormat.ZSTD_LEVEL_KEY,
          DEFAULT_ZSTANDARD_LEVEL);
      boolean zstdBufferPool = context.getConfiguration()
          .getBoolean(org.apache.avro.mapred.AvroOutputFormat.ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL);
      String outputCodec = context.getConfiguration().get(AvroJob.CONF_OUTPUT_CODEC);
      if (outputCodec == null) {
        // No Avro codec configured: fall back to the Hadoop compression codec,
        // if it maps to an Avro codec name.
        String compressionCodec = context.getConfiguration().get("mapred.output.compression.codec");
        String avroCodecName = HadoopCodecFactory.getAvroCodecName(compressionCodec);
        if (avroCodecName != null) {
          // Cache the translation so later lookups find an Avro codec name.
          context.getConfiguration().set(AvroJob.CONF_OUTPUT_CODEC, avroCodecName);
          return HadoopCodecFactory.fromHadoopString(compressionCodec);
        } else {
          return CodecFactory.deflateCodec(deflateLevel);
        }
      } else if (DataFileConstants.DEFLATE_CODEC.equals(outputCodec)) {
        return CodecFactory.deflateCodec(deflateLevel);
      } else if (DataFileConstants.XZ_CODEC.equals(outputCodec)) {
        return CodecFactory.xzCodec(xzLevel);
      } else if (DataFileConstants.ZSTANDARD_CODEC.equals(outputCodec)) {
        return CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool);
      } else {
        // Any other codec name is resolved by the generic factory lookup.
        return CodecFactory.fromString(outputCodec);
      }
    }
    // No compression.
    return CodecFactory.nullCodec();
  }

  // Resolves the committer's work path reflectively, since getWorkPath is
  // presumably declared only on the file-based committer subtype — TODO
  // confirm against the Hadoop versions still supported.
  private Path getWorkPathFromCommitter(TaskAttemptContext context) throws IOException {
    // When Hadoop 2 support is dropped, this method can be replaced by a
    // simple cast. See https://github.com/apache/avro/pull/1431/
    OutputCommitter committer = getOutputCommitter(context);
    try {
      return (Path) committer.getClass().getMethod("getWorkPath").invoke(committer);
    } catch (ReflectiveOperationException e) {
      throw new AvroRuntimeException(
          "Committer: " + committer.getClass().getName() + " does not have method getWorkPath", e);
    }
  }

  /**
   * Gets the target output stream where the Avro container file should be
   * written.
   *
   * The file name comes from "avro.mo.config.namedOutput" when set (used by
   * multiple-outputs support), otherwise "part".
   *
   * @param context The task attempt context.
   * @return The target output stream.
   * @throws IOException If the work path cannot be resolved or the file cannot
   *                     be created.
   */
  protected OutputStream getAvroFileOutputStream(TaskAttemptContext context) throws IOException {
    Path path = new Path(getWorkPathFromCommitter(context),
        getUniqueFile(context, context.getConfiguration().get("avro.mo.config.namedOutput", "part"),
            org.apache.avro.mapred.AvroOutputFormat.EXT));
    return path.getFileSystem(context.getConfiguration()).create(path);
  }

  /**
   * Gets the configured sync interval from the task context.
   *
   * @param context The task attempt context.
   * @return The sync interval to use for the output Avro container file.
   */
  protected static int getSyncInterval(TaskAttemptContext context) {
    return context.getConfiguration().getInt(org.apache.avro.mapred.AvroOutputFormat.SYNC_INTERVAL_KEY,
        DataFileConstants.DEFAULT_SYNC_INTERVAL);
  }
}
| 7,076 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.io.NullWritable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Reads records from an input split representing a chunk of an Avro container
 * file.
 *
 * @param <T> The (java) type of data in Avro container file.
 */
public class AvroKeyRecordReader<T> extends AvroRecordReaderBase<AvroKey<T>, NullWritable, T> {
  private static final Logger LOG = LoggerFactory.getLogger(AvroKeyRecordReader.class);

  /** A reusable wrapper handed back to the framework as the key of every record. */
  private final AvroKey<T> mCurrentRecord;

  /**
   * Constructor.
   *
   * @param readerSchema The reader schema to use for the records in the Avro
   *                     container file.
   */
  public AvroKeyRecordReader(Schema readerSchema) {
    super(readerSchema);
    mCurrentRecord = new AvroKey<>(null);
  }

  /** {@inheritDoc} */
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    // Advance the underlying reader first, then point the reusable wrapper at
    // whatever record is now current (null when the split is exhausted).
    final boolean hasNext = super.nextKeyValue();
    mCurrentRecord.datum(getCurrentRecord());
    return hasNext;
  }

  /** {@inheritDoc} */
  @Override
  public AvroKey<T> getCurrentKey() throws IOException, InterruptedException {
    return mCurrentRecord;
  }

  /** {@inheritDoc} */
  @Override
  public NullWritable getCurrentValue() throws IOException, InterruptedException {
    // Container files hold plain records, so the value slot is always empty.
    return NullWritable.get();
  }
}
| 7,077 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/CombineAvroKeyValueFileInputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReaderWrapper;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
/**
 * A combine avro keyvalue file input format that can combine small avro files
 * into mappers.
 *
 * @param <K> The type of the Avro key to read.
 * @param <V> The type of the Avro value to read.
 */
public class CombineAvroKeyValueFileInputFormat<K, V> extends CombineFileInputFormat<AvroKey<K>, AvroValue<V>> {
  @Override
  public RecordReader<AvroKey<K>, AvroValue<V>> createRecordReader(InputSplit inputSplit,
      TaskAttemptContext taskAttemptContext) throws IOException {
    // Delegates each chunk of the combined split to a wrapper reader. The raw
    // CombineFileRecordReader construction is unavoidable here because the
    // wrapper's class literal cannot carry the K/V type parameters.
    return new CombineFileRecordReader((CombineFileSplit) inputSplit, taskAttemptContext,
        CombineAvroKeyValueFileInputFormat.AvroKeyValueFileRecordReaderWrapper.class);
  }

  /**
   * A record reader that may be passed to <code>CombineFileRecordReader</code> so
   * that it can be used in a <code>CombineFileInputFormat</code>-equivalent for
   * <code>AvroKeyValueInputFormat</code>.
   *
   * @see CombineFileRecordReader
   * @see CombineFileInputFormat
   * @see AvroKeyValueInputFormat
   */
  private static class AvroKeyValueFileRecordReaderWrapper<K, V>
      extends CombineFileRecordReaderWrapper<AvroKey<K>, AvroValue<V>> {
    // this constructor signature is required by CombineFileRecordReader:
    // (split, context, index-of-the-file-within-the-split)
    public AvroKeyValueFileRecordReaderWrapper(CombineFileSplit split, TaskAttemptContext context, Integer idx)
        throws IOException, InterruptedException {
      super(new AvroKeyValueInputFormat<>(), split, context, idx);
    }
  }
}
| 7,078 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * Writes Avro records to an Avro container file output stream.
 *
 * @param <T> The Java type of the Avro data to write.
 */
public class AvroKeyRecordWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> implements Syncable {
  /** The underlying writer for the Avro container file. */
  private final DataFileWriter<T> mAvroFileWriter;

  /**
   * Constructor.
   *
   * @param writerSchema The writer schema for the records in the Avro
   *                     container file.
   * @param dataModel The data model used to create the datum writer.
   * @param compressionCodec A compression codec factory for the Avro container
   *                         file.
   * @param outputStream The output stream to write the Avro container file
   *                     to.
   * @param syncInterval The sync interval for the Avro container file.
   * @throws IOException If the record writer cannot be opened.
   */
  public AvroKeyRecordWriter(Schema writerSchema, GenericData dataModel, CodecFactory compressionCodec,
      OutputStream outputStream, int syncInterval) throws IOException {
    // Build and open the container-file writer in one fluent chain; the codec
    // and sync interval must be configured before create() opens the stream.
    mAvroFileWriter = new DataFileWriter<T>(dataModel.createDatumWriter(writerSchema)).setCodec(compressionCodec)
        .setSyncInterval(syncInterval).create(writerSchema, outputStream);
  }

  /**
   * Constructor using the default sync interval.
   *
   * @param writerSchema The writer schema for the records in the Avro
   *                     container file.
   * @param dataModel The data model used to create the datum writer.
   * @param compressionCodec A compression codec factory for the Avro container
   *                         file.
   * @param outputStream The output stream to write the Avro container file
   *                     to.
   * @throws IOException If the record writer cannot be opened.
   */
  public AvroKeyRecordWriter(Schema writerSchema, GenericData dataModel, CodecFactory compressionCodec,
      OutputStream outputStream) throws IOException {
    this(writerSchema, dataModel, compressionCodec, outputStream, DataFileConstants.DEFAULT_SYNC_INTERVAL);
  }

  /** {@inheritDoc} */
  @Override
  public void write(AvroKey<T> record, NullWritable ignore) throws IOException {
    // Only the key carries data; the NullWritable value is discarded.
    mAvroFileWriter.append(record.datum());
  }

  /** {@inheritDoc} */
  @Override
  public void close(TaskAttemptContext context) throws IOException {
    mAvroFileWriter.close();
  }

  /** {@inheritDoc} */
  @Override
  public long sync() throws IOException {
    return mAvroFileWriter.sync();
  }
}
| 7,079 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroKeyOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.io.AvroSerialization;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * FileOutputFormat for writing Avro container files.
 *
 * <p>
 * Since Avro container files only contain records (not key/value pairs), this
 * output format ignores the value.
 * </p>
 *
 * @param <T> The (java) type of the Avro data to write.
 */
public class AvroKeyOutputFormat<T> extends AvroOutputFormatBase<AvroKey<T>, NullWritable> {
  /** A factory for creating record writers. */
  private final RecordWriterFactory mRecordWriterFactory;

  /**
   * Constructor.
   */
  public AvroKeyOutputFormat() {
    this(new RecordWriterFactory());
  }

  /**
   * Constructor.
   *
   * @param recordWriterFactory A factory for creating record writers.
   */
  protected AvroKeyOutputFormat(RecordWriterFactory recordWriterFactory) {
    mRecordWriterFactory = recordWriterFactory;
  }

  /**
   * A factory for creating record writers.
   *
   * @param <T> The java type of the avro record to write.
   */
  protected static class RecordWriterFactory<T> {
    /**
     * Creates a new record writer instance.
     *
     * @param writerSchema The writer schema for the records to write.
     * @param dataModel The data model used to create the datum writer.
     * @param compressionCodec The compression type for the writer file.
     * @param outputStream The target output stream for the records.
     * @param syncInterval The sync interval for the writer file.
     */
    protected RecordWriter<AvroKey<T>, NullWritable> create(Schema writerSchema, GenericData dataModel,
        CodecFactory compressionCodec, OutputStream outputStream, int syncInterval) throws IOException {
      return new AvroKeyRecordWriter<>(writerSchema, dataModel, compressionCodec, outputStream, syncInterval);
    }
  }

  /** {@inheritDoc} */
  @Override
  @SuppressWarnings("unchecked")
  public RecordWriter<AvroKey<T>, NullWritable> getRecordWriter(TaskAttemptContext context) throws IOException {
    final Configuration conf = context.getConfiguration();

    // Resolve the writer schema: the job output schema normally, but for a
    // map-only job a configured map-output schema takes precedence.
    Schema writerSchema = AvroJob.getOutputKeySchema(conf);
    if (context.getNumReduceTasks() == 0) {
      final Schema mapOutputSchema = AvroJob.getMapOutputKeySchema(conf);
      if (mapOutputSchema != null) {
        writerSchema = mapOutputSchema;
      }
    }
    if (writerSchema == null) {
      throw new IOException("AvroKeyOutputFormat requires an output schema. Use AvroJob.setOutputKeySchema().");
    }

    final GenericData dataModel = AvroSerialization.createDataModel(conf);
    final OutputStream out = getAvroFileOutputStream(context);
    try {
      return mRecordWriterFactory.create(writerSchema, dataModel, getCompressionCodec(context), out,
          getSyncInterval(context));
    } catch (IOException e) {
      // Don't leak the output stream when the writer cannot be constructed.
      out.close();
      throw e;
    }
  }
}
| 7,080 |
0 | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro | Create_ds/avro/lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro.mapreduce;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.hadoop.io.AvroKeyComparator;
import org.apache.avro.hadoop.io.AvroSerialization;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
/**
* Utility methods for configuring jobs that work with Avro.
*
* <p>
* When using Avro data as MapReduce keys and values, data must be wrapped in a
* suitable AvroWrapper implementation. MapReduce keys must be wrapped in an
* AvroKey object, and MapReduce values must be wrapped in an AvroValue object.
* </p>
*
* <p>
* Suppose you would like to write a line count mapper that reads from a text
* file. If instead of using a Text and IntWritable output value, you would like
* to use Avro data with a schema of <i>"string"</i> and <i>"int"</i>,
* respectively, you may parametrize your mapper with
* {@code AvroKey<CharSequence>} and {@code AvroValue<Integer>} types. Then, use
* the <code>setMapOutputKeySchema()</code> and
* <code>setMapOutputValueSchema()</code> methods to set writer schemas for the
* records you will generate.
* </p>
*/
public final class AvroJob {
  /** Disable the constructor for this utility class. */
  private AvroJob() {
  }

  /** Configuration key for the input key schema. */
  private static final String CONF_INPUT_KEY_SCHEMA = "avro.schema.input.key";

  /** Configuration key for the input value schema. */
  private static final String CONF_INPUT_VALUE_SCHEMA = "avro.schema.input.value";

  /** Configuration key for the output key schema. */
  private static final String CONF_OUTPUT_KEY_SCHEMA = "avro.schema.output.key";

  /** Configuration key for the output value schema. */
  private static final String CONF_OUTPUT_VALUE_SCHEMA = "avro.schema.output.value";

  /**
   * The configuration key for a job's output compression codec. This takes one of
   * the strings registered in {@link org.apache.avro.file.CodecFactory}
   */
  public static final String CONF_OUTPUT_CODEC = "avro.output.codec";

  /**
   * Parses the schema stored in the configuration under the given key.
   * Extracted to avoid duplicating the null-checked parse in every getter.
   *
   * @param conf The job configuration.
   * @param key  The configuration key holding a JSON schema string.
   * @return The parsed schema, or null if the key is not set.
   */
  private static Schema parseSchema(Configuration conf, String key) {
    String schemaString = conf.get(key);
    return schemaString != null ? new Schema.Parser().parse(schemaString) : null;
  }

  /**
   * Sets the job input key schema.
   *
   * @param job    The job to configure.
   * @param schema The input key schema.
   */
  public static void setInputKeySchema(Job job, Schema schema) {
    job.getConfiguration().set(CONF_INPUT_KEY_SCHEMA, schema.toString());
  }

  /**
   * Sets the job input value schema.
   *
   * @param job    The job to configure.
   * @param schema The input value schema.
   */
  public static void setInputValueSchema(Job job, Schema schema) {
    job.getConfiguration().set(CONF_INPUT_VALUE_SCHEMA, schema.toString());
  }

  /**
   * Sets the map output key schema. Also wires up the Avro key comparators and
   * registers the Avro serialization framework with the job.
   *
   * @param job    The job to configure.
   * @param schema The map output key schema.
   */
  public static void setMapOutputKeySchema(Job job, Schema schema) {
    job.setMapOutputKeyClass(AvroKey.class);
    job.setGroupingComparatorClass(AvroKeyComparator.class);
    job.setSortComparatorClass(AvroKeyComparator.class);
    AvroSerialization.setKeyWriterSchema(job.getConfiguration(), schema);
    AvroSerialization.setKeyReaderSchema(job.getConfiguration(), schema);
    AvroSerialization.addToConfiguration(job.getConfiguration());
  }

  /**
   * Sets the map output value schema and registers the Avro serialization
   * framework with the job.
   *
   * @param job    The job to configure.
   * @param schema The map output value schema.
   */
  public static void setMapOutputValueSchema(Job job, Schema schema) {
    job.setMapOutputValueClass(AvroValue.class);
    AvroSerialization.setValueWriterSchema(job.getConfiguration(), schema);
    AvroSerialization.setValueReaderSchema(job.getConfiguration(), schema);
    AvroSerialization.addToConfiguration(job.getConfiguration());
  }

  /**
   * Sets the job output key schema.
   *
   * @param job    The job to configure.
   * @param schema The job output key schema.
   */
  public static void setOutputKeySchema(Job job, Schema schema) {
    job.setOutputKeyClass(AvroKey.class);
    job.getConfiguration().set(CONF_OUTPUT_KEY_SCHEMA, schema.toString());
  }

  /**
   * Sets the job output value schema.
   *
   * @param job    The job to configure.
   * @param schema The job output value schema.
   */
  public static void setOutputValueSchema(Job job, Schema schema) {
    job.setOutputValueClass(AvroValue.class);
    job.getConfiguration().set(CONF_OUTPUT_VALUE_SCHEMA, schema.toString());
  }

  /**
   * Sets the job data model class.
   *
   * @param job        The job to configure.
   * @param modelClass The job data model class.
   */
  public static void setDataModelClass(Job job, Class<? extends GenericData> modelClass) {
    AvroSerialization.setDataModelClass(job.getConfiguration(), modelClass);
  }

  /**
   * Gets the job input key schema.
   *
   * @param conf The job configuration.
   * @return The job input key schema, or null if not set.
   */
  public static Schema getInputKeySchema(Configuration conf) {
    return parseSchema(conf, CONF_INPUT_KEY_SCHEMA);
  }

  /**
   * Gets the job input value schema.
   *
   * @param conf The job configuration.
   * @return The job input value schema, or null if not set.
   */
  public static Schema getInputValueSchema(Configuration conf) {
    return parseSchema(conf, CONF_INPUT_VALUE_SCHEMA);
  }

  /**
   * Gets the map output key schema.
   *
   * @param conf The job configuration.
   * @return The map output key schema, or null if not set.
   */
  public static Schema getMapOutputKeySchema(Configuration conf) {
    return AvroSerialization.getKeyWriterSchema(conf);
  }

  /**
   * Gets the map output value schema.
   *
   * @param conf The job configuration.
   * @return The map output value schema, or null if not set.
   */
  public static Schema getMapOutputValueSchema(Configuration conf) {
    return AvroSerialization.getValueWriterSchema(conf);
  }

  /**
   * Gets the job output key schema.
   *
   * @param conf The job configuration.
   * @return The job output key schema, or null if not set.
   */
  public static Schema getOutputKeySchema(Configuration conf) {
    return parseSchema(conf, CONF_OUTPUT_KEY_SCHEMA);
  }

  /**
   * Gets the job output value schema.
   *
   * @param conf The job configuration.
   * @return The job output value schema, or null if not set.
   */
  public static Schema getOutputValueSchema(Configuration conf) {
    return parseSchema(conf, CONF_OUTPUT_VALUE_SCHEMA);
  }
}
| 7,081 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestLogicalTypeForStringType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.codegentest.testdata.StringLogicalType;
import org.apache.avro.generic.GenericData;
import org.junit.jupiter.api.Test;
import java.util.UUID;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
public class TestLogicalTypeForStringType {

  /**
   * See AVRO-2548: StringType of "String" causes logicalType converters to be
   * ignored for field
   */
  @Test
  void shouldUseUUIDAsType() {
    final StringLogicalType record = new StringLogicalType();
    record.setSomeIdentifier(UUID.randomUUID());
    assertThat(record.getSomeIdentifier(), instanceOf(UUID.class));

    final String stringProp = StringLogicalType.getClassSchema().getField("someJavaString").schema()
        .getProp(GenericData.STRING_PROP);
    assertThat(stringProp, equalTo("String"));
  }
}
| 7,082 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestNullableLogicalTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import java.time.LocalDate;
import org.apache.avro.codegentest.testdata.NullableLogicalTypes;
import org.junit.jupiter.api.Test;
import java.io.IOException;
public class TestNullableLogicalTypes extends AbstractSpecificRecordTest {

  @Test
  void withNullValues() throws IOException {
    // An absent date must round-trip through the nullable union.
    final NullableLogicalTypes record = NullableLogicalTypes.newBuilder().setNullableDate(null).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void date() throws IOException {
    final LocalDate today = LocalDate.now();
    final NullableLogicalTypes record = NullableLogicalTypes.newBuilder().setNullableDate(today).build();
    verifySerDeAndStandardMethods(record);
  }
}
| 7,083 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/AbstractSpecificRecordTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.avro.specific.SpecificRecordBase;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
abstract class AbstractSpecificRecordTest {

  /**
   * Round-trips {@code original} through Avro binary serialization using both
   * schema-based and class-based writers/readers, and verifies that equals(),
   * hashCode() and toString() are preserved by the round trip.
   */
  @SuppressWarnings("unchecked")
  <T extends SpecificRecordBase> void verifySerDeAndStandardMethods(T original) {
    final SpecificDatumWriter<T> datumWriterFromSchema = new SpecificDatumWriter<>(original.getSchema());
    final SpecificDatumReader<T> datumReaderFromSchema = new SpecificDatumReader<>(original.getSchema(),
        original.getSchema());
    verifySerDeAndStandardMethods(original, datumWriterFromSchema, datumReaderFromSchema);

    // Improvement: avoid raw-typed constructors (previously only compiled
    // cleanly because of the @SuppressWarnings on this method).
    final Class<T> recordClass = (Class<T>) original.getClass();
    final SpecificDatumWriter<T> datumWriterFromClass = new SpecificDatumWriter<>(recordClass);
    final SpecificDatumReader<T> datumReaderFromClass = new SpecificDatumReader<>(recordClass);
    verifySerDeAndStandardMethods(original, datumWriterFromClass, datumReaderFromClass);
  }

  /** Serializes, deserializes and compares the copy against the original. */
  private <T extends SpecificRecordBase> void verifySerDeAndStandardMethods(T original,
      SpecificDatumWriter<T> datumWriter, SpecificDatumReader<T> datumReader) {
    final byte[] serialized = serialize(original, datumWriter);
    final T copy = deserialize(serialized, datumReader);
    assertEquals(original, copy);
    // In addition to equals() tested above, make sure the other methods that use
    // SpecificData work as intended
    // compareTo() throws an exception for maps, otherwise we would have tested it
    // here
    // Assert.assertEquals(0, original.compareTo(copy));
    assertEquals(original.hashCode(), copy.hashCode());
    assertEquals(original.toString(), copy.toString());
  }

  /** Serializes the record to Avro binary using a direct binary encoder. */
  private <T extends SpecificRecordBase> byte[] serialize(T object, SpecificDatumWriter<T> datumWriter) {
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    try {
      datumWriter.write(object, EncoderFactory.get().directBinaryEncoder(outputStream, null));
      return outputStream.toByteArray();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /** Deserializes Avro binary bytes back into a record instance. */
  private <T extends SpecificRecordBase> T deserialize(byte[] bytes, SpecificDatumReader<T> datumReader) {
    try {
      final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
      return datumReader.read(null, DecoderFactory.get().directBinaryDecoder(byteArrayInputStream, null));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
| 7,084 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestNestedLogicalTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.codegentest.testdata.NestedLogicalTypesArray;
import org.apache.avro.codegentest.testdata.NestedLogicalTypesMap;
import org.apache.avro.codegentest.testdata.NestedLogicalTypesRecord;
import org.apache.avro.codegentest.testdata.NestedLogicalTypesUnion;
import org.apache.avro.codegentest.testdata.NestedLogicalTypesUnionFixedDecimal;
import org.apache.avro.codegentest.testdata.NestedRecord;
import org.apache.avro.codegentest.testdata.NullableLogicalTypesArray;
import org.apache.avro.codegentest.testdata.RecordInArray;
import org.apache.avro.codegentest.testdata.RecordInMap;
import org.apache.avro.codegentest.testdata.RecordInUnion;
import org.junit.jupiter.api.Test;
import java.math.BigInteger;
import java.time.LocalDate;
import java.util.Collections;
public class TestNestedLogicalTypes extends AbstractSpecificRecordTest {

  @Test
  void nullableLogicalTypeInNestedRecord() {
    final NestedRecord inner = NestedRecord.newBuilder().setNullableDateField(LocalDate.now()).build();
    final NestedLogicalTypesRecord record = NestedLogicalTypesRecord.newBuilder().setNestedRecord(inner).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullableLogicalTypeInArray() {
    final NullableLogicalTypesArray record = NullableLogicalTypesArray.newBuilder()
        .setArrayOfLogicalType(Collections.singletonList(LocalDate.now())).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullableLogicalTypeInRecordInArray() {
    final RecordInArray element = RecordInArray.newBuilder().setNullableDateField(LocalDate.now()).build();
    final NestedLogicalTypesArray record = NestedLogicalTypesArray.newBuilder()
        .setArrayOfRecords(Collections.singletonList(element)).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullableLogicalTypeInRecordInUnion() {
    final RecordInUnion member = RecordInUnion.newBuilder().setNullableDateField(LocalDate.now()).build();
    final NestedLogicalTypesUnion record = NestedLogicalTypesUnion.newBuilder().setUnionOfRecords(member).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullableLogicalTypeInRecordInMap() {
    final RecordInMap mapValue = RecordInMap.newBuilder().setNullableDateField(LocalDate.now()).build();
    final NestedLogicalTypesMap record = NestedLogicalTypesMap.newBuilder()
        .setMapOfRecords(Collections.singletonMap("key", mapValue)).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullableLogicalTypeInRecordInFixedDecimal() {
    final NestedLogicalTypesUnionFixedDecimal record = NestedLogicalTypesUnionFixedDecimal.newBuilder()
        .setUnionOfFixedDecimal(new CustomDecimal(BigInteger.TEN, 15)).build();
    verifySerDeAndStandardMethods(record);
  }
}
| 7,085 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestCustomConversion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.codegentest.testdata.CustomConversionWithLogicalTypes;
import org.apache.avro.codegentest.testdata.LogicalTypesWithCustomConversion;
import org.apache.avro.codegentest.testdata.LogicalTypesWithCustomConversionIdl;
import org.junit.jupiter.api.Test;
import java.math.BigInteger;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TestCustomConversion extends AbstractSpecificRecordTest {

  @Test
  void nullValues() {
    // Only the required (non-null) fields are populated.
    final LogicalTypesWithCustomConversion record = LogicalTypesWithCustomConversion.newBuilder()
        .setNonNullCustomField(new CustomDecimal(BigInteger.valueOf(100), 2))
        .setNonNullFixedSizeString(new FixedSizeString("test")).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nullValuesIdl() {
    final LogicalTypesWithCustomConversionIdl record = LogicalTypesWithCustomConversionIdl.newBuilder()
        .setNonNullCustomField(new CustomDecimal(BigInteger.valueOf(100), 2))
        .setNonNullFixedSizeString(new FixedSizeString("test")).build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void nonNullValues() {
    // All fields, including the optional ones, are populated.
    final LogicalTypesWithCustomConversion record = LogicalTypesWithCustomConversion.newBuilder()
        .setNonNullCustomField(new CustomDecimal(BigInteger.valueOf(100), 2))
        .setNullableCustomField(new CustomDecimal(BigInteger.valueOf(3000), 2))
        .setNonNullFixedSizeString(new FixedSizeString("test")).setNullableFixedSizeString(new FixedSizeString("test2"))
        .build();
    verifySerDeAndStandardMethods(record);
  }

  @Test
  void stringViolatesLimit() {
    // An empty string is below the minimum length of the logical type.
    assertThrows(IllegalArgumentException.class, () -> {
      final LogicalTypesWithCustomConversion record = LogicalTypesWithCustomConversion.newBuilder()
          .setNonNullCustomField(new CustomDecimal(BigInteger.valueOf(100), 2))
          .setNonNullFixedSizeString(new FixedSizeString("")).build();
      verifySerDeAndStandardMethods(record);
    });
  }

  @Test
  void customConversionWithCustomLogicalType() {
    final CustomConversionWithLogicalTypes record = CustomConversionWithLogicalTypes.newBuilder()
        .setCustomEnum(new CustomEnumType("TWO")).build();
    verifySerDeAndStandardMethods(record);
  }
}
| 7,086 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestLogicalTypesWithDefaults.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import java.time.LocalDate;
import org.apache.avro.codegentest.testdata.LogicalTypesWithDefaults;
import org.junit.Assert;
import org.junit.jupiter.api.Test;
import java.io.IOException;
public class TestLogicalTypesWithDefaults extends AbstractSpecificRecordTest {
private static final LocalDate DEFAULT_VALUE = LocalDate.parse("1973-05-19");
@Test
void defaultValueOfNullableField() throws IOException {
LogicalTypesWithDefaults instanceOfGeneratedClass = LogicalTypesWithDefaults.newBuilder()
.setNonNullDate(LocalDate.now()).build();
verifySerDeAndStandardMethods(instanceOfGeneratedClass);
}
@Test
void defaultValueOfNonNullField() throws IOException {
LogicalTypesWithDefaults instanceOfGeneratedClass = LogicalTypesWithDefaults.newBuilder()
.setNullableDate(LocalDate.now()).build();
Assert.assertEquals(DEFAULT_VALUE, instanceOfGeneratedClass.getNonNullDate());
verifySerDeAndStandardMethods(instanceOfGeneratedClass);
}
@Test
void withValues() throws IOException {
LogicalTypesWithDefaults instanceOfGeneratedClass = LogicalTypesWithDefaults.newBuilder()
.setNullableDate(LocalDate.now()).setNonNullDate(LocalDate.now()).build();
verifySerDeAndStandardMethods(instanceOfGeneratedClass);
}
}
| 7,087 |
0 | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/codegen-test/src/test/java/org/apache/avro/codegentest/TestNestedRecordsWithDifferentNamespaces.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.codegentest.other.NestedOtherNamespaceRecord;
import org.apache.avro.codegentest.some.NestedSomeNamespaceRecord;
import org.junit.jupiter.api.Test;
public class TestNestedRecordsWithDifferentNamespaces extends AbstractSpecificRecordTest {
@Test
void nestedRecordsWithDifferentNamespaces() {
NestedSomeNamespaceRecord instanceOfGeneratedClass = NestedSomeNamespaceRecord.newBuilder()
.setNestedRecordBuilder(NestedOtherNamespaceRecord.newBuilder().setSomeField(1)).build();
verifySerDeAndStandardMethods(instanceOfGeneratedClass);
}
}
| 7,088 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/CustomDecimalConversion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
public class CustomDecimalConversion extends Conversion<CustomDecimal> {

  @Override
  public Class<CustomDecimal> getConvertedType() {
    return CustomDecimal.class;
  }

  @Override
  public String getLogicalTypeName() {
    return "decimal";
  }

  @Override
  public CustomDecimal fromBytes(ByteBuffer value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    // Bug fix: the previous code read into a temporary array but then called
    // value.array(), which returns the buffer's BACKING array. That array may
    // contain extra bytes when position/arrayOffset are non-zero, and the call
    // throws for direct or read-only buffers. Read the remaining bytes and use
    // the filled array directly.
    byte[] bytes = new byte[value.remaining()];
    value.get(bytes);
    return new CustomDecimal(new BigInteger(bytes), scale);
  }

  @Override
  public ByteBuffer toBytes(CustomDecimal value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    return ByteBuffer.wrap(value.toByteArray(scale));
  }

  @Override
  public CustomDecimal fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    return new CustomDecimal(new BigInteger(value.bytes()), scale);
  }

  @Override
  public GenericFixed toFixed(CustomDecimal value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    // Sign-extend: negative values are padded with 0xFF, non-negative with 0x00.
    byte fillByte = (byte) (value.signum() < 0 ? 0xFF : 0x00);
    byte[] unscaled = value.toByteArray(scale);
    byte[] bytes = new byte[schema.getFixedSize()];
    int offset = bytes.length - unscaled.length;
    // Fill the front of the array and copy remaining with unscaled values
    Arrays.fill(bytes, 0, offset, fillByte);
    System.arraycopy(unscaled, 0, bytes, offset, bytes.length - offset);
    return new GenericData.Fixed(schema, bytes);
  }
}
| 7,089 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/FixedSizeString.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import java.util.Objects;
import static java.util.Objects.isNull;
public class FixedSizeString implements Comparable<FixedSizeString> {

  // Wrapped string payload; may be null for a freshly constructed instance.
  private String value;

  /** No-arg constructor, required for reflective instantiation. */
  public FixedSizeString() {
  }

  public FixedSizeString(String value) {
    this.value = value;
  }

  public String getValue() {
    return value;
  }

  public void setValue(String value) {
    this.value = value;
  }

  @Override
  public int compareTo(FixedSizeString that) {
    return this.value.compareTo(that.value);
  }

  @Override
  public boolean equals(Object that) {
    // Bug fix: the previous implementation returned true for ANY argument
    // (including null and unrelated classes) whenever this.value was null,
    // violating the equals() contract. Compare the class and value instead.
    if (this == that) {
      return true;
    }
    if (isNull(that) || getClass() != that.getClass()) {
      return false;
    }
    return Objects.equals(value, ((FixedSizeString) that).value);
  }

  @Override
  public int hashCode() {
    return Objects.hash(value);
  }
}
| 7,090 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/FixedSizeStringFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
/**
 * {@link LogicalTypes.LogicalTypeFactory} registration hook for the
 * {@code fixed-size-string} logical type.
 */
public class FixedSizeStringFactory implements LogicalTypes.LogicalTypeFactory {

  /** Name under which this logical type appears in schemas. */
  public static final String NAME = "fixed-size-string";

  /** Returns the registered logical-type name. */
  @Override
  public String getTypeName() {
    return NAME;
  }

  /** Builds a {@link FixedSizeStringLogicalType} from the annotated schema. */
  @Override
  public LogicalType fromSchema(Schema schema) {
    return new FixedSizeStringLogicalType(schema);
  }
}
| 7,091 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/FixedSizeStringLogicalType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import static java.util.Objects.isNull;
/**
 * Logical type for strings whose length must fall within a configured
 * [minLength, maxLength] range, read from integer schema properties.
 */
public class FixedSizeStringLogicalType extends LogicalType {

  private static final String MIN_LENGTH = "minLength";
  private static final String MAX_LENGTH = "maxLength";

  // Inclusive length bounds; never null.
  private final Integer minLength;
  private final Integer maxLength;

  /** Creates an unbounded instance (any length accepted). */
  public FixedSizeStringLogicalType() {
    super(FixedSizeStringFactory.NAME);
    minLength = Integer.MIN_VALUE;
    maxLength = Integer.MAX_VALUE;
  }

  /**
   * Reads the required {@code minLength}/{@code maxLength} properties from the
   * schema.
   *
   * @throws IllegalArgumentException if either property is missing or not an
   *                                  integer
   */
  public FixedSizeStringLogicalType(Schema schema) {
    super(FixedSizeStringFactory.NAME);
    minLength = getInteger(schema, MIN_LENGTH);
    maxLength = getInteger(schema, MAX_LENGTH);
  }

  public Integer getMinLength() {
    return minLength;
  }

  public Integer getMaxLength() {
    return maxLength;
  }

  // Fetches a mandatory integer schema property, failing fast otherwise.
  private int getInteger(Schema schema, String name) {
    final Object raw = schema.getObjectProp(name);
    if (raw == null) {
      throw new IllegalArgumentException(String.format("Invalid %s: missing %s", FixedSizeStringFactory.NAME, name));
    }
    if (!(raw instanceof Integer)) {
      throw new IllegalArgumentException(
          String.format("Expected integer %s but get %s", name, raw.getClass().getSimpleName()));
    }
    return (Integer) raw;
  }
}
| 7,092 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/FixedSizeStringConversion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import static java.util.Objects.nonNull;
/**
 * Converts between Avro bytes and {@link FixedSizeString}, enforcing the
 * length bounds carried by the {@link FixedSizeStringLogicalType}.
 */
public class FixedSizeStringConversion extends Conversion<FixedSizeString> {

  @Override
  public Class<FixedSizeString> getConvertedType() {
    return FixedSizeString.class;
  }

  @Override
  public String getLogicalTypeName() {
    return FixedSizeStringFactory.NAME;
  }

  /** Decodes UTF-8 bytes, validates the length, and wraps the result. */
  @Override
  public FixedSizeString fromBytes(ByteBuffer value, Schema schema, LogicalType type) {
    final String decoded = StandardCharsets.UTF_8.decode(value).toString();
    validate(decoded, type);
    return new FixedSizeString(decoded);
  }

  /**
   * Validates the wrapped value's length and encodes it as UTF-8.
   *
   * <p>
   * NOTE(review): a null wrapped value passes validate() but then NPEs inside
   * encode() — confirm whether null should be rejected explicitly.
   */
  @Override
  public ByteBuffer toBytes(FixedSizeString value, Schema schema, LogicalType type) {
    validate(value.getValue(), type);
    return StandardCharsets.UTF_8.encode(value.getValue());
  }

  // Rejects non-null values whose length falls outside [minLength, maxLength].
  private void validate(String candidate, LogicalType logicalType) {
    final FixedSizeStringLogicalType fixedType = (FixedSizeStringLogicalType) logicalType;
    final int min = fixedType.getMinLength();
    final int max = fixedType.getMaxLength();
    if (candidate != null && (candidate.length() < min || candidate.length() > max)) {
      throw new IllegalArgumentException(
          String.format("Incorrect size. Must satisfy %d <= value <= %d", min, max));
    }
  }
}
| 7,093 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/CustomEnumType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
/**
 * Minimal immutable wrapper around an enum symbol's string form, used to
 * exercise custom conversions for enum logical types.
 */
public class CustomEnumType {

  // The raw enum symbol this instance represents.
  private final String underlying;

  CustomEnumType(String underlying) {
    this.underlying = underlying;
  }

  /** Returns the wrapped symbol string. */
  public String getUnderlying() {
    return underlying;
  }

  /** The string form is simply the wrapped symbol itself. */
  @Override
  public String toString() {
    return getUnderlying();
  }
}
| 7,094 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/CustomEnumConversion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericEnumSymbol;
/**
 * Converts between generic enum symbols and {@link CustomEnumType} for the
 * {@code custom-enum} logical type.
 */
public class CustomEnumConversion extends Conversion<CustomEnumType> {

  @Override
  public Class<CustomEnumType> getConvertedType() {
    return CustomEnumType.class;
  }

  @Override
  public String getLogicalTypeName() {
    return "custom-enum";
  }

  /**
   * NOTE(review): Schema.create(...) appears to support primitive types only,
   * so passing Schema.Type.ENUM here likely throws at runtime — confirm whether
   * this method is ever invoked, or whether Schema.createEnum(...) is needed.
   */
  @Override
  public Schema getRecommendedSchema() {
    return new LogicalType("custom-enum").addToSchema(Schema.create(Schema.Type.ENUM));
  }

  /** Wraps the symbol's string form in a {@link CustomEnumType}. */
  @Override
  public CustomEnumType fromEnumSymbol(GenericEnumSymbol value, Schema schema, LogicalType type) {
    return new CustomEnumType(value.toString());
  }

  /** Unwraps the custom type back into a generic enum symbol for the schema. */
  @Override
  public GenericEnumSymbol toEnumSymbol(CustomEnumType value, Schema schema, LogicalType type) {
    return new GenericData.EnumSymbol(schema, value.getUnderlying());
  }
}
| 7,095 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/CustomDecimal.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
/**
* Wraps a BigDecimal just to demonstrate that it is possible to use custom
* implementation classes with custom conversions.
*/
public class CustomDecimal implements Comparable<CustomDecimal> {
private final BigDecimal internalValue;
public CustomDecimal(BigInteger value, int scale) {
internalValue = new BigDecimal(value, scale);
}
public byte[] toByteArray(int scale) {
final BigDecimal correctlyScaledValue;
if (scale != internalValue.scale()) {
correctlyScaledValue = internalValue.setScale(scale, RoundingMode.HALF_UP);
} else {
correctlyScaledValue = internalValue;
}
return correctlyScaledValue.unscaledValue().toByteArray();
}
int signum() {
return internalValue.signum();
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
CustomDecimal that = (CustomDecimal) o;
return internalValue.equals(that.internalValue);
}
@Override
public int hashCode() {
return internalValue.hashCode();
}
@Override
public int compareTo(CustomDecimal o) {
return this.internalValue.compareTo(o.internalValue);
}
}
| 7,096 |
0 | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro | Create_ds/avro/lang/java/integration-test/test-custom-conversions/src/main/java/org/apache/avro/codegentest/CustomEnumLogicalTypeFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.codegentest;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
/**
 * Registers the {@code custom-enum} logical type, which is only valid on Avro
 * ENUM schemas.
 */
public class CustomEnumLogicalTypeFactory implements LogicalTypes.LogicalTypeFactory {

  // Stateless, so a single shared instance is handed out for every schema.
  private static final LogicalType CUSTOM_ENUM = new CustomEnumLogicalType("custom-enum");

  @Override
  public String getTypeName() {
    return CUSTOM_ENUM.getName();
  }

  @Override
  public LogicalType fromSchema(Schema schema) {
    return CUSTOM_ENUM;
  }

  /** Logical type that restricts itself to ENUM schemas. */
  public static class CustomEnumLogicalType extends LogicalType {

    public CustomEnumLogicalType(String logicalTypeName) {
      super(logicalTypeName);
    }

    /** Rejects any underlying schema that is not an ENUM. */
    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.ENUM) {
        throw new IllegalArgumentException("Custom Enum can only be used with an underlying Enum type");
      }
    }
  }
}
| 7,097 |
0 | Create_ds/avro/lang/java/thrift/src/test/java/org/apache/avro | Create_ds/avro/lang/java/thrift/src/test/java/org/apache/avro/thrift/TestThrift.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.thrift;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.thrift.test.Test;
import org.apache.avro.thrift.test.FooOrBar;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.avro.thrift.test.E;
import org.apache.avro.thrift.test.Nested;
/** Round-trip tests for Thrift-generated structs through Avro encoders. */
public class TestThrift {

  /** Round-trips a fully populated struct, covering every field kind. */
  @org.junit.jupiter.api.Test
  void testStruct() throws Exception {
    System.out.println(ThriftData.get().getSchema(Test.class).toString(true));

    Test record = new Test();
    record.setBoolField(true);
    record.setByteField((byte) 2);
    record.setI16Field((short) 3);
    record.setI16OptionalField((short) 14);
    record.setI32Field(4);
    record.setI64Field(5L);
    record.setDoubleField(2.0);
    record.setStringField("foo");
    record.setBinaryField(ByteBuffer.wrap(new byte[] { 0, -1 }));
    record.setMapField(Collections.singletonMap("x", 1));
    record.setListField(Collections.singletonList(7));
    record.setSetField(Collections.singleton(8));
    record.setEnumField(E.X);
    record.setStructField(new Nested(9));
    record.setFooOrBar(FooOrBar.foo("x"));

    System.out.println(record);
    check(record);
  }

  /** Round-trips a struct where only some optional fields are populated. */
  @org.junit.jupiter.api.Test
  void testOptionals() throws Exception {
    Test record = new Test();
    record.setBoolField(true);
    record.setByteField((byte) 2);
    record.setByteOptionalField((byte) 4);
    record.setI16Field((short) 3);
    record.setI16OptionalField((short) 15);
    record.setI64Field(5L);
    record.setDoubleField(2.0);

    System.out.println(record);
    check(record);
  }

  // Serializes with ThriftDatumWriter, reads back with ThriftDatumReader, and
  // asserts the decoded object equals the original.
  private void check(Test expected) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ThriftDatumWriter<Test> writer = new ThriftDatumWriter<>(Test.class);
    Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    writer.write(expected, encoder);
    encoder.flush();
    Object actual = new ThriftDatumReader<>(Test.class).read(null,
        DecoderFactory.get().binaryDecoder(new ByteArrayInputStream(out.toByteArray()), null));
    assertEquals(expected, actual);
  }
}
| 7,098 |
0 | Create_ds/avro/lang/java/thrift/src/test/java/org/apache/avro/thrift | Create_ds/avro/lang/java/thrift/src/test/java/org/apache/avro/thrift/test/E.java | /**
* Autogenerated by Thrift Compiler (0.14.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.avro.thrift.test;
@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-03-18")
// NOTE: Thrift-generated code — regenerate from the IDL instead of editing by hand.
public enum E implements org.apache.thrift.TEnum {
  X(1), Y(2), Z(3);
  // Thrift wire value for this symbol, as declared in the IDL.
  private final int value;
  private E(int value) {
    this.value = value;
  }
  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }
  /**
   * Find the enum type by its integer value, as defined in the Thrift IDL.
   *
   * @return null if the value is not found.
   */
  @org.apache.thrift.annotation.Nullable
  public static E findByValue(int value) {
    switch (value) {
    case 1:
      return X;
    case 2:
      return Y;
    case 3:
      return Z;
    default:
      return null;
    }
  }
}
| 7,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.