index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchInputSplit.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import com.google.common.base.Supplier;
import org.apache.crunch.io.FormatBundle;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * An {@link InputSplit} wrapper that pairs a delegate split with the {@link FormatBundle}
 * that produced it and the index of the plan node that should consume it. This lets a
 * single Crunch MapReduce job multiplex several distinct input formats.
 *
 * <p>Serialization note: {@link #write(DataOutput)} and {@link #readFields(DataInput)}
 * must read/write fields in exactly the same order (nodeIndex, bundle, split class name,
 * split payload).
 */
class CrunchInputSplit extends InputSplit implements Writable, Configurable, Supplier<InputSplit> {
// The wrapped split produced by the underlying InputFormat.
private InputSplit inputSplit;
// Index of the map-side processing node (RTNode) this split is routed to.
private int nodeIndex;
// The InputFormat class plus the extra Configuration entries needed to run it.
private FormatBundle<? extends InputFormat<?, ?>> bundle;
private Configuration conf;
public CrunchInputSplit() {
// default constructor
}
public CrunchInputSplit(
InputSplit inputSplit,
FormatBundle<? extends InputFormat<?, ?>> bundle,
int nodeIndex,
Configuration conf) {
this.inputSplit = inputSplit;
this.bundle = bundle;
this.nodeIndex = nodeIndex;
this.conf = conf;
}
@Override
public InputSplit get() {
// Supplier contract: exposes the wrapped delegate split.
return inputSplit;
}
@Override
public void setConf(Configuration conf) {
// Copy the Configuration so the bundle's extra settings don't leak into the caller's conf.
this.conf = new Configuration(conf);
if (bundle != null) {
this.bundle.configure(this.conf);
}
}
@Override
public Configuration getConf() {
return conf;
}
public int getNodeIndex() {
return nodeIndex;
}
public Class<? extends InputFormat<?, ?>> getInputFormatClass() {
return bundle.getFormatClass();
}
@Override
public long getLength() throws IOException, InterruptedException {
// Delegate to the wrapped split.
return inputSplit.getLength();
}
@Override
public String[] getLocations() throws IOException, InterruptedException {
// Delegate to the wrapped split.
return inputSplit.getLocations();
}
@Override
public String toString() {
return String.format("CrunchInputSplit(%s)", inputSplit);
}
public void readFields(DataInput in) throws IOException {
if (conf == null) {
conf = new Configuration();
}
nodeIndex = in.readInt();
bundle = new FormatBundle();
bundle.setConf(conf);
bundle.readFields(in);
// Apply the bundle's stored settings to our conf before instantiating the split,
// so the split class and its deserializer see the format-specific configuration.
bundle.configure(conf); // yay bootstrap!
Class<? extends InputSplit> inputSplitClass = readClass(in);
inputSplit = (InputSplit) ReflectionUtils.newInstance(inputSplitClass, conf);
if (inputSplit instanceof Writable) {
((Writable) inputSplit).readFields(in);
} else {
// Non-Writable splits (e.g. serialized via another Hadoop serialization) go through
// the pluggable SerializationFactory mechanism instead.
SerializationFactory factory = new SerializationFactory(conf);
Deserializer deserializer = factory.getDeserializer(inputSplitClass);
deserializer.open((DataInputStream) in);
inputSplit = (InputSplit) deserializer.deserialize(inputSplit);
deserializer.close();
}
}
public void write(DataOutput out) throws IOException {
// Field order must mirror readFields exactly.
out.writeInt(nodeIndex);
bundle.write(out);
Text.writeString(out, inputSplit.getClass().getName());
if (inputSplit instanceof Writable) {
((Writable) inputSplit).write(out);
} else {
SerializationFactory factory = new SerializationFactory(conf);
Serializer serializer = factory.getSerializer(inputSplit.getClass());
serializer.open((OutputStream) out);
serializer.serialize(inputSplit);
serializer.close();
}
}
// Resolves a class name written by write(); wraps ClassNotFoundException as unchecked
// because readFields cannot declare it.
private Class readClass(DataInput in) throws IOException {
String className = Text.readString(in);
try {
return conf.getClassByName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException("readObject can't find class", e);
}
}
}
| 2,700 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/UniformHashPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import org.apache.crunch.util.HashUtil;
import org.apache.hadoop.mapreduce.Partitioner;
/**
* Hash partitioner which applies a supplemental hashing function to the hash code of values to ensure better
* distribution of keys over partitions.
*/
/**
 * Hash partitioner which applies a supplemental hashing function to the hash code of keys to
 * ensure better distribution of keys over partitions, even when {@code hashCode()}
 * implementations cluster in their low-order bits.
 */
public class UniformHashPartitioner<KEY, VALUE> extends Partitioner<KEY, VALUE> {

  @Override
  public int getPartition(KEY key, VALUE value, int numPartitions) {
    // Smear the raw hash to redistribute weak hashCode() outputs.
    int smeared = HashUtil.smearHash(key.hashCode());
    // Clear the sign bit so the modulus always yields a valid, non-negative partition index.
    int nonNegative = smeared & Integer.MAX_VALUE;
    return nonNegative % numPartitions;
  }
}
| 2,701 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/RuntimeParameters.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
/**
* Parameters used during the runtime execution.
*/
public final class RuntimeParameters {
// Enables Crunch's runtime error catching/logging in map and reduce tasks.
public static final String DEBUG = "crunch.debug";
// Base directory for the pipeline's temporary output paths.
public static final String TMP_DIR = "crunch.tmp.dir";
// Controls whether job progress is logged while the pipeline runs.
public static final String LOG_JOB_PROGRESS = "crunch.log.job.progress";
/**
 * Runtime property which indicates that a {@link org.apache.crunch.Source} should attempt to combine small files
 * to reduce overhead by default splits. Unless overridden by the {@code Source} implementation it will default to
 * {@code true}.
 */
public static final String DISABLE_COMBINE_FILE = "crunch.disable.combine.file";
// Block size used when combining small files into larger splits.
public static final String COMBINE_FILE_BLOCK_SIZE = "crunch.combine.file.block.size";
// NOTE(review): this key lives in the mapreduce.* namespace rather than crunch.* —
// presumably consumed by Hadoop's JobControl directory handling; confirm before renaming.
public static final String CREATE_DIR = "mapreduce.jobcontrol.createdir.ifnotexist";
// Disables defensive deep-copying of values between DoFns.
public static final String DISABLE_DEEP_COPY = "crunch.disable.deep.copy";
// Upper bound on the number of MapReduce jobs the pipeline runs concurrently.
public static final String MAX_RUNNING_JOBS = "crunch.max.running.jobs";
// Thread-pool size used when writing to file targets.
public static final String FILE_TARGET_MAX_THREADS = "crunch.file.target.max.threads";
// Maximum interval between job-status polls.
public static final String MAX_POLL_INTERVAL = "crunch.max.poll.interval";
// When set, file targets copy their output via DistCp.
public static final String FILE_TARGET_USE_DISTCP = "crunch.file.target.use.distcp";
// Cap on the number of DistCp tasks used for file-target copies.
public static final String FILE_TARGET_MAX_DISTCP_TASKS = "crunch.file.target.max.distcp.tasks";
// Per-task bandwidth cap (in MB) for DistCp copies.
public static final String FILE_TARGET_MAX_DISTCP_TASK_BANDWIDTH_MB = "crunch.file.target.max.distcp.task.bandwidth.mb";
// Not instantiated
private RuntimeParameters() {
}
}
| 2,702 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchMapper.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.util.List;
import org.apache.hadoop.mapreduce.Mapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The single {@link Mapper} implementation used by Crunch MapReduce jobs. It routes each
 * input record to the {@link RTNode} processing chain selected by the current input split.
 */
public class CrunchMapper extends Mapper<Object, Object, Object, Object> {

  private static final Logger LOG = LoggerFactory.getLogger(CrunchMapper.class);

  private RTNode node;
  private CrunchTaskContext ctxt;
  private boolean debug;

  @Override
  protected void setup(Mapper<Object, Object, Object, Object>.Context context) {
    // The task context is created once and reused if setup runs again.
    if (ctxt == null) {
      ctxt = new CrunchTaskContext(context, NodeContext.MAP);
      debug = ctxt.isDebugRun();
    }
    List<RTNode> nodes = ctxt.getNodes();
    // With multiple map-side nodes, the split tells us which chain owns this input.
    node = nodes.size() == 1
        ? nodes.get(0)
        : nodes.get(((CrunchInputSplit) context.getInputSplit()).getNodeIndex());
    node.initialize(ctxt);
  }

  @Override
  protected void map(Object k, Object v, Mapper<Object, Object, Object, Object>.Context context) {
    if (!debug) {
      node.process(k, v);
      return;
    }
    // Debug runs log processing failures instead of failing the task.
    try {
      node.process(k, v);
    } catch (Exception e) {
      LOG.error("Mapper exception", e);
    }
  }

  @Override
  protected void cleanup(Mapper<Object, Object, Object, Object>.Context context) {
    node.cleanup();
    ctxt.cleanup();
  }
}
| 2,703 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchInputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.crunch.io.CrunchInputs;
import org.apache.crunch.io.FormatBundle;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.collect.Lists;
/**
 * An {@link InputFormat} that multiplexes over the multiple input formats used by a single
 * Crunch MapReduce job. Splits are computed per {@link FormatBundle} and wrapped in
 * {@link CrunchInputSplit} instances that remember which plan node should consume them.
 */
public class CrunchInputFormat<K, V> extends InputFormat<K, V> {

  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    List<InputSplit> splits = Lists.newArrayList();
    Configuration base = job.getConfiguration();
    Map<FormatBundle, Map<Integer, List<Path>>> formatNodeMap = CrunchInputs.getFormatNodeMap(job);

    // First, build a map of InputFormats to Paths
    for (Map.Entry<FormatBundle, Map<Integer, List<Path>>> entry : formatNodeMap.entrySet()) {
      FormatBundle inputBundle = entry.getKey();
      // Copy the base Configuration so each bundle's settings stay isolated from the others.
      Configuration conf = new Configuration(base);
      inputBundle.configure(conf);
      // Job.getInstance replaces the deprecated Job(Configuration) constructor.
      Job jobCopy = Job.getInstance(conf);
      InputFormat<?, ?> format = (InputFormat<?, ?>) ReflectionUtils.newInstance(inputBundle.getFormatClass(),
          jobCopy.getConfiguration());
      // Wrap file-based formats in the combining format unless combining is disabled
      // (crunch.disable.combine.file defaults to true, i.e. combining is off by default).
      if (format instanceof FileInputFormat && !conf.getBoolean(RuntimeParameters.DISABLE_COMBINE_FILE, true)) {
        format = new CrunchCombineFileInputFormat<Object, Object>(jobCopy);
      }
      for (Map.Entry<Integer, List<Path>> nodeEntry : entry.getValue().entrySet()) {
        Integer nodeIndex = nodeEntry.getKey();
        List<Path> paths = nodeEntry.getValue();
        FileInputFormat.setInputPaths(jobCopy, paths.toArray(new Path[0]));

        // Get splits for each input path and tag with InputFormat
        // and Mapper types by wrapping in a TaggedInputSplit.
        List<InputSplit> pathSplits = format.getSplits(jobCopy);
        for (InputSplit pathSplit : pathSplits) {
          splits.add(new CrunchInputSplit(pathSplit, inputBundle, nodeIndex, jobCopy.getConfiguration()));
        }
      }
    }
    return splits;
  }

  @Override
  public RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext context) throws IOException,
      InterruptedException {
    return new CrunchRecordReader<K, V>(inputSplit, context);
  }
}
| 2,704 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/NodeContext.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.util.Locale;

import org.apache.crunch.impl.mr.plan.DoNode;
/**
* Enum that is associated with a serialized {@link DoNode} instance, so we know
* how to use it within the context of a particular MR job.
*
*/
public enum NodeContext {
  MAP,
  REDUCE,
  COMBINE;

  /**
   * Returns the configuration key under which the serialized node for this phase is
   * stored, e.g. {@code "crunch.donode.map"}.
   */
  public String getConfigurationKey() {
    // Use Locale.ROOT so the key is stable regardless of the JVM's default locale:
    // under Turkish locales "COMBINE".toLowerCase() would produce a dotless i,
    // yielding a different (broken) configuration key.
    return "crunch.donode." + name().toLowerCase(Locale.ROOT);
  }
}
| 2,705 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/DistributedPipeline.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.crunch.CreateOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.Source;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.TableSource;
import org.apache.crunch.Target;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.BaseInputTable;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.BaseUnionTable;
import org.apache.crunch.impl.dist.collect.EmptyPCollection;
import org.apache.crunch.impl.dist.collect.EmptyPTable;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PCollectionFactory;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.io.From;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.crunch.io.To;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
public abstract class DistributedPipeline implements Pipeline {
private static final Logger LOG = LoggerFactory.getLogger(DistributedPipeline.class);
private static final Random RANDOM = new Random();
private static final String CRUNCH_TMP_DIRS = "crunch.tmp.dirs";
private static final String CRUNCH_PRESERVE_TEMP_DIR = "crunch.preserve.tmp.dir";
private final String name;
protected final PCollectionFactory factory;
protected final Map<PCollectionImpl<?>, Set<Target>> outputTargets;
protected final Map<PCollectionImpl<?>, MaterializableIterable<?>> outputTargetsToMaterialize;
protected final Map<PipelineCallable<?>, Set<Target>> allPipelineCallables;
protected final Set<Target> appendedTargets;
private final boolean preserveTempDirectory;
private Path tempDirectory;
private int tempFileIndex;
private int nextAnonymousStageId;
private Configuration conf;
private PipelineCallable currentPipelineCallable;
/**
* Instantiate with a custom name and configuration.
*
* @param name Display name of the pipeline
* @param conf Configuration to be used within all MapReduce jobs run in the pipeline
*/
public DistributedPipeline(String name, Configuration conf, PCollectionFactory factory) {
this.name = name;
this.factory = factory;
this.outputTargets = Maps.newHashMap();
this.outputTargetsToMaterialize = Maps.newHashMap();
this.allPipelineCallables = Maps.newHashMap();
this.appendedTargets = Sets.newHashSet();
this.conf = conf;
this.tempFileIndex = 0;
this.nextAnonymousStageId = 0;
this.preserveTempDirectory = conf.getBoolean(CRUNCH_PRESERVE_TEMP_DIR, false);
}
public static boolean isTempDir(Job job, String outputPath) {
String tmpDirs = job.getConfiguration().get(CRUNCH_TMP_DIRS);
if (tmpDirs == null ) {
return false;
}
for (String p : tmpDirs.split(":")) {
if (outputPath.contains(p)) {
LOG.debug(String.format("Matched temporary directory : %s in %s", p, outputPath));
return true;
}
}
return false;
}
public PCollectionFactory getFactory() {
return factory;
}
@Override
public Configuration getConfiguration() {
return conf;
}
@Override
public void setConfiguration(Configuration conf) {
// Clear any existing temp dir
deleteTempDirectory();
this.conf = conf;
}
@Override
public PipelineResult done() {
PipelineResult res = PipelineResult.DONE;
if (!outputTargets.isEmpty()) {
res = run();
}
cleanup();
return res;
}
@Override
public <S> PCollection<S> union(List<PCollection<S>> collections) {
return factory.createUnionCollection(
Lists.transform(collections, new Function<PCollection<S>, PCollectionImpl<S>>() {
@Override
public PCollectionImpl<S> apply(PCollection<S> in) {
return (PCollectionImpl<S>) in;
}
}));
}
@Override
public <K, V> PTable<K, V> unionTables(List<PTable<K, V>> tables) {
return factory.createUnionTable(
Lists.transform(tables, new Function<PTable<K, V>, PTableBase<K, V>>() {
@Override
public PTableBase<K, V> apply(PTable<K, V> in) {
return (PTableBase<K, V>) in;
}
}));
}
@Override
public <Output> Output sequentialDo(PipelineCallable<Output> pipelineCallable) {
allPipelineCallables.put(pipelineCallable, getDependencies(pipelineCallable));
PipelineCallable last = currentPipelineCallable;
currentPipelineCallable = pipelineCallable;
Output out = pipelineCallable.generateOutput(this);
currentPipelineCallable = last;
return out;
}
public <S> PCollection<S> read(Source<S> source) {
return read(source, null);
}
public <S> PCollection<S> read(Source<S> source, String named) {
return factory.createInputCollection(source, named, this, getCurrentPDoOptions());
}
public <K, V> PTable<K, V> read(TableSource<K, V> source) {
return read(source, null);
}
public <K, V> PTable<K, V> read(TableSource<K, V> source, String named) {
return factory.createInputTable(source, named, this, getCurrentPDoOptions());
}
private ParallelDoOptions getCurrentPDoOptions() {
ParallelDoOptions.Builder pdb = ParallelDoOptions.builder();
if (currentPipelineCallable != null) {
pdb.targets(allPipelineCallables.get(currentPipelineCallable));
}
return pdb.build();
}
private Set<Target> getDependencies(PipelineCallable<?> callable) {
Set<Target> deps = Sets.newHashSet(callable.getAllTargets().values());
for (PCollection pc : callable.getAllPCollections().values()) {
PCollectionImpl pcImpl = (PCollectionImpl) pc;
deps.addAll(pcImpl.getTargetDependencies());
MaterializableIterable iter = (MaterializableIterable) pc.materialize();
Source pcSrc = iter.getSource();
if (pcSrc instanceof Target) {
deps.add((Target) pcSrc);
}
}
return deps;
}
public PCollection<String> readTextFile(String pathName) {
return read(From.textFile(pathName));
}
public void write(PCollection<?> pcollection, Target target) {
write(pcollection, target, Target.WriteMode.DEFAULT);
}
@SuppressWarnings("unchecked")
public void write(PCollection<?> pcollection, Target target,
Target.WriteMode writeMode) {
if (pcollection instanceof BaseGroupedTable) {
pcollection = ((BaseGroupedTable<?, ?>) pcollection).ungroup();
} else if (pcollection instanceof BaseUnionCollection || pcollection instanceof BaseUnionTable) {
pcollection = pcollection.parallelDo("UnionCollectionWrapper",
(MapFn) IdentityFn.<Object> getInstance(), pcollection.getPType());
}
// Last modified time is only relevant when write mode is checkpoint
long lastModifiedAt = (writeMode == Target.WriteMode.CHECKPOINT)
? ((PCollectionImpl) pcollection).getLastModifiedAt() : -1;
boolean exists = target.handleExisting(writeMode, lastModifiedAt, getConfiguration());
if (exists && writeMode == Target.WriteMode.CHECKPOINT) {
SourceTarget<?> st = target.asSourceTarget(pcollection.getPType());
if (st == null) {
throw new CrunchRuntimeException("Target " + target + " does not support checkpointing");
} else {
((PCollectionImpl) pcollection).materializeAt(st);
}
return;
} else if (writeMode != Target.WriteMode.APPEND && targetInCurrentRun(target)) {
throw new CrunchRuntimeException("Target " + target + " is already written in current run." +
" Use WriteMode.APPEND in order to write additional data to it.");
}
// Need special handling for append targets in the case of materialization
if (writeMode == Target.WriteMode.APPEND) {
appendedTargets.add(target);
}
addOutput((PCollectionImpl<?>) pcollection, target);
}
private boolean targetInCurrentRun(Target target) {
for (Set<Target> targets : outputTargets.values()) {
if (targets.contains(target)) {
return true;
}
}
return false;
}
private void addOutput(PCollectionImpl<?> impl, Target target) {
if (!outputTargets.containsKey(impl)) {
outputTargets.put(impl, Sets.<Target> newHashSet());
}
outputTargets.get(impl).add(target);
}
@Override
public <S> PCollection<S> emptyPCollection(PType<S> ptype) {
return new EmptyPCollection<S>(this, ptype);
}
@Override
public <K, V> PTable<K, V> emptyPTable(PTableType<K, V> ptype) {
return new EmptyPTable<K, V>(this, ptype);
}
@Override
public <S> PCollection<S> create(Iterable<S> contents, PType<S> ptype) {
return create(contents, ptype, CreateOptions.none());
}
@Override
public <S> PCollection<S> create(Iterable<S> contents, PType<S> ptype, CreateOptions options) {
if (Iterables.isEmpty(contents)) {
return emptyPCollection(ptype);
}
ReadableSource<S> src = null;
try {
src = ptype.createSourceTarget(getConfiguration(), createTempPath(), contents, options.getParallelism());
} catch (IOException e) {
throw new CrunchRuntimeException("Error creating PCollection: " + contents, e);
}
return read(src);
}
@Override
public <K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype) {
return create(contents, ptype, CreateOptions.none());
}
@Override
public <K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype, CreateOptions options) {
if (Iterables.isEmpty(contents)) {
return emptyPTable(ptype);
}
ReadableSource<Pair<K, V>> src = null;
try {
src = ptype.createSourceTarget(getConfiguration(), createTempPath(), contents, options.getParallelism());
} catch (IOException e) {
throw new CrunchRuntimeException("Error creating PTable: " + contents, e);
}
return read(src).parallelDo(IdentityFn.<Pair<K, V>>getInstance(), ptype);
}
/**
* Retrieve a ReadableSourceTarget that provides access to the contents of a {@link PCollection}.
* This is primarily intended as a helper method to {@link #materialize(PCollection)}. The
* underlying data of the ReadableSourceTarget may not be actually present until the pipeline is
* run.
*
* @param pcollection The collection for which the ReadableSourceTarget is to be retrieved
* @return The ReadableSourceTarget
* @throws IllegalArgumentException If no ReadableSourceTarget can be retrieved for the given
* PCollection
*/
public <T> ReadableSource<T> getMaterializeSourceTarget(PCollection<T> pcollection) {
PCollectionImpl<T> impl = toPCollectionImpl(pcollection);
// First, check to see if this is a readable input collection.
if (impl instanceof BaseInputCollection) {
BaseInputCollection<T> ic = (BaseInputCollection<T>) impl;
if (ic.getSource() instanceof ReadableSource) {
return (ReadableSource) ic.getSource();
} else {
throw new IllegalArgumentException(
"Cannot materialize non-readable input collection: " + ic);
}
} else if (impl instanceof BaseInputTable) {
BaseInputTable it = (BaseInputTable) impl;
if (it.getSource() instanceof ReadableSource) {
return (ReadableSource) it.getSource();
} else {
throw new IllegalArgumentException(
"Cannot materialize non-readable input table: " + it);
}
}
// Next, check to see if this pcollection has already been materialized.
SourceTarget<?> matTarget = impl.getMaterializedAt();
if (matTarget != null && matTarget instanceof ReadableSourceTarget) {
return (ReadableSourceTarget<T>) matTarget;
}
// Check to see if we plan on materializing this collection on the
// next run.
ReadableSourceTarget<T> srcTarget = null;
if (outputTargets.containsKey(pcollection)) {
for (Target target : outputTargets.get(impl)) {
if (target instanceof ReadableSourceTarget && !appendedTargets.contains(target)) {
return (ReadableSourceTarget<T>) target;
}
}
}
// If we're not planning on materializing it already, create a temporary
// output to hold the materialized records and return that.
SourceTarget<T> st = createIntermediateOutput(pcollection.getPType());
if (!(st instanceof ReadableSourceTarget)) {
throw new IllegalArgumentException("The PType for the given PCollection is not readable"
+ " and cannot be materialized");
} else {
srcTarget = (ReadableSourceTarget<T>) st;
addOutput(impl, srcTarget);
return srcTarget;
}
}
/**
* Safely cast a PCollection into a PCollectionImpl, including handling the case of
* UnionCollections.
*
* @param pcollection The PCollection to be cast/transformed
* @return The PCollectionImpl representation
*/
private <T> PCollectionImpl<T> toPCollectionImpl(PCollection<T> pcollection) {
PCollectionImpl<T> pcollectionImpl = null;
if (pcollection instanceof BaseUnionCollection || pcollection instanceof BaseUnionTable) {
pcollectionImpl = (PCollectionImpl<T>) pcollection.parallelDo("UnionCollectionWrapper",
(MapFn) IdentityFn.<Object> getInstance(), pcollection.getPType());
} else {
pcollectionImpl = (PCollectionImpl<T>) pcollection;
}
return pcollectionImpl;
}
public <T> SourceTarget<T> createIntermediateOutput(PType<T> ptype) {
return ptype.getDefaultFileSource(createTempPath());
}
public Path createTempPath() {
tempFileIndex++;
Path path = new Path(getTempDirectory(), "p" + tempFileIndex);
storeTempDirLocation(path);
return path;
}
@VisibleForTesting
protected void storeTempDirLocation(Path t) {
String tmpCfg = conf.get(CRUNCH_TMP_DIRS);
String tmpDir = t.toString();
LOG.debug(String.format("Temporary directory created: %s", tmpDir));
if (tmpCfg != null && !tmpCfg.contains(tmpDir)) {
conf.set(CRUNCH_TMP_DIRS, String.format("%s:%s", tmpCfg, tmpDir));
}
else if (tmpCfg == null) {
conf.set(CRUNCH_TMP_DIRS, tmpDir);
}
}
private synchronized Path getTempDirectory() {
if (tempDirectory == null) {
Path dir = createTemporaryPath(conf);
try {
FileSystem fs = dir.getFileSystem(conf);
fs.mkdirs(dir);
fs.deleteOnExit(dir);
} catch (IOException e) {
throw new RuntimeException("Cannot create job output directory " + dir, e);
}
tempDirectory = dir;
}
return tempDirectory;
}
private static Path createTemporaryPath(Configuration conf) {
//TODO: allow configurable
String baseDir = conf.get("crunch.tmp.dir", "/tmp");
return new Path(baseDir, "crunch-" + (RANDOM.nextInt() & Integer.MAX_VALUE));
}
@Override
public <T> void writeTextFile(PCollection<T> pcollection, String pathName) {
pcollection.parallelDo("asText", new StringifyFn<T>(), Writables.strings())
.write(To.textFile(pathName));
}
private static class StringifyFn<T> extends MapFn<T, String> {
@Override
public String map(T input) {
return input.toString();
}
}
@Override
public void cleanup(boolean force) {
if (force || outputTargets.isEmpty()) {
deleteTempDirectory();
} else {
LOG.warn("Not running cleanup while output targets remain.");
}
}
private void cleanup() {
cleanup(false);
}
private synchronized void deleteTempDirectory() {
Path toDelete = tempDirectory;
tempDirectory = null;
if (toDelete != null) {
try {
FileSystem fs = toDelete.getFileSystem(conf);
if (fs.exists(toDelete)) {
fs.delete(toDelete, true);
}
} catch (IOException e) {
LOG.info("Exception during cleanup", e);
}
}
}
@Override
protected void finalize() throws Throwable {
if (tempDirectory != null) {
LOG.warn("Temp directory {} still exists; was Pipeline.done() called?", tempDirectory);
if (!preserveTempDirectory) {
deleteTempDirectory();
}
}
super.finalize();
}
/**
 * Returns a monotonically increasing id used to name anonymous pipeline stages.
 * NOTE(review): not synchronized — appears to assume single-threaded pipeline
 * construction; confirm before calling concurrently.
 */
public int getNextAnonymousStageId() {
return nextAnonymousStageId++;
}
/**
 * Enables Crunch's runtime error-catching support by setting the
 * "crunch.debug" flag in this pipeline's configuration.
 */
@Override
public void enableDebug() {
// Turn on Crunch runtime error catching.
//TODO: allow configurable
getConfiguration().setBoolean("crunch.debug", true);
}
/** Returns the name assigned to this pipeline at construction time. */
@Override
public String getName() {
return name;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import org.apache.crunch.impl.mr.plan.DoNode;
/**
 * Mixin for PCollection implementations that can be represented as a node in the
 * MapReduce planner's DAG.
 */
public interface MRCollection {
/** Creates the {@code DoNode} that represents this collection in the MR execution plan. */
DoNode createDoNode();
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.DelegatingReadableData;
import java.util.List;
/**
 * A {@link PTable} produced by applying a {@link DoFn} that emits key/value pairs
 * to a parent collection. When the supplied function is itself a {@link CombineFn},
 * it is retained separately so the planner can use it for map-side combining.
 */
public class BaseDoTable<K, V> extends PTableBase<K, V> implements PTable<K, V> {

  private final PCollectionImpl<?> parent;
  protected final DoFn<?, Pair<K, V>> combineFn;
  protected final DoFn<?, Pair<K, V>> fn;
  protected final PTableType<K, V> type;

  /** Returns {@code fn} itself when it is a CombineFn, otherwise {@code null}. */
  @SuppressWarnings("unchecked")
  private static <S, K, V> CombineFn<K, V> asCombineFn(final DoFn<S, Pair<K, V>> fn) {
    if (fn instanceof CombineFn) {
      // The emitted pair type Pair<K, V> is fixed by this method's signature, so a
      // DoFn emitting Pair<K, V> that is a CombineFn must be a CombineFn<K, V>.
      return (CombineFn<K, V>) fn;
    }
    return null;
  }

  protected <S> BaseDoTable(String name, PCollectionImpl<S> parent, DoFn<S, Pair<K, V>> fn, PTableType<K, V> ntype,
      ParallelDoOptions options) {
    this(name, parent, asCombineFn(fn), fn, ntype, options);
  }

  protected <S> BaseDoTable(
      String name,
      PCollectionImpl<S> parent,
      CombineFn<K, V> combineFn,
      DoFn<S, Pair<K, V>> fn,
      PTableType<K, V> ntype) {
    this(name, parent, combineFn, fn, ntype, ParallelDoOptions.builder().build());
  }

  protected <S> BaseDoTable(
      String name,
      PCollectionImpl<S> parent,
      CombineFn<K, V> combineFn,
      DoFn<S, Pair<K, V>> fn,
      PTableType<K, V> ntype,
      ParallelDoOptions options) {
    super(name, parent.getPipeline(), options);
    this.parent = parent;
    this.combineFn = combineFn;
    this.fn = fn;
    this.type = ntype;
  }

  /**
   * Estimated output size: the parent's size scaled by the function's scale factor,
   * floored at 1 byte so a non-empty parent never reports an empty result.
   */
  @Override
  protected long getSizeInternal() {
    long parentSize = parent.getSize();
    if (parentSize == 0L) {
      return parentSize;
    }
    return Math.max(1L, (long) (fn.scaleFactor() * parentSize));
  }

  @Override
  public PTableType<K, V> getPTableType() {
    return type;
  }

  /**
   * Readable view of this table. Output of a grouped table must be materialized;
   * otherwise the parent's readable view is wrapped with this table's function.
   */
  @Override
  protected ReadableData<Pair<K, V>> getReadableDataInternal() {
    if (getOnlyParent() instanceof BaseGroupedTable) {
      return materializedData();
    }
    return new DelegatingReadableData(getOnlyParent().asReadable(false), fn);
  }

  @Override
  public PType<Pair<K, V>> getPType() {
    return type;
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.<PCollectionImpl<?>> of(parent);
  }

  @Override
  public long getLastModifiedAt() {
    return parent.getLastModifiedAt();
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    visitor.visitDoTable(this);
  }
}
| 2,708 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/EmptyPTable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import java.util.List;
/**
 * A PTable with no contents, no parents, and no work to plan. Used as the
 * identity element when unioning or as a placeholder for absent inputs.
 */
public class EmptyPTable<K, V> extends PTableBase<K, V> {

  private final PTableType<K, V> ptype;

  public EmptyPTable(DistributedPipeline pipeline, PTableType<K, V> ptype) {
    super("EMPTY", pipeline);
    this.ptype = ptype;
  }

  @Override
  public PTableType<K, V> getPTableType() {
    return ptype;
  }

  @Override
  public PType<Pair<K, V>> getPType() {
    return ptype;
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.of();
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    // Nothing to plan for an empty table, so no visitor callback is made.
  }

  @Override
  protected ReadableData<Pair<K, V>> getReadableDataInternal() {
    return new EmptyReadableData<Pair<K, V>>();
  }

  @Override
  protected long getSizeInternal() {
    return 0L;
  }

  @Override
  public long getLastModifiedAt() {
    return 0L;
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.crunch.ReadableData;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.UnionReadableData;
import java.util.List;
/**
 * A PCollection formed by unioning several parent collections of the same element
 * type. Its size is the sum of the parents' sizes and its PType is taken from the
 * first parent; all parents must belong to the same pipeline.
 */
public class BaseUnionCollection<S> extends PCollectionImpl<S> {

  private List<PCollectionImpl<S>> parents;
  private long size = 0;
  private long lastModifiedAt = -1;

  /** Builds a display name of the form {@code union(a,b,...)} from the parents' names. */
  private static String flatName(List<? extends PCollectionImpl<?>> collections) {
    StringBuilder sb = new StringBuilder("union(");
    for (int i = 0; i < collections.size(); i++) {
      if (i != 0) {
        sb.append(',');
      }
      sb.append(collections.get(i).getName());
    }
    return sb.append(')').toString();
  }

  protected BaseUnionCollection(List<? extends PCollectionImpl<S>> collections) {
    super(flatName(collections), collections.get(0).getPipeline());
    this.parents = ImmutableList.copyOf(collections);
    for (PCollectionImpl<S> parent : parents) {
      if (this.pipeline != parent.getPipeline()) {
        throw new IllegalStateException("Cannot union PCollections from different Pipeline instances");
      }
      size += parent.getSize();
    }
  }

  /** Breakpoints on a union propagate to every parent branch. */
  @Override
  public void setBreakpoint() {
    super.setBreakpoint();
    for (PCollectionImpl<?> parent : getParents()) {
      parent.setBreakpoint();
    }
  }

  @Override
  protected long getSizeInternal() {
    return size;
  }

  /**
   * Newest modification time across all parents; lazily computed and cached.
   * Stays -1 when no parent can report a modification time.
   */
  @Override
  public long getLastModifiedAt() {
    if (lastModifiedAt == -1) {
      for (PCollectionImpl<S> parent : parents) {
        long parentLastModifiedAt = parent.getLastModifiedAt();
        if (parentLastModifiedAt > lastModifiedAt) {
          lastModifiedAt = parentLastModifiedAt;
        }
      }
    }
    return lastModifiedAt;
  }

  /**
   * Readable view over the union: if any parent is a grouped table the whole union
   * must be materialized; otherwise the parents' readable views are concatenated.
   */
  @Override
  protected ReadableData<S> getReadableDataInternal() {
    List<ReadableData<S>> prds = Lists.newArrayList();
    for (PCollectionImpl<S> parent : parents) {
      if (parent instanceof BaseGroupedTable) {
        return materializedData();
      } else {
        prds.add(parent.asReadable(false));
      }
    }
    return new UnionReadableData<S>(prds);
  }

  @Override
  public PType<S> getPType() {
    return parents.get(0).getPType();
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.<PCollectionImpl<?>> copyOf(parents);
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    visitor.visitUnionCollection(this);
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Source;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.types.PType;
import java.util.List;
/**
 * A leaf PCollection backed directly by a {@link Source}. Two input collections
 * are considered equal iff their sources are equal, which lets the planner dedupe
 * reads of the same input.
 */
public class BaseInputCollection<S> extends PCollectionImpl<S> {

  protected final Source<S> source;

  public BaseInputCollection(Source<S> source, DistributedPipeline pipeline) {
    super(source.toString(), pipeline);
    this.source = source;
  }

  public BaseInputCollection(Source<S> source, String name, DistributedPipeline pipeline, ParallelDoOptions doOpts) {
    super(name == null ? source.toString() : name, pipeline, doOpts);
    this.source = source;
  }

  /**
   * Readable view: served directly from the source when it supports reading,
   * otherwise via materialization.
   */
  @Override
  protected ReadableData<S> getReadableDataInternal() {
    if (source instanceof ReadableSource) {
      return ((ReadableSource<S>) source).asReadable();
    } else {
      return materializedData();
    }
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    visitor.visitInputCollection(this);
  }

  @Override
  public PType<S> getPType() {
    return source.getType();
  }

  public Source<S> getSource() {
    return source;
  }

  /** True when this input's source is also a pending output target of the pipeline. */
  @Override
  protected boolean waitingOnTargets() {
    return doOptions.getTargets().contains(source);
  }

  @Override
  protected long getSizeInternal() {
    long sz = source.getSize(pipeline.getConfiguration());
    if (sz < 0) {
      throw new IllegalStateException("Input source " + source + " does not exist!");
    }
    return sz;
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.of();
  }

  @Override
  public long getLastModifiedAt() {
    return source.getLastModifiedAt(pipeline.getConfiguration());
  }

  @Override
  public boolean equals(Object obj) {
    // instanceof already rejects null, so no separate null check is needed.
    if (!(obj instanceof BaseInputCollection)) {
      return false;
    }
    return source.equals(((BaseInputCollection) obj).source);
  }

  @Override
  public int hashCode() {
    return new HashCodeBuilder().append(source).toHashCode();
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.FilterFn;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.fn.ExtractKeyFn;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.materialize.pobject.CollectionPObject;
import org.apache.crunch.materialize.pobject.FirstElementPObject;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
/**
 * Base implementation of {@link PCollection} for distributed execution engines.
 * Tracks the collection's name, owning pipeline, parallel-do options, cached size,
 * materialization state, and planner hints (breakpoints). Subclasses supply the
 * parents, PType, readable view, and size computation.
 */
public abstract class PCollectionImpl<S> implements PCollection<S> {
private final String name;
protected DistributedPipeline pipeline;
// True once materialize()/materializedData() has been requested for this collection.
private boolean materialized;
// The SourceTarget this collection was persisted to, if any; used to short-circuit reads.
protected SourceTarget<S> materializedAt;
protected final ParallelDoOptions doOptions;
// Cached size in bytes; -1 means "not yet computed".
private long size = -1L;
// Planner hint: force a plan split at this collection.
private boolean breakpoint;
public PCollectionImpl(String name, DistributedPipeline pipeline) {
this(name, pipeline, ParallelDoOptions.builder().build());
}
public PCollectionImpl(String name, DistributedPipeline pipeline, ParallelDoOptions doOptions) {
this.name = name;
this.pipeline = pipeline;
this.doOptions = doOptions;
}
@Override
public String getName() {
return name;
}
@Override
public DistributedPipeline getPipeline() {
return pipeline;
}
public ParallelDoOptions getParallelDoOptions() {
return doOptions;
}
@Override
public String toString() {
return getName();
}
/**
 * Returns an Iterable view of this collection's contents, running the pipeline if
 * needed. Previously-materialized data is read back directly; a known-empty
 * collection short-circuits to an empty list.
 */
@Override
public Iterable<S> materialize() {
if (!waitingOnTargets() && getSize() == 0) {
System.err.println("Materializing an empty PCollection: " + this.getName());
return Collections.emptyList();
}
if (materializedAt != null && (materializedAt instanceof ReadableSource)) {
try {
return ((ReadableSource<S>) materializedAt).read(getPipeline().getConfiguration());
} catch (IOException e) {
throw new CrunchRuntimeException("Error reading materialized data", e);
}
}
materialized = true;
return pipeline.materialize(this);
}
@Override
public PCollection<S> cache() {
return cache(CachingOptions.DEFAULT);
}
@Override
public PCollection<S> cache(CachingOptions options) {
pipeline.cache(this, options);
return this;
}
@Override
public PCollection<S> union(PCollection<S> other) {
return union(new PCollection[] { other });
}
/**
 * Unions this collection with the given ones. Each argument is passed through an
 * identity parallelDo first, so the union's parents are uniform implementations.
 */
@Override
public PCollection<S> union(PCollection<S>... collections) {
List<PCollectionImpl<S>> internal = Lists.newArrayList();
internal.add(this);
for (PCollection<S> collection : collections) {
internal.add((PCollectionImpl<S>) collection.parallelDo(IdentityFn.<S>getInstance(), collection.getPType()));
}
return pipeline.getFactory().createUnionCollection(internal);
}
@Override
public <T> PCollection<T> parallelDo(DoFn<S, T> fn, PType<T> type) {
return parallelDo("S" + pipeline.getNextAnonymousStageId(), fn, type);
}
@Override
public <T> PCollection<T> parallelDo(String name, DoFn<S, T> fn, PType<T> type) {
return parallelDo(name, fn, type, ParallelDoOptions.builder().build());
}
// All parallelDo variants funnel here; chaining goes through getChainingCollection()
// so subclasses (e.g. grouped tables) can substitute a copy of themselves.
@Override
public <T> PCollection<T> parallelDo(String name, DoFn<S, T> fn, PType<T> type,
ParallelDoOptions options) {
return pipeline.getFactory().createDoCollection(name, getChainingCollection(), fn, type, options);
}
@Override
public <K, V> PTable<K, V> parallelDo(DoFn<S, Pair<K, V>> fn, PTableType<K, V> type) {
return parallelDo("S" + pipeline.getNextAnonymousStageId(), fn, type);
}
@Override
public <K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> fn, PTableType<K, V> type) {
return parallelDo(name, fn, type, ParallelDoOptions.builder().build());
}
@Override
public <K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> fn, PTableType<K, V> type,
ParallelDoOptions options) {
return pipeline.getFactory().createDoTable(name, getChainingCollection(), fn, type, options);
}
// If this collection was already persisted, write from the persisted copy rather
// than re-running the upstream computation.
public PCollection<S> write(Target target) {
if (materializedAt != null) {
getPipeline().write(
pipeline.getFactory().createInputCollection(materializedAt, getName(), pipeline, doOptions),
target);
} else {
getPipeline().write(this, target);
}
return this;
}
@Override
public PCollection<S> write(Target target, Target.WriteMode writeMode) {
if (materializedAt != null) {
getPipeline().write(
pipeline.getFactory().createInputCollection(materializedAt, getName(), pipeline, doOptions),
target,
writeMode);
} else {
getPipeline().write(this, target, writeMode);
}
return this;
}
/** Visitor over the concrete PCollection implementation types, used by the planner. */
public interface Visitor {
void visitInputCollection(BaseInputCollection<?> collection);
void visitUnionCollection(BaseUnionCollection<?> collection);
void visitDoCollection(BaseDoCollection<?> collection);
void visitDoTable(BaseDoTable<?, ?> collection);
void visitGroupedTable(BaseGroupedTable<?, ?> collection);
}
// A materialized collection is presented to the planner as a plain input read of
// its persisted location, cutting off the upstream lineage.
public void accept(Visitor visitor) {
if (materializedAt != null) {
visitor.visitInputCollection(
pipeline.getFactory().createInputCollection(materializedAt, getName(), pipeline, doOptions));
} else {
acceptInternal(visitor);
}
}
/** True if any ancestor is still waiting on an output target to be written. */
protected boolean waitingOnTargets() {
for (PCollectionImpl parent : getParents()) {
if (parent.waitingOnTargets()) {
return true;
}
}
return false;
}
protected abstract void acceptInternal(Visitor visitor);
public void setBreakpoint() {
this.breakpoint = true;
}
public boolean isBreakpoint() {
return breakpoint;
}
/** {@inheritDoc} */
@Override
public PObject<Collection<S>> asCollection() {
return new CollectionPObject<S>(this);
}
@Override
public PObject<S> first() { return new FirstElementPObject<S>(this); }
@Override
public <Output> Output sequentialDo(String label, PipelineCallable<Output> pipelineCallable) {
pipelineCallable.dependsOn(label, this);
return getPipeline().sequentialDo(pipelineCallable);
}
public SourceTarget<S> getMaterializedAt() {
return materializedAt;
}
/** Records where this collection was persisted and refreshes the cached size from it. */
public void materializeAt(SourceTarget<S> sourceTarget) {
this.materializedAt = sourceTarget;
this.size = materializedAt.getSize(getPipeline().getConfiguration());
}
@Override
public PCollection<S> filter(FilterFn<S> filterFn) {
return parallelDo("Filter with " + filterFn.getClass().getSimpleName(), filterFn, getPType());
}
@Override
public PCollection<S> filter(String name, FilterFn<S> filterFn) {
return parallelDo(name, filterFn, getPType());
}
@Override
public <K> PTable<K, S> by(MapFn<S, K> mapFn, PType<K> keyType) {
return parallelDo(new ExtractKeyFn<K, S>(mapFn), getTypeFamily().tableOf(keyType, getPType()));
}
@Override
public <K> PTable<K, S> by(String name, MapFn<S, K> mapFn, PType<K> keyType) {
return parallelDo(name, new ExtractKeyFn<K, S>(mapFn), getTypeFamily().tableOf(keyType, getPType()));
}
@Override
public PTable<S, Long> count() {
return Aggregate.count(this);
}
@Override
public PObject<Long> length() {
return Aggregate.length(this);
}
@Override
public PObject<S> max() {
return Aggregate.max(this);
}
@Override
public PObject<S> min() {
return Aggregate.min(this);
}
@Override
public PCollection<S> aggregate(Aggregator<S> aggregator) {
return Aggregate.aggregate(this, aggregator);
}
@Override
public PTypeFamily getTypeFamily() {
return getPType().getFamily();
}
public abstract List<PCollectionImpl<?>> getParents();
/** Returns the single parent; throws if this collection does not have exactly one. */
public PCollectionImpl<?> getOnlyParent() {
List<PCollectionImpl<?>> parents = getParents();
if (parents.size() != 1) {
throw new IllegalArgumentException("Expected exactly one parent PCollection");
}
return parents.get(0);
}
/** Union of this collection's target dependencies with those of all ancestors. */
public Set<Target> getTargetDependencies() {
Set<Target> targetDeps = Sets.<Target>newHashSet(doOptions.getTargets());
for (PCollectionImpl<?> parent : getParents()) {
targetDeps.addAll(parent.getTargetDependencies());
}
return targetDeps;
}
/** Depth of this collection in the lineage DAG (1 for a leaf input). */
public int getDepth() {
int parentMax = 0;
for (PCollectionImpl parent : getParents()) {
parentMax = Math.max(parent.getDepth(), parentMax);
}
return 1 + parentMax;
}
/**
 * Returns a ReadableData view: from the persisted location if one exists, via
 * materialization if requested (or previously triggered), otherwise from the
 * subclass's in-plan readable view.
 */
@Override
public ReadableData<S> asReadable(boolean materialize) {
if (materializedAt != null && (materializedAt instanceof ReadableSource)) {
return ((ReadableSource) materializedAt).asReadable();
} else if (materialized || materialize) {
return pipeline.getMaterializeSourceTarget(this).asReadable();
} else {
return getReadableDataInternal();
}
}
/** Marks this collection as materialized and returns a readable view of the persisted data. */
protected ReadableData<S> materializedData() {
materialized = true;
return pipeline.getMaterializeSourceTarget(this).asReadable();
}
protected abstract ReadableData<S> getReadableDataInternal();
/** Size in bytes, computed once and cached. */
@Override
public long getSize() {
if (size < 0) {
this.size = getSizeInternal();
}
return size;
}
protected abstract long getSizeInternal();
/**
 * The time of the most recent modification to one of the input sources to the collection. If the time can
 * not be determined then {@code -1} should be returned.
 * @return time of the most recent modification to one of the input sources to the collection.
 */
public abstract long getLastModifiedAt();
/**
 * Retrieve the PCollectionImpl to be used for chaining within PCollectionImpls further down the pipeline.
 * @return The PCollectionImpl instance to be chained
 */
protected PCollectionImpl<S> getChainingCollection() {
return this;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Target;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PType;
import java.util.List;
import java.util.Set;
/**
 * The result of grouping a {@link PTable} by key: a collection of
 * (key, Iterable&lt;value&gt;) pairs. Wraps the parent table plus optional
 * grouping options (partitioner, comparators, source-target dependencies).
 */
public class BaseGroupedTable<K, V> extends PCollectionImpl<Pair<K, Iterable<V>>>
implements PGroupedTable<K, V> {
protected final PTableBase<K, V> parent;
// May be null, meaning default grouping behavior.
protected final GroupingOptions groupingOptions;
protected final PGroupedTableType<K, V> ptype;
protected BaseGroupedTable(PTableBase<K, V> parent) {
this(parent, null);
}
protected BaseGroupedTable(PTableBase<K, V> parent, GroupingOptions groupingOptions) {
super("GBK", parent.getPipeline());
this.parent = parent;
this.groupingOptions = groupingOptions;
this.ptype = parent.getPTableType().getGroupedTableType();
}
// Grouped output only exists inside a shuffle; it cannot be served as ReadableData.
@Override
protected ReadableData<Pair<K, Iterable<V>>> getReadableDataInternal() {
throw new UnsupportedOperationException("PGroupedTable does not currently support readability");
}
// Size estimate: grouping does not change the amount of data, so reuse the parent's.
@Override
protected long getSizeInternal() {
return parent.getSizeInternal();
}
@Override
public PType<Pair<K, Iterable<V>>> getPType() {
return ptype;
}
/**
 * Applies {@code combineFn} map-side and {@code reduceFn} reduce-side, yielding a
 * table with the parent's original table type.
 */
@Override
public PTable<K, V> combineValues(CombineFn<K, V> combineFn, CombineFn<K, V> reduceFn) {
return pipeline.getFactory().createDoTable(
"combine",
getChainingCollection(),
combineFn,
reduceFn,
parent.getPTableType());
}
@Override
public PTable<K, V> combineValues(CombineFn<K, V> combineFn) {
return combineValues(combineFn, combineFn);
}
@Override
public PTable<K, V> combineValues(Aggregator<V> agg) {
return combineValues(Aggregators.<K, V>toCombineFn(agg, parent.getValueType()));
}
@Override
public PTable<K, V> combineValues(Aggregator<V> combineAgg, Aggregator<V> reduceAgg) {
return combineValues(Aggregators.<K, V>toCombineFn(combineAgg, parent.getValueType()),
Aggregators.<K, V>toCombineFn(reduceAgg, parent.getValueType()));
}
/** Flattens a grouped entry back into one (key, value) pair per value. */
private static class Ungroup<K, V> extends DoFn<Pair<K, Iterable<V>>, Pair<K, V>> {
@Override
public void process(Pair<K, Iterable<V>> input, Emitter<Pair<K, V>> emitter) {
for (V v : input.second()) {
emitter.emit(Pair.of(input.first(), v));
}
}
}
@Override
public PTable<K, V> ungroup() {
return parallelDo("ungroup", new Ungroup<K, V>(), parent.getPTableType());
}
@Override
public <U> PTable<K, U> mapValues(MapFn<Iterable<V>, U> mapFn, PType<U> ptype) {
return PTables.mapValues(this, mapFn, ptype);
}
@Override
public <U> PTable<K, U> mapValues(String name, MapFn<Iterable<V>, U> mapFn, PType<U> ptype) {
return PTables.mapValues(name, this, mapFn, ptype);
}
@Override
public PGroupedTableType<K, V> getGroupedTableType() {
return ptype;
}
// Grouping options may add their own source-target dependencies on top of the
// usual ancestor dependencies.
@Override
public Set<Target> getTargetDependencies() {
Set<Target> td = Sets.newHashSet(super.getTargetDependencies());
if (groupingOptions != null) {
td.addAll(groupingOptions.getSourceTargets());
}
return ImmutableSet.copyOf(td);
}
@Override
public List<PCollectionImpl<?>> getParents() {
return ImmutableList.<PCollectionImpl<?>> of(parent);
}
@Override
public long getLastModifiedAt() {
return parent.getLastModifiedAt();
}
@Override
protected void acceptInternal(Visitor visitor) {
visitor.visitGroupedTable(this);
}
@Override
protected PCollectionImpl<Pair<K, Iterable<V>>> getChainingCollection() {
// Use a copy for chaining to allow sending the output of a single grouped table to multiple outputs
// TODO This should be implemented in a cleaner way in the planner
return pipeline.getFactory().createGroupedTable(parent, groupingOptions);
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import java.util.List;
/**
 * Factory for the concrete PCollection/PTable implementation classes used by a
 * particular execution engine. A {@code DistributedPipeline} delegates all
 * collection construction here so engines can substitute their own subclasses.
 */
public interface PCollectionFactory {
/** Creates a leaf collection backed by the given source. */
<S> BaseInputCollection<S> createInputCollection(
Source<S> source,
String named,
DistributedPipeline distributedPipeline,
ParallelDoOptions doOpts);
/** Creates a leaf table backed by the given table source. */
<K, V> BaseInputTable<K, V> createInputTable(
TableSource<K,V> source,
String named,
DistributedPipeline distributedPipeline,
ParallelDoOptions doOpts);
/** Creates the union of the given collections; all must share a pipeline. */
<S> BaseUnionCollection<S> createUnionCollection(List<? extends PCollectionImpl<S>> internal);
/** Creates a collection derived from {@code chainingCollection} by applying {@code fn}. */
<S, T> BaseDoCollection<T> createDoCollection(
String name,
PCollectionImpl<S> chainingCollection,
DoFn<S,T> fn,
PType<T> type,
ParallelDoOptions options);
/** Creates a table derived from {@code chainingCollection} by applying a pair-emitting {@code fn}. */
<S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> chainingCollection,
DoFn<S,Pair<K, V>> fn,
PTableType<K, V> type,
ParallelDoOptions options);
/** Creates a table with an explicit map-side combiner alongside the main function. */
<S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> chainingCollection,
CombineFn<K, V> combineFn,
DoFn<S,Pair<K, V>> fn,
PTableType<K, V> type);
/** Creates the group-by-key view of {@code parent}; {@code groupingOptions} may be null. */
<K, V> BaseGroupedTable<K, V> createGroupedTable(PTableBase<K,V> parent, GroupingOptions groupingOptions);
/** Creates the union of the given tables; all must share a pipeline. */
<K, V> PTable<K, V> createUnionTable(List<PTableBase<K, V>> internal);
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.TableSource;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import java.util.List;
/**
 * A {@code PTable} whose contents are read directly from a {@link TableSource}.
 * Most behavior is delegated to an underlying {@link BaseInputCollection} of
 * {@code Pair<K, V>} elements built from the same source.
 */
public class BaseInputTable<K, V> extends PTableBase<K, V> {

  protected final TableSource<K, V> source;
  // Pair-typed view of the source; size, readability, and identity delegate to it.
  protected final BaseInputCollection<Pair<K, V>> asCollection;

  public BaseInputTable(TableSource<K, V> source, DistributedPipeline pipeline) {
    super(source.toString(), pipeline);
    this.source = source;
    this.asCollection = pipeline.getFactory().createInputCollection(
        source, source.toString(), pipeline, ParallelDoOptions.builder().build());
  }

  public BaseInputTable(TableSource<K, V> source, String name, DistributedPipeline pipeline, ParallelDoOptions doOpts) {
    super(source.toString(), pipeline, doOpts);
    this.source = source;
    this.asCollection = pipeline.getFactory().createInputCollection(source, name, pipeline, doOpts);
  }

  public TableSource<K, V> getSource() {
    return source;
  }

  @Override
  protected boolean waitingOnTargets() {
    return asCollection.waitingOnTargets();
  }

  @Override
  protected long getSizeInternal() {
    return asCollection.getSizeInternal();
  }

  @Override
  public PTableType<K, V> getPTableType() {
    return source.getTableType();
  }

  @Override
  public PType<Pair<K, V>> getPType() {
    return source.getType();
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    // An input table has no upstream collections.
    return ImmutableList.of();
  }

  @Override
  protected ReadableData<Pair<K, V>> getReadableDataInternal() {
    return asCollection.getReadableDataInternal();
  }

  @Override
  public long getLastModifiedAt() {
    return source.getLastModifiedAt(pipeline.getConfiguration());
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    visitor.visitInputCollection(asCollection);
  }

  @Override
  public int hashCode() {
    return asCollection.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof BaseInputTable)) {
      // instanceof is false for null, so no separate null check is needed.
      return false;
    }
    // BUG FIX: the previous implementation compared asCollection against the
    // other *table* (asCollection.equals(other)), which could never match a
    // BaseInputCollection and broke the equals/hashCode contract. Compare the
    // underlying input collections instead, mirroring hashCode().
    return asCollection.equals(((BaseInputTable<?, ?>) other).asCollection);
  }
}
| 2,715 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/BaseDoCollection.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.DoFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.DelegatingReadableData;
import java.util.List;
/**
 * A {@code PCollection} produced by applying a {@link DoFn} to the elements of a
 * single parent collection.
 */
public class BaseDoCollection<S> extends PCollectionImpl<S> {

  private final PCollectionImpl<Object> parent;
  protected final DoFn<Object, S> fn;
  protected final PType<S> ptype;

  @SuppressWarnings("unchecked")
  protected <T> BaseDoCollection(
      String name,
      PCollectionImpl<T> parent,
      DoFn<T, S> fn,
      PType<S> ptype,
      ParallelDoOptions options) {
    super(name, parent.getPipeline(), options);
    // Erase the parent element type to Object; parent and fn are only ever used
    // together, so their element types remain consistent at runtime.
    this.parent = (PCollectionImpl<Object>) parent;
    this.fn = (DoFn<Object, S>) fn;
    this.ptype = ptype;
  }

  @Override
  protected long getSizeInternal() {
    long parentSize = parent.getSize();
    if (parentSize == 0L) {
      return parentSize;
    }
    // Estimate output size via the DoFn's scale factor, but never report less
    // than one byte for a non-empty parent.
    return Math.max(1L, (long) (fn.scaleFactor() * parentSize));
  }

  @Override
  protected ReadableData<S> getReadableDataInternal() {
    if (getOnlyParent() instanceof BaseGroupedTable) {
      // A grouped parent cannot be read in-process; fall back to materialization.
      return materializedData();
    }
    // Was a raw DelegatingReadableData; parameterize it so the unchecked
    // conversion warning goes away. The field `parent` is the only parent,
    // so reading from it is equivalent to reading from getOnlyParent().
    return new DelegatingReadableData<Object, S>(parent.asReadable(false), fn);
  }

  @Override
  public PType<S> getPType() {
    return ptype;
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.<PCollectionImpl<?>> of(parent);
  }

  @Override
  public long getLastModifiedAt() {
    return parent.getLastModifiedAt();
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    visitor.visitDoCollection(this);
  }
}
| 2,716 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/BaseUnionTable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.UnionReadableData;
import java.util.List;
/**
 * A {@code PTable} formed by the union of two or more parent tables that share
 * the same {@code PTableType} and belong to the same pipeline.
 */
public class BaseUnionTable<K, V> extends PTableBase<K, V> {
// PTableType shared by all parents (taken from the first table in the union).
private PTableType<K, V> ptype;
private List<PCollectionImpl<Pair<K, V>>> parents;
// Sum of parent sizes, accumulated in the constructor.
private long size;
// Lazily computed max of parent modification times; -1 means "not yet computed".
private long lastModifiedAt = -1;
// Builds a display name of the form "union(t1,t2,...)" from the parent names.
private static <K, V> String flatName(List<PTableBase<K, V>> tables) {
StringBuilder sb = new StringBuilder("union(");
for (int i = 0; i < tables.size(); i++) {
if (i != 0) {
sb.append(',');
}
sb.append(tables.get(i).getName());
}
return sb.append(')').toString();
}
protected BaseUnionTable(List<PTableBase<K, V>> tables) {
super(flatName(tables), tables.get(0).getPipeline());
this.ptype = tables.get(0).getPTableType();
// NOTE(review): the super call above already receives this pipeline; this
// re-assignment appears redundant but is kept as-is.
this.pipeline = tables.get(0).getPipeline();
this.parents = Lists.newArrayList();
for (PTableBase<K, V> parent : tables) {
// All parents must belong to the same Pipeline instance (identity check).
if (pipeline != parent.getPipeline()) {
throw new IllegalStateException("Cannot union PTables from different Pipeline instances");
}
this.parents.add(parent);
size += parent.getSize();
}
}
@Override
public void setBreakpoint() {
// A breakpoint on the union propagates to every parent.
super.setBreakpoint();
for (PCollectionImpl<?> parent : getParents()) {
parent.setBreakpoint();
}
}
@Override
protected long getSizeInternal() {
return size;
}
@Override
public long getLastModifiedAt() {
// Compute on first call and cache; the max over parents is the union's time.
if (lastModifiedAt == -1) {
for (PCollectionImpl<Pair<K, V>> parent : parents) {
long parentLastModifiedAt = parent.getLastModifiedAt();
if (parentLastModifiedAt > lastModifiedAt) {
lastModifiedAt = parentLastModifiedAt;
}
}
}
return lastModifiedAt;
}
@Override
public PTableType<K, V> getPTableType() {
return ptype;
}
@Override
public PType<Pair<K, V>> getPType() {
return ptype;
}
@Override
public List<PCollectionImpl<?>> getParents() {
// Defensive copy: callers cannot mutate the internal parent list.
return ImmutableList.<PCollectionImpl<?>>copyOf(parents);
}
@Override
protected void acceptInternal(Visitor visitor) {
visitor.visitUnionCollection(pipeline.getFactory().createUnionCollection(parents));
}
@Override
protected ReadableData<Pair<K, V>> getReadableDataInternal() {
List<ReadableData<Pair<K, V>>> prds = Lists.newArrayList();
for (PCollectionImpl<Pair<K, V>> parent : parents) {
// A grouped parent cannot be read in-process; materialize the whole union.
if (parent instanceof BaseGroupedTable) {
return materializedData();
} else {
prds.add(parent.asReadable(false));
}
}
return new UnionReadableData<Pair<K, V>>(prds);
}
}
| 2,717 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/PTableBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.Lists;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.FilterFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.TableSource;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.lib.Cogroup;
import org.apache.crunch.lib.Join;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.materialize.MaterializableMap;
import org.apache.crunch.materialize.pobject.MapPObject;
import org.apache.crunch.types.PType;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * Base implementation of {@link PTable} for distributed pipelines. Most of the
 * relational-style operations delegate to the static helpers in the
 * {@code org.apache.crunch.lib} package.
 */
public abstract class PTableBase<K, V> extends PCollectionImpl<Pair<K, V>> implements PTable<K, V> {
public PTableBase(String name, DistributedPipeline pipeline) {
super(name, pipeline);
}
public PTableBase(String name, DistributedPipeline pipeline, ParallelDoOptions options) {
super(name, pipeline, options);
}
public PType<K> getKeyType() {
return getPTableType().getKeyType();
}
public PType<V> getValueType() {
return getPTableType().getValueType();
}
/** Groups this table by key using default grouping options. */
public BaseGroupedTable<K, V> groupByKey() {
return pipeline.getFactory().createGroupedTable(this, GroupingOptions.builder().build());
}
/** Groups this table by key using the given number of reduce tasks. */
public BaseGroupedTable<K, V> groupByKey(int numReduceTasks) {
return pipeline.getFactory().createGroupedTable(
this, GroupingOptions.builder().numReducers(numReduceTasks).build());
}
public BaseGroupedTable<K, V> groupByKey(GroupingOptions groupingOptions) {
return pipeline.getFactory().createGroupedTable(this, groupingOptions);
}
@Override
public PTable<K, V> union(PTable<K, V> other) {
return union(new PTable[] { other });
}
@Override
public PTable<K, V> union(PTable<K, V>... others) {
List<PTableBase<K, V>> internal = Lists.newArrayList();
internal.add(this);
for (PTable<K, V> table : others) {
internal.add((PTableBase<K, V>) table);
}
return pipeline.getFactory().createUnionTable(internal);
}
@Override
public PTable<K, V> write(Target target) {
// If this table was already materialized to a source, write from that source
// instead of re-running the computation that produced it.
if (getMaterializedAt() != null) {
getPipeline().write(pipeline.getFactory().createInputTable(
(TableSource<K, V>) getMaterializedAt(), getName(), pipeline, doOptions), target);
} else {
getPipeline().write(this, target);
}
return this;
}
@Override
public PTable<K, V> write(Target target, Target.WriteMode writeMode) {
// Same materialization shortcut as write(Target), with an explicit WriteMode.
if (getMaterializedAt() != null) {
getPipeline().write(pipeline.getFactory().createInputTable(
(TableSource<K, V>) getMaterializedAt(), getName(), pipeline, doOptions), target, writeMode);
} else {
getPipeline().write(this, target, writeMode);
}
return this;
}
@Override
public PTable<K, V> cache() {
return cache(CachingOptions.DEFAULT);
}
@Override
public PTable<K, V> cache(CachingOptions options) {
pipeline.cache(this, options);
return this;
}
@Override
public PTable<K, V> filter(FilterFn<Pair<K, V>> filterFn) {
return parallelDo(filterFn, getPTableType());
}
@Override
public PTable<K, V> filter(String name, FilterFn<Pair<K, V>> filterFn) {
return parallelDo(name, filterFn, getPTableType());
}
@Override
public <U> PTable<K, U> mapValues(MapFn<V, U> mapFn, PType<U> ptype) {
return PTables.mapValues(this, mapFn, ptype);
}
@Override
public <U> PTable<K, U> mapValues(String name, MapFn<V, U> mapFn, PType<U> ptype) {
return PTables.mapValues(name, this, mapFn, ptype);
}
@Override
public <K2> PTable<K2, V> mapKeys(MapFn<K, K2> mapFn, PType<K2> ptype) {
return PTables.mapKeys(this, mapFn, ptype);
}
@Override
public <K2> PTable<K2, V> mapKeys(String name, MapFn<K, K2> mapFn, PType<K2> ptype) {
return PTables.mapKeys(name, this, mapFn, ptype);
}
@Override
public PTable<K, V> top(int count) {
return Aggregate.top(this, count, true);
}
@Override
public PTable<K, V> bottom(int count) {
// bottom() is top() with ascending order inverted.
return Aggregate.top(this, count, false);
}
@Override
public PTable<K, Collection<V>> collectValues() {
return Aggregate.collectValues(this);
}
@Override
public <U> PTable<K, Pair<V, U>> join(PTable<K, U> other) {
return Join.join(this, other);
}
@Override
public <U> PTable<K, Pair<Collection<V>, Collection<U>>> cogroup(PTable<K, U> other) {
return Cogroup.cogroup(this, other);
}
@Override
public PCollection<K> keys() {
return PTables.keys(this);
}
@Override
public PCollection<V> values() {
return PTables.values(this);
}
/**
 * Returns a {@code Map<K, V>} made up of the keys and values in this PTable.
 */
@Override
public Map<K, V> materializeToMap() {
return new MaterializableMap<K, V>(this.materialize());
}
/** {@inheritDoc} */
@Override
public PObject<Map<K, V>> asMap() {
return new MapPObject<K, V>(this);
}
}
| 2,718 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/EmptyPCollection.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.types.PType;
import java.util.List;
/**
 * A {@code PCollection} that contains no elements. It has no parents, zero
 * size, and yields an empty {@link ReadableData} view.
 */
public class EmptyPCollection<T> extends PCollectionImpl<T> {

  private final PType<T> elementType;

  public EmptyPCollection(DistributedPipeline pipeline, PType<T> ptype) {
    super("EMPTY", pipeline);
    // Fail fast on a null type rather than at plan time.
    this.elementType = Preconditions.checkNotNull(ptype);
  }

  @Override
  public PType<T> getPType() {
    return elementType;
  }

  @Override
  public List<PCollectionImpl<?>> getParents() {
    return ImmutableList.of();
  }

  @Override
  protected long getSizeInternal() {
    return 0;
  }

  @Override
  public long getLastModifiedAt() {
    return 0;
  }

  @Override
  protected ReadableData<T> getReadableDataInternal() {
    return new EmptyReadableData<T>();
  }

  @Override
  protected void acceptInternal(Visitor visitor) {
    // Nothing for the visitor to walk: an empty collection has no inputs.
  }
}
| 2,719 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/dist/collect/EmptyReadableData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.dist.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.IOException;
import java.util.Set;
/**
 * A {@link ReadableData} view that contains no records and touches no sources.
 */
class EmptyReadableData<T> implements ReadableData<T> {

  @Override
  public Iterable<T> read(TaskInputOutputContext<?, ?, ?, ?> context) throws IOException {
    return ImmutableList.of();
  }

  @Override
  public void configure(Configuration conf) {
    // Nothing to configure for an empty data set.
  }

  @Override
  public Set<SourceTarget<?>> getSourceTargets() {
    return ImmutableSet.of();
  }
}
| 2,720 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/CountersWrapper.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
/**
 * A {@link Counters} facade that can absorb an unbounded number of distinct
 * counters: whenever the active underlying {@code Counters} instance refuses to
 * create another counter (e.g. Hadoop's counter limits), a fresh instance is
 * activated and all instances are retained for iteration and counting.
 */
class CountersWrapper extends Counters {

  // The instance new counters are created in; swapped out when it throws.
  private Counters active;
  // Cache of groupName -> counterName -> Counter spanning all instances.
  private final Map<String, Map<String, Counter>> lookupCache = Maps.newHashMap();
  // Every Counters instance ever activated. Made final: it is never reassigned.
  private final Set<Counters> allCounters = Sets.newHashSet();

  CountersWrapper() {
    this.active = new Counters();
    allCounters.add(active);
  }

  CountersWrapper(org.apache.hadoop.mapred.Counters counters) {
    this.active = new Counters(counters);
    allCounters.add(active);
  }

  // Made synchronized for consistency: every other accessor of lookupCache,
  // active, and allCounters in this class synchronizes on the instance.
  @Override
  public synchronized Counter findCounter(String groupName, String counterName) {
    Map<String, Counter> c = lookupCache.get(groupName);
    if (c == null) {
      c = Maps.newHashMap();
      lookupCache.put(groupName, c);
    }
    Counter counter = c.get(counterName);
    if (counter == null) {
      try {
        counter = active.findCounter(groupName, counterName);
      } catch (Exception e) {
        // The active instance hit a limit; recover by rolling over to a new one.
        active = new Counters();
        allCounters.add(active);
        counter = active.findCounter(groupName, counterName);
      }
      c.put(counterName, counter);
    }
    return counter;
  }

  @Override
  public synchronized Counter findCounter(Enum<?> key) {
    return findCounter(key.getClass().getName(), key.name());
  }

  @Override
  public synchronized Collection<String> getGroupNames() {
    return lookupCache.keySet();
  }

  @Override
  public Iterator<CounterGroup> iterator() {
    // Chain the groups of every retained Counters instance together.
    return Iterators.concat(Iterables.transform(allCounters, new Function<Counters, Iterator<CounterGroup>>() {
      @Override
      public Iterator<CounterGroup> apply(Counters input) {
        return input.iterator();
      }
    }).iterator());
  }

  @Override
  public synchronized CounterGroup getGroup(String groupName) {
    if (allCounters.size() == 1) {
      return active.getGroup(groupName);
    } else {
      // A group may be split across instances; no single CounterGroup exists.
      throw new UnsupportedOperationException(
          "CounterWrapper cannot return CounterGroup when there are too many Counters");
    }
  }

  @Override
  public synchronized void write(DataOutput out) throws IOException {
    throw new UnsupportedOperationException("CountersWrapper may not be written");
  }

  @Override
  public synchronized void readFields(DataInput in) throws IOException {
    throw new UnsupportedOperationException("CountersWrapper may not be read");
  }

  @Override
  public synchronized int countCounters() {
    int cntrs = 0;
    for (Counters c : allCounters) {
      cntrs += c.countCounters();
    }
    return cntrs;
  }

  /** Adds every counter value from {@code other} into this wrapper. */
  public synchronized void incrAllCounters(Counters other) {
    for (CounterGroup cg : other) {
      for (Counter c : cg) {
        findCounter(cg.getName(), c.getName()).increment(c.getValue());
      }
    }
  }
}
| 2,721 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/MemPipeline.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Charsets;
import com.google.common.collect.Iterables;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.io.DatumWriter;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.CreateOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.PipelineExecution;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mem.collect.MemCollection;
import org.apache.crunch.impl.mem.collect.MemTable;
import org.apache.crunch.io.At;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.avro.AvroFileTarget;
import org.apache.crunch.io.seq.SeqFileTarget;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.Counters;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.AbstractFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MemPipeline implements Pipeline {
private static final Logger LOG = LoggerFactory.getLogger(MemPipeline.class);
// JVM-wide counters shared by all in-memory pipeline runs; see clearCounters().
private static Counters COUNTERS = new CountersWrapper();
private static final MemPipeline INSTANCE = new MemPipeline();
// Monotonic index used to name output files (out1.seq, out2.avro, ...).
private int outputIndex = 0;
/** Returns the shared in-memory counters for this JVM. */
public static Counters getCounters() {
return COUNTERS;
}
/** Discards all accumulated counter values by installing a fresh instance. */
public static void clearCounters() {
COUNTERS = new CountersWrapper();
}
/** Returns the singleton in-memory pipeline. */
public static Pipeline getInstance() {
return INSTANCE;
}
/** Creates an in-memory PCollection from the given elements, with no PType. */
public static <T> PCollection<T> collectionOf(T... ts) {
return new MemCollection<T>(ImmutableList.copyOf(ts));
}
/** Creates an in-memory PCollection backed by the given Iterable, with no PType. */
public static <T> PCollection<T> collectionOf(Iterable<T> collect) {
return new MemCollection<T>(collect);
}
/** Creates an in-memory PCollection with an explicit PType from the given elements. */
public static <T> PCollection<T> typedCollectionOf(PType<T> ptype, T... ts) {
return new MemCollection<T>(ImmutableList.copyOf(ts), ptype, null);
}
/** Creates an in-memory PCollection with an explicit PType backed by the given Iterable. */
public static <T> PCollection<T> typedCollectionOf(PType<T> ptype, Iterable<T> collect) {
return new MemCollection<T>(collect, ptype, null);
}
/**
 * Creates an in-memory {@code PTable} from inlined key-value pairs with no
 * associated {@code PTableType}.
 *
 * @param s the first key
 * @param t the first value
 * @param more additional keys and values, alternating key, value, key, value...
 * @throws IllegalArgumentException if {@code more} has an odd number of elements
 */
@SuppressWarnings("unchecked")
public static <S, T> PTable<S, T> tableOf(S s, T t, Object... more) {
  if (more.length % 2 != 0) {
    // Fail with a clear message instead of an ArrayIndexOutOfBoundsException
    // when the trailing varargs do not pair up.
    throw new IllegalArgumentException(
        "Alternating key-value arguments must have even length, got " + more.length);
  }
  List<Pair<S, T>> pairs = Lists.newArrayList();
  pairs.add(Pair.of(s, t));
  for (int i = 0; i < more.length; i += 2) {
    pairs.add(Pair.of((S) more[i], (T) more[i + 1]));
  }
  return new MemTable<S, T>(pairs);
}

/**
 * Creates an in-memory {@code PTable} with an explicit {@code PTableType} from
 * inlined key-value pairs.
 *
 * @param more additional keys and values, alternating; must have even length
 * @throws IllegalArgumentException if {@code more} has an odd number of elements
 */
@SuppressWarnings("unchecked")
public static <S, T> PTable<S, T> typedTableOf(PTableType<S, T> ptype, S s, T t, Object... more) {
  if (more.length % 2 != 0) {
    throw new IllegalArgumentException(
        "Alternating key-value arguments must have even length, got " + more.length);
  }
  List<Pair<S, T>> pairs = Lists.newArrayList();
  pairs.add(Pair.of(s, t));
  for (int i = 0; i < more.length; i += 2) {
    pairs.add(Pair.of((S) more[i], (T) more[i + 1]));
  }
  return new MemTable<S, T>(pairs, ptype, null);
}

/** Creates an in-memory {@code PTable} backed by the given pairs, with no PTableType. */
public static <S, T> PTable<S, T> tableOf(Iterable<Pair<S, T>> pairs) {
  return new MemTable<S, T>(pairs);
}

/** Creates an in-memory {@code PTable} with an explicit PTableType backed by the given pairs. */
public static <S, T> PTable<S, T> typedTableOf(PTableType<S, T> ptype, Iterable<Pair<S, T>> pairs) {
  return new MemTable<S, T>(pairs, ptype, null);
}
private Configuration conf = new Configuration();
// Targets written during the current run; used to detect duplicate writes.
private Set<Target> activeTargets = Sets.newHashSet();
// Private: MemPipeline is a singleton, obtained via getInstance().
private MemPipeline() {
}
@Override
public void setConfiguration(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConfiguration() {
return conf;
}
@Override
public <T> PCollection<T> read(Source<T> source) {
return read(source, null);
}
/**
 * Reads the source eagerly into an in-memory collection. The source must be a
 * ReadableSource; otherwise an IllegalStateException is thrown.
 */
@Override
public <T> PCollection<T> read(Source<T> source, String named) {
String name = named == null ? source.toString() : named;
if (source instanceof ReadableSource) {
try {
Iterable<T> iterable = ((ReadableSource<T>) source).read(conf);
return new MemCollection<T>(iterable, source.getType(), name);
} catch (IOException e) {
LOG.error("Exception reading source: " + name, e);
throw new IllegalStateException(e);
}
}
LOG.error("Source {} is not readable", name);
throw new IllegalStateException("Source " + name + " is not readable");
}
@Override
public <K, V> PTable<K, V> read(TableSource<K, V> source) {
return read(source, null);
}
/**
 * Table analogue of {@link #read(Source, String)}: reads the source eagerly
 * into an in-memory table of pairs.
 */
@Override
public <K, V> PTable<K, V> read(TableSource<K, V> source, String named) {
String name = named == null ? source.toString() : named;
if (source instanceof ReadableSource) {
try {
Iterable<Pair<K, V>> iterable = ((ReadableSource<Pair<K, V>>) source).read(conf);
return new MemTable<K, V>(iterable, source.getTableType(), name);
} catch (IOException e) {
LOG.error("Exception reading source: " + name, e);
throw new IllegalStateException(e);
}
}
LOG.error("Source {} is not readable", name);
throw new IllegalStateException("Source " + name + " is not readable");
}
@Override
public void write(PCollection<?> collection, Target target) {
write(collection, target, Target.WriteMode.DEFAULT);
}
/**
 * Materializes the collection and writes it to the target immediately. Only
 * PathTarget instances are supported; the on-disk format is chosen from the
 * target type (SequenceFile, Avro, or tab/newline-delimited text).
 */
@Override
public void write(PCollection<?> collection, Target target, Target.WriteMode writeMode) {
// Last modified time does not need to be retrieved for this
// pipeline implementation
target.handleExisting(writeMode, -1, getConfiguration());
// Re-writing the same target in a single run is only allowed in APPEND mode.
if (writeMode != Target.WriteMode.APPEND && activeTargets.contains(target)) {
throw new CrunchRuntimeException("Target " + target
+ " is already written in the current run."
+ " Use WriteMode.APPEND in order to write additional data to it.");
}
activeTargets.add(target);
if (target instanceof PathTarget) {
if (collection.getPType() != null) {
collection.getPType().initialize(getConfiguration());
}
Path path = ((PathTarget) target).getPath();
try {
FileSystem fs = path.getFileSystem(conf);
// Each write gets a unique output file name under the target directory.
outputIndex++;
if (target instanceof SeqFileTarget) {
Path outputPath = new Path(path, "out" + outputIndex + ".seq");
if (collection instanceof PTable) {
writeSequenceFileFromPTable(fs, outputPath, (PTable<?, ?>) collection);
} else {
writeSequenceFileFromPCollection(fs, outputPath, collection);
}
} else {
if (target instanceof AvroFileTarget){
Path outputPath = new Path(path, "out" + outputIndex + ".avro");
FSDataOutputStream os = fs.create(outputPath);
writeAvroFile(os, collection);
os.close();
} else {
LOG.warn("Defaulting to write to a text file from MemPipeline");
Path outputPath = new Path(path, "out" + outputIndex + ".txt");
FSDataOutputStream os = fs.create(outputPath);
byte[] newLine = "\r\n".getBytes(Charsets.UTF_8);
if (collection instanceof PTable) {
// Tables are written as key<TAB>value lines.
byte[] tab = "\t".getBytes(Charsets.UTF_8);
for (Object o : collection.materialize()) {
Pair p = (Pair) o;
os.write(p.first().toString().getBytes(Charsets.UTF_8));
os.write(tab);
os.write(p.second().toString().getBytes(Charsets.UTF_8));
os.write(newLine);
}
} else {
for (Object o : collection.materialize()) {
os.write(o.toString().getBytes(Charsets.UTF_8));
os.write(newLine);
}
}
os.close();
}
}
} catch (IOException e) {
// NOTE(review): the IOException is logged but not rethrown, so a failed
// write appears to succeed to the caller — consider propagating it.
LOG.error("Exception writing target: " + target, e);
}
} else {
LOG.error("Target {} is not a PathTarget instance", target);
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
/**
 * Writes the collection as an Avro data file using the collection's AvroType
 * schema and output map function. The collection must have an AvroType.
 * Note: both dataFileWriter.close() and outputStream.close() are invoked; the
 * caller may also close the stream, which is a redundant but harmless close.
 */
private void writeAvroFile(FSDataOutputStream outputStream, PCollection recordCollection) throws IOException {
AvroType avroType = (AvroType)recordCollection.getPType();
if (avroType == null) {
throw new IllegalStateException("Can't write a non-typed Avro collection");
}
DatumWriter datumWriter = Avros.newWriter((AvroType)recordCollection.getPType());
DataFileWriter dataFileWriter = new DataFileWriter(datumWriter);
dataFileWriter.create(avroType.getSchema(), outputStream);
for (Object record : recordCollection.materialize()) {
// Map each record through the AvroType's output function before appending.
dataFileWriter.append(avroType.getOutputMapFn().map(record));
}
dataFileWriter.close();
outputStream.close();
}
@SuppressWarnings({ "rawtypes", "unchecked" })
/**
 * Writes a PTable as a SequenceFile of (key, value) records, using the key and
 * value classes declared by the table's converter.
 */
private void writeSequenceFileFromPTable(final FileSystem fs, final Path path, final PTable table)
throws IOException {
final PTableType pType = table.getPTableType();
final Class<?> keyClass = pType.getConverter().getKeyClass();
final Class<?> valueClass = pType.getConverter().getValueClass();
final SequenceFile.Writer writer = new SequenceFile.Writer(fs, fs.getConf(), path, keyClass,
valueClass);
for (final Object o : table.materialize()) {
final Pair<?,?> p = (Pair) o;
final Object key = pType.getKeyType().getOutputMapFn().map(p.first());
final Object value = pType.getValueType().getOutputMapFn().map(p.second());
writer.append(key, value);
}
writer.close();
}
/**
 * Writes a plain PCollection as a SequenceFile of (NullWritable, value)
 * records, since a SequenceFile always requires a key class.
 */
private void writeSequenceFileFromPCollection(final FileSystem fs, final Path path,
final PCollection collection) throws IOException {
final PType pType = collection.getPType();
final Converter converter = pType.getConverter();
final Class valueClass = converter.getValueClass();
final SequenceFile.Writer writer = new SequenceFile.Writer(fs, fs.getConf(), path,
NullWritable.class, valueClass);
for (final Object o : collection.materialize()) {
final Object value = pType.getOutputMapFn().map(o);
writer.append(NullWritable.get(), value);
}
writer.close();
}
  /** Reads the text file at {@code pathName} into a collection of strings. */
  @Override
  public PCollection<String> readTextFile(String pathName) {
    return read(At.textFile(pathName));
  }
  /** Writes {@code collection} to a text-file target at {@code pathName}. */
  @Override
  public <T> void writeTextFile(PCollection<T> collection, String pathName) {
    write(collection, At.textFile(pathName));
  }
  /** In-memory collections are already materialized, so this is a direct pass-through. */
  @Override
  public <T> Iterable<T> materialize(PCollection<T> pcollection) {
    return pcollection.materialize();
  }
  /** Caching is meaningless for data already held in memory, so this does nothing. */
  @Override
  public <T> void cache(PCollection<T> pcollection, CachingOptions options) {
    // No-op
  }
@Override
public <T> PCollection<T> emptyPCollection(PType<T> ptype) {
return typedCollectionOf(ptype, ImmutableList.<T>of());
}
@Override
public <K, V> PTable<K, V> emptyPTable(PTableType<K, V> ptype) {
return typedTableOf(ptype, ImmutableList.<Pair<K, V>>of());
}
@Override
public <T> PCollection<T> create(Iterable<T> contents, PType<T> ptype) {
return create(contents, ptype, CreateOptions.none());
}
@Override
public <T> PCollection<T> create(Iterable<T> iterable, PType<T> ptype, CreateOptions options) {
return typedCollectionOf(ptype, iterable);
}
@Override
public <K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype) {
return create(contents, ptype, CreateOptions.none());
}
@Override
public <K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype, CreateOptions options) {
return typedTableOf(ptype, contents);
}
@Override
public <S> PCollection<S> union(List<PCollection<S>> collections) {
List<S> output = Lists.newArrayList();
for (PCollection<S> pcollect : collections) {
Iterables.addAll(output, pcollect.materialize());
}
return new MemCollection<S>(output, collections.get(0).getPType());
}
@Override
public <K, V> PTable<K, V> unionTables(List<PTable<K, V>> tables) {
List<Pair<K, V>> values = Lists.newArrayList();
for (PTable<K, V> table : tables) {
Iterables.addAll(values, table.materialize());
}
return new MemTable<K, V>(values, tables.get(0).getPTableType(), null);
}
@Override
public <Output> Output sequentialDo(PipelineCallable<Output> callable) {
Output out = callable.generateOutput(this);
try {
if (PipelineCallable.Status.FAILURE == callable.call()) {
throw new IllegalStateException("PipelineCallable " + callable + " failed in in-memory Crunch pipeline");
}
} catch (Throwable t) {
t.printStackTrace();
}
return out;
}
@Override
public PipelineExecution runAsync() {
activeTargets.clear();
return new MemExecution();
}
@Override
public PipelineResult run() {
try {
return runAsync().get();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void cleanup(boolean force) {
//no-op
}
@Override
public PipelineResult done() {
return run();
}
@Override
public void enableDebug() {
LOG.info("Note: in-memory pipelines do not have debug logging");
}
@Override
public String getName() {
return "Memory Pipeline";
}
  /**
   * A {@link PipelineExecution} for the in-memory pipeline. There is no asynchronous
   * work to wait on, so every wait/get method completes the underlying future
   * immediately via {@code set(res)} and the execution always reports success.
   * (AbstractFuture ignores repeated {@code set} calls after the first.)
   */
  private static class MemExecution extends AbstractFuture<PipelineResult> implements PipelineExecution {
    // Precomputed result: a single synthetic stage carrying the pipeline's counters.
    private PipelineResult res;
    public MemExecution() {
      this.res = new PipelineResult(
          ImmutableList.of(new PipelineResult.StageResult("MemPipelineStage", COUNTERS)),
          PipelineExecution.Status.SUCCEEDED);
    }
    /** In-memory pipelines build no execution plan, so there is no dot file. */
    @Override
    public String getPlanDotFile() {
      return "";
    }
    @Override
    public Map<String, String> getNamedDotFiles() {
      return ImmutableMap.of("", "");
    }
    /** Completes immediately; the timeout is ignored since nothing is pending. */
    @Override
    public void waitFor(long timeout, TimeUnit timeUnit) throws InterruptedException {
      set(res);
    }
    /** Completes immediately; nothing is pending. */
    @Override
    public void waitUntilDone() throws InterruptedException {
      set(res);
    }
    /** Completes the future (idempotent) and returns the canned successful result. */
    @Override
    public PipelineResult get() throws ExecutionException, InterruptedException {
      set(res);
      return super.get();
    }
    @Override
    public PipelineResult get(long timeout, TimeUnit timeUnit) throws InterruptedException, ExecutionException,
        TimeoutException {
      set(res);
      return super.get(timeout, timeUnit);
    }
    /** READY until any wait/get call marks the future done; SUCCEEDED afterwards. */
    @Override
    public Status getStatus() {
      return isDone() ? Status.SUCCEEDED : Status.READY;
    }
    /** Null until the future has been completed by a wait/get call. */
    @Override
    public PipelineResult getResult() {
      return isDone() ? res : null;
    }
    /** There is no running work to kill. */
    @Override
    public void kill() {
      // No-op
    }
  }
}
| 2,722 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* In-memory Pipeline implementation for rapid prototyping and testing.
*/
package org.apache.crunch.impl.mem;
| 2,723 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/collect/MemReadableData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.collect;
import com.google.common.collect.ImmutableSet;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.IOException;
import java.util.Collection;
import java.util.Set;
/**
 * A {@link ReadableData} view over an in-memory collection: reads return the
 * backing collection directly, and there are no source targets or configuration.
 */
class MemReadableData<T> implements ReadableData<T> {

  /** The backing in-memory data, served verbatim by {@link #read}. */
  private final Collection<T> collection;

  public MemReadableData(Collection<T> collection) {
    this.collection = collection;
  }

  /** In-memory data has no backing sources. */
  @Override
  public Set<SourceTarget<?>> getSourceTargets() {
    return ImmutableSet.of();
  }

  /** Nothing to configure for in-memory data. */
  @Override
  public void configure(Configuration conf) {
  }

  /** Returns the backing collection; the task context is unused. */
  @Override
  public Iterable<T> read(TaskInputOutputContext<?, ?, ?, ?> ctxt) throws IOException {
    return collection;
  }
}
| 2,724 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/collect/Shuffler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.collect;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.SingleUseIterable;
import org.apache.crunch.types.PType;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
* In-memory versions of common MapReduce patterns for aggregating key-value data.
*/
abstract class Shuffler<K, V> implements Iterable<Pair<K, Iterable<V>>> {
  /** Adds a single key/value record to the shuffle. */
  public abstract void add(Pair<K, V> record);
  // Comparable keys get a TreeMap (sorted iteration, presumably to mirror the sorted
  // ordering of an MR shuffle); other key types fall back to an unordered HashMap.
  private static <K, V> Map<K, V> getMapForKeyType(PType<?> ptype) {
    if (ptype != null && Comparable.class.isAssignableFrom(ptype.getTypeClass())) {
      return new TreeMap<K, V>();
    } else {
      return Maps.newHashMap();
    }
  }
  /**
   * Creates a Shuffler for the given key type and grouping options: a secondary-sort
   * shuffler when a Pair key plus a grouping comparator is configured, a comparator-
   * ordered map shuffler when a sort comparator is configured, and otherwise a plain
   * map-backed shuffler.
   */
  public static <S, T> Shuffler<S, T> create(PType<S> keyType, GroupingOptions options,
      Pipeline pipeline) {
    Map<Object, Collection<T>> map = getMapForKeyType(keyType);
    if (options != null) {
      // A Job instance is only needed so the GroupingOptions can write themselves
      // into a Configuration we can read the comparator classes back from.
      Job job;
      try {
        job = new Job(pipeline.getConfiguration());
      } catch (IOException e) {
        throw new IllegalStateException("Could not create Job instance", e);
      }
      options.configure(job);
      if (Pair.class.equals(keyType.getTypeClass()) && options.getGroupingComparatorClass() != null) {
        // Secondary sort: group by the first element of the Pair key.
        PType<?> pairKey = keyType.getSubTypes().get(0);
        return new SecondarySortShuffler(getMapForKeyType(pairKey));
      } else if (options.getSortComparatorClass() != null) {
        // Honor a custom sort comparator by backing the shuffle with a TreeMap
        // ordered by the (raw) serialized-key comparator.
        RawComparator rc = ReflectionUtils.newInstance(
            options.getSortComparatorClass(),
            job.getConfiguration());
        map = new TreeMap<Object, Collection<T>>(rc);
        return new MapShuffler<S, T>(map, keyType);
      }
    }
    return new MapShuffler<S, T>(map);
  }
  // Converts raw map entries back into Pair<K, Iterable<V>>, mapping the key through
  // the PType's converter/input map fn when a key type is available.
  private static class HFunction<K, V> implements Function<Map.Entry<Object, Collection<V>>, Pair<K, Iterable<V>>> {
    private final PType<K> keyType;
    public HFunction(PType<K> keyType) {
      this.keyType = keyType;
    }
    @Override
    public Pair<K, Iterable<V>> apply(Map.Entry<Object, Collection<V>> input) {
      K key;
      if (keyType == null) {
        key = (K) input.getKey();
      } else {
        Object k = keyType.getConverter().convertInput(input.getKey(), null);
        key = keyType.getInputMapFn().map(k);
      }
      // SingleUseIterable mirrors the MR contract that grouped values can be
      // iterated only once.
      return Pair.<K, Iterable<V>>of(key, new SingleUseIterable<V>(input.getValue()));
    }
  }
  // Basic shuffler: groups values into a Map keyed by the (possibly serialized) key.
  private static class MapShuffler<K, V> extends Shuffler<K, V> {
    private final Map<Object, Collection<V>> map;
    private final PType<K> keyType;
    public MapShuffler(Map<Object, Collection<V>> map) {
      this(map, null);
    }
    public MapShuffler(Map<Object, Collection<V>> map, PType<K> keyType) {
      this.map = map;
      this.keyType = keyType;
    }
    @Override
    public Iterator<Pair<K, Iterable<V>>> iterator() {
      return Iterators.transform(map.entrySet().iterator(),
          new HFunction<K, V>(keyType));
    }
    @Override
    public void add(Pair<K, V> record) {
      Object key = record.first();
      if (keyType != null) {
        // Store the key in its output (serialized) form so the raw sort comparator,
        // if any, sees the same representation it would in a real MR job.
        key = keyType.getConverter().outputKey(keyType.getOutputMapFn().map((K) key));
      }
      if (!map.containsKey(key)) {
        Collection<V> values = Lists.newArrayList();
        map.put(key, values);
      }
      map.get(key).add(record.second());
    }
  }
  // Sorts each group's (secondary-key, value) pairs by the secondary key and
  // re-assembles the Pair<K, SK> group key using the smallest secondary key.
  private static class SSFunction<K, SK, V> implements
      Function<Map.Entry<K, List<Pair<SK, V>>>, Pair<Pair<K, SK>, Iterable<V>>> {
    @Override
    public Pair<Pair<K, SK>, Iterable<V>> apply(Entry<K, List<Pair<SK, V>>> input) {
      List<Pair<SK, V>> values = input.getValue();
      // Assumes the secondary key type is Comparable — TODO confirm for custom types.
      Collections.sort(values, new Comparator<Pair<SK, V>>() {
        @Override
        public int compare(Pair<SK, V> o1, Pair<SK, V> o2) {
          return ((Comparable) o1.first()).compareTo(o2.first());
        }
      });
      Pair<K, SK> key = Pair.of(input.getKey(), values.get(0).first());
      return Pair.of(key, Iterables.transform(values, new Function<Pair<SK, V>, V>() {
        @Override
        public V apply(Pair<SK, V> input) {
          return input.second();
        }
      }));
    }
  }
  // Secondary-sort shuffler: groups by the primary key and defers ordering of the
  // secondary keys to SSFunction at iteration time.
  private static class SecondarySortShuffler<K, SK, V> extends Shuffler<Pair<K, SK>, V> {
    private Map<K, List<Pair<SK, V>>> map;
    public SecondarySortShuffler(Map<K, List<Pair<SK, V>>> map) {
      this.map = map;
    }
    @Override
    public Iterator<Pair<Pair<K, SK>, Iterable<V>>> iterator() {
      return Iterators.transform(map.entrySet().iterator(), new SSFunction<K, SK, V>());
    }
    @Override
    public void add(Pair<Pair<K, SK>, V> record) {
      K primary = record.first().first();
      if (!map.containsKey(primary)) {
        map.put(primary, Lists.<Pair<SK, V>>newArrayList());
      }
      map.get(primary).add(Pair.of(record.first().second(), record.second()));
    }
  }
}
| 2,725 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/collect/MemGroupedTable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.collect;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Target;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
 * In-memory implementation of {@link PGroupedTable}: the parent table's contents are
 * shuffled eagerly at construction time via {@link Shuffler}, and the grouped pairs
 * become the backing data of this collection.
 */
class MemGroupedTable<K, V> extends MemCollection<Pair<K, Iterable<V>>> implements PGroupedTable<K, V> {
  // The table this grouping was derived from; used for type information and naming.
  private final MemTable<K, V> parent;
  // Performs the shuffle immediately: every parent pair is added to a Shuffler
  // configured from the grouping options.
  private static <S, T> Iterable<Pair<S, Iterable<T>>> buildMap(MemTable<S, T> parent, GroupingOptions options) {
    PType<S> keyType = parent.getKeyType();
    Shuffler<S, T> shuffler = Shuffler.create(keyType, options, parent.getPipeline());
    for (Pair<S, T> pair : parent.materialize()) {
      shuffler.add(pair);
    }
    return shuffler;
  }
  public MemGroupedTable(MemTable<K, V> parent, GroupingOptions options) {
    super(buildMap(parent, options));
    this.parent = parent;
  }
  /** Unions of grouped tables are not supported in memory. */
  @Override
  public PCollection<Pair<K, Iterable<V>>> union(PCollection<Pair<K, Iterable<V>>>... collections) {
    throw new UnsupportedOperationException();
  }
  /** Writes the ungrouped form of this table, since targets expect key/value pairs. */
  @Override
  public PCollection<Pair<K, Iterable<V>>> write(Target target) {
    getPipeline().write(this.ungroup(), target);
    return this;
  }
  @Override
  public PType<Pair<K, Iterable<V>>> getPType() {
    return getGroupedTableType();
  }
  /** Derived from the parent's table type; null if the parent is untyped. */
  @Override
  public PGroupedTableType<K, V> getGroupedTableType() {
    PTableType<K, V> parentType = parent.getPTableType();
    if (parentType != null) {
      return parentType.getGroupedTableType();
    }
    return null;
  }
  @Override
  public PTypeFamily getTypeFamily() {
    return parent.getTypeFamily();
  }
  @Override
  public long getSize() {
    return 1; // getSize is only used for pipeline optimization in MR
  }
  @Override
  public String getName() {
    return "MemGrouped(" + parent.getName() + ")";
  }
  /** Applies the combiner as an ordinary DoFn over the grouped values. */
  @Override
  public PTable<K, V> combineValues(CombineFn<K, V> combineFn) {
    return parallelDo(combineFn, parent.getPTableType());
  }
  @Override
  public PTable<K, V> combineValues(CombineFn<K, V> combineFn, CombineFn<K, V> reduceFn) {
    //no need for special map-side combiner in memory mode
    return combineValues(reduceFn);
  }
  @Override
  public PTable<K, V> combineValues(Aggregator<V> agg) {
    return combineValues(Aggregators.<K, V>toCombineFn(agg, parent.getValueType()));
  }
  @Override
  public PTable<K, V> combineValues(Aggregator<V> combineAgg, Aggregator<V> reduceAgg) {
    return combineValues(Aggregators.<K, V>toCombineFn(combineAgg, parent.getValueType()),
        Aggregators.<K, V>toCombineFn(reduceAgg, parent.getValueType()));
  }
  @Override
  public <U> PTable<K, U> mapValues(MapFn<Iterable<V>, U> mapFn, PType<U> ptype) {
    return PTables.mapValues(this, mapFn, ptype);
  }
  @Override
  public <U> PTable<K, U> mapValues(String name, MapFn<Iterable<V>, U> mapFn, PType<U> ptype) {
    return PTables.mapValues(name, this, mapFn, ptype);
  }
  /** Flattens each (key, values) group back into individual (key, value) pairs. */
  @Override
  public PTable<K, V> ungroup() {
    return parallelDo("ungroup", new UngroupFn<K, V>(), parent.getPTableType());
  }
  // Emits one (key, value) pair per grouped value.
  private static class UngroupFn<K, V> extends DoFn<Pair<K, Iterable<V>>, Pair<K, V>> {
    @Override
    public void process(Pair<K, Iterable<V>> input, Emitter<Pair<K, V>> emitter) {
      for (V v : input.second()) {
        emitter.emit(Pair.of(input.first(), v));
      }
    }
  }
}
| 2,726 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/collect/MemTable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.collect;
import java.util.Collection;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.FilterFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PObject;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Target;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.lib.Cogroup;
import org.apache.crunch.lib.Join;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.materialize.MaterializableMap;
import org.apache.crunch.materialize.pobject.MapPObject;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
/**
 * In-memory implementation of {@link PTable}: a collection of key/value {@link Pair}s
 * whose relational operations (join, cogroup, aggregation) delegate to the shared
 * library implementations in {@code org.apache.crunch.lib}.
 */
public class MemTable<K, V> extends MemCollection<Pair<K, V>> implements PTable<K, V> {
  // May be null for untyped tables, in which case key/value types are unknown.
  private PTableType<K, V> ptype;
  public MemTable(Iterable<Pair<K, V>> collect) {
    this(collect, null, null);
  }
  public MemTable(Iterable<Pair<K, V>> collect, PTableType<K, V> ptype, String name) {
    super(collect, ptype, name);
    this.ptype = ptype;
  }
  @Override
  public PTable<K, V> union(PTable<K, V> other) {
    return union(new PTable[] { other });
  }
  @Override
  public PTable<K, V> union(PTable<K, V>... others) {
    return getPipeline().unionTables(
        ImmutableList.<PTable<K, V>>builder().add(this).add(others).build());
  }
  @Override
  public PGroupedTable<K, V> groupByKey() {
    return groupByKey(null);
  }
  /** Partition counts are irrelevant in memory; delegates to the default grouping. */
  @Override
  public PGroupedTable<K, V> groupByKey(int numPartitions) {
    return groupByKey(null);
  }
  /** Groups eagerly: the shuffle happens inside the MemGroupedTable constructor. */
  @Override
  public PGroupedTable<K, V> groupByKey(GroupingOptions options) {
    return new MemGroupedTable<K, V>(this, options);
  }
  @Override
  public PTable<K, V> write(Target target) {
    super.write(target);
    return this;
  }
  @Override
  public PTable<K, V> write(Target target, Target.WriteMode writeMode) {
    getPipeline().write(this, target, writeMode);
    return this;
  }
  /** Caching is meaningless for in-memory data. */
  @Override
  public PTable<K, V> cache() {
    // No-op
    return this;
  }
  @Override
  public PTable<K, V> cache(CachingOptions options) {
    // No-op
    return this;
  }
  @Override
  public PTableType<K, V> getPTableType() {
    return ptype;
  }
  /** Returns the key type, or null if this table is untyped. */
  @Override
  public PType<K> getKeyType() {
    if (ptype != null) {
      return ptype.getKeyType();
    }
    return null;
  }
  /** Returns the value type, or null if this table is untyped. */
  @Override
  public PType<V> getValueType() {
    if (ptype != null) {
      return ptype.getValueType();
    }
    return null;
  }
  @Override
  public PTable<K, V> filter(FilterFn<Pair<K, V>> filterFn) {
    return parallelDo(filterFn, getPTableType());
  }
  @Override
  public PTable<K, V> filter(String name, FilterFn<Pair<K, V>> filterFn) {
    return parallelDo(name, filterFn, getPTableType());
  }
  @Override
  public <U> PTable<K, U> mapValues(MapFn<V, U> mapFn, PType<U> ptype) {
    return PTables.mapValues(this, mapFn, ptype);
  }
  @Override
  public <U> PTable<K, U> mapValues(String name, MapFn<V, U> mapFn, PType<U> ptype) {
    return PTables.mapValues(name, this, mapFn, ptype);
  }
  @Override
  public <K2> PTable<K2, V> mapKeys(MapFn<K, K2> mapFn, PType<K2> ptype) {
    return PTables.mapKeys(this, mapFn, ptype);
  }
  @Override
  public <K2> PTable<K2, V> mapKeys(String name, MapFn<K, K2> mapFn, PType<K2> ptype) {
    return PTables.mapKeys(name, this, mapFn, ptype);
  }
  @Override
  public PTable<K, V> top(int count) {
    return Aggregate.top(this, count, true);
  }
  @Override
  public PTable<K, V> bottom(int count) {
    return Aggregate.top(this, count, false);
  }
  @Override
  public PTable<K, Collection<V>> collectValues() {
    return Aggregate.collectValues(this);
  }
  @Override
  public <U> PTable<K, Pair<V, U>> join(PTable<K, U> other) {
    return Join.join(this, other);
  }
  @Override
  public <U> PTable<K, Pair<Collection<V>, Collection<U>>> cogroup(PTable<K, U> other) {
    return Cogroup.cogroup(this, other);
  }
  @Override
  public PCollection<K> keys() {
    return PTables.keys(this);
  }
  @Override
  public PCollection<V> values() {
    return PTables.values(this);
  }
  /** Materializes the table into a Map view; later keys win on duplicates per MaterializableMap. */
  @Override
  public Map<K, V> materializeToMap() {
    return new MaterializableMap<K, V>(this.materialize());
  }
  @Override
  public PObject<Map<K, V>> asMap() {
    return new MapPObject<K, V>(this);
  }
}
| 2,727 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/collect/MemCollection.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.collect;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.Set;
import com.google.common.collect.Iterables;
import javassist.util.proxy.MethodFilter;
import javassist.util.proxy.MethodHandler;
import javassist.util.proxy.ProxyFactory;
import org.apache.commons.lang.SerializationException;
import org.apache.commons.lang.SerializationUtils;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.DoFn;
import org.apache.crunch.FilterFn;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Pipeline;
import org.apache.crunch.ReadableData;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.Target;
import org.apache.crunch.fn.ExtractKeyFn;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.mem.emit.InMemoryEmitter;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.materialize.pobject.CollectionPObject;
import org.apache.crunch.materialize.pobject.FirstElementPObject;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.util.ClassloaderFallbackObjectInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
/**
 * An in-memory implementation of {@link PCollection}: the backing data is a plain
 * {@link Iterable}, and all operations execute eagerly within the current JVM via
 * {@link MemPipeline}.
 */
public class MemCollection<S> implements PCollection<S> {
  // The backing data for this collection; materialize() returns it directly.
  private final Iterable<S> collect;
  // May be null for untyped collections.
  private final PType<S> ptype;
  private String name;
  public MemCollection(Iterable<S> collect) {
    this(collect, null, null);
  }
  public MemCollection(Iterable<S> collect, PType<S> ptype) {
    this(collect, ptype, null);
  }
  public MemCollection(Iterable<S> collect, PType<S> ptype, String name) {
    this.collect = collect;
    this.ptype = ptype;
    this.name = name;
  }
  @Override
  public Pipeline getPipeline() {
    return MemPipeline.getInstance();
  }
  @Override
  public PCollection<S> union(PCollection<S> other) {
    return union(new PCollection[] { other });
  }
  @Override
  public PCollection<S> union(PCollection<S>... collections) {
    return getPipeline().union(
        ImmutableList.<PCollection<S>>builder().add(this).add(collections).build());
  }
  // Round-trips the DoFn through Java serialization so that DoFns that would fail
  // when shipped to a real cluster are caught during in-memory testing.
  private <S, T> DoFn<S, T> verifySerializable(String name, DoFn<S, T> doFn) {
    try {
      return (DoFn<S, T>) deserialize(SerializationUtils.serialize(doFn));
    } catch (SerializationException e) {
      throw new IllegalStateException(
          doFn.getClass().getSimpleName() + " named '" + name + "' cannot be serialized",
          e);
    }
  }
  // Use a custom deserialize implementation (not SerializationUtils) so we can fall back
  // to using the thread context classloader, which is needed when running Scrunch in
  // the Scala REPL
  private static Object deserialize(InputStream inputStream) {
    if (inputStream == null) {
      throw new IllegalArgumentException("The InputStream must not be null");
    }
    ObjectInputStream in = null;
    try {
      // stream closed in the finally
      in = new ClassloaderFallbackObjectInputStream(inputStream);
      return in.readObject();
    } catch (ClassNotFoundException ex) {
      throw new SerializationException(ex);
    } catch (IOException ex) {
      throw new SerializationException(ex);
    } finally {
      try {
        if (in != null) {
          in.close();
        }
      } catch (IOException ex) {
        // ignore close exception
      }
    }
  }
  private static Object deserialize(byte[] objectData) {
    if (objectData == null) {
      throw new IllegalArgumentException("The byte[] must not be null");
    }
    ByteArrayInputStream bais = new ByteArrayInputStream(objectData);
    return deserialize(bais);
  }
  @Override
  public <T> PCollection<T> parallelDo(DoFn<S, T> doFn, PType<T> type) {
    return parallelDo(null, doFn, type);
  }
  @Override
  public <T> PCollection<T> parallelDo(String name, DoFn<S, T> doFn, PType<T> type) {
    return parallelDo(name, doFn, type, ParallelDoOptions.builder().build());
  }
  /**
   * Applies {@code doFn} eagerly to every element, collecting the emitted values
   * into a new in-memory collection. The DoFn's full lifecycle (configure,
   * setContext, initialize, process, cleanup) is exercised, mirroring MR execution.
   */
  @Override
  public <T> PCollection<T> parallelDo(String name, DoFn<S, T> doFn, PType<T> type,
      ParallelDoOptions options) {
    doFn = verifySerializable(name, doFn);
    InMemoryEmitter<T> emitter = new InMemoryEmitter<T>();
    Configuration conf = getPipeline().getConfiguration();
    doFn.configure(conf);
    doFn.setContext(getInMemoryContext(conf));
    doFn.initialize();
    for (S s : collect) {
      doFn.process(s, emitter);
    }
    doFn.cleanup(emitter);
    return new MemCollection<T>(emitter.getOutput(), type, name);
  }
  @Override
  public <K, V> PTable<K, V> parallelDo(DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type) {
    return parallelDo(null, doFn, type);
  }
  @Override
  public <K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type) {
    return parallelDo(name, doFn, type, ParallelDoOptions.builder().build());
  }
  /**
   * Table variant of {@link #parallelDo(String, DoFn, PType, ParallelDoOptions)};
   * applies {@code doFn} eagerly and collects the emitted pairs into a new table.
   */
  @Override
  public <K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type,
      ParallelDoOptions options) {
    // Verify serializability here too, for consistency with the PCollection variant:
    // a DoFn that cannot be serialized would fail when run on a real cluster, and
    // the in-memory pipeline should surface that during testing.
    doFn = verifySerializable(name, doFn);
    InMemoryEmitter<Pair<K, V>> emitter = new InMemoryEmitter<Pair<K, V>>();
    Configuration conf = getPipeline().getConfiguration();
    doFn.configure(conf);
    doFn.setContext(getInMemoryContext(conf));
    doFn.initialize();
    for (S s : collect) {
      doFn.process(s, emitter);
    }
    doFn.cleanup(emitter);
    return new MemTable<K, V>(emitter.getOutput(), type, name);
  }
  @Override
  public PCollection<S> write(Target target) {
    getPipeline().write(this, target);
    return this;
  }
  @Override
  public PCollection<S> write(Target target, Target.WriteMode writeMode) {
    getPipeline().write(this, target, writeMode);
    return this;
  }
  /** Already materialized: returns the backing iterable directly. */
  @Override
  public Iterable<S> materialize() {
    return collect;
  }
  /** Caching is meaningless for in-memory data. */
  @Override
  public PCollection<S> cache() {
    // No-op
    return this;
  }
  @Override
  public PCollection<S> cache(CachingOptions options) {
    // No-op
    return this;
  }
  /** {@inheritDoc} */
  @Override
  public PObject<Collection<S>> asCollection() {
    return new CollectionPObject<S>(this);
  }
  @Override
  public PObject<S> first() { return new FirstElementPObject<S>(this); }
  @Override
  public <Output> Output sequentialDo(String label, PipelineCallable<Output> pipelineCallable) {
    pipelineCallable.dependsOn(label, this);
    return getPipeline().sequentialDo(pipelineCallable);
  }
  /** Snapshot of the backing data; the materialize flag is irrelevant in memory. */
  @Override
  public ReadableData<S> asReadable(boolean materialize) {
    return new MemReadableData<S>(ImmutableList.copyOf(collect));
  }
  /** Returns an immutable snapshot of the backing data. */
  public Collection<S> getCollection() {
    return ImmutableList.copyOf(collect);
  }
  @Override
  public PType<S> getPType() {
    return ptype;
  }
  /** Returns the type family, or null if this collection is untyped. */
  @Override
  public PTypeFamily getTypeFamily() {
    if (ptype != null) {
      return ptype.getFamily();
    }
    return null;
  }
  @Override
  public long getSize() {
    return Iterables.isEmpty(collect) ? 0 : 1; // getSize is only used for pipeline optimization in MR
  }
  @Override
  public String getName() {
    return name;
  }
  @Override
  public String toString() {
    return collect.toString();
  }
  @Override
  public PTable<S, Long> count() {
    return Aggregate.count(this);
  }
  @Override
  public PObject<Long> length() {
    return Aggregate.length(this);
  }
  @Override
  public PObject<S> max() {
    return Aggregate.max(this);
  }
  @Override
  public PObject<S> min() {
    return Aggregate.min(this);
  }
  @Override
  public PCollection<S> aggregate(Aggregator<S> aggregator) {
    return Aggregate.aggregate(this, aggregator);
  }
  @Override
  public PCollection<S> filter(FilterFn<S> filterFn) {
    return parallelDo(filterFn, getPType());
  }
  @Override
  public PCollection<S> filter(String name, FilterFn<S> filterFn) {
    return parallelDo(name, filterFn, getPType());
  }
  @Override
  public <K> PTable<K, S> by(MapFn<S, K> mapFn, PType<K> keyType) {
    return parallelDo(new ExtractKeyFn<K, S>(mapFn), getTypeFamily().tableOf(keyType, getPType()));
  }
  @Override
  public <K> PTable<K, S> by(String name, MapFn<S, K> mapFn, PType<K> keyType) {
    return parallelDo(name, new ExtractKeyFn<K, S>(mapFn), getTypeFamily().tableOf(keyType, getPType()));
  }
  /**
   * The method creates a {@link TaskInputOutputContext} that will just provide
   * {@linkplain Configuration}. The method has been implemented with javassist
   * as there are API changes in versions of Hadoop. In hadoop 1.0.3 the
   * {@linkplain TaskInputOutputContext} is abstract class while in version 2
   * the same is an interface.
   * <p>
   * Note: The intention of this is to provide the bare essentials that are
   * required to make the {@linkplain MemPipeline} work. It lacks even the basic
   * things that can provide some support for unit testing a pipeline.
   */
  private static TaskInputOutputContext<?, ?, ?, ?> getInMemoryContext(final Configuration conf) {
    ProxyFactory factory = new ProxyFactory();
    Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
    Class[] types = new Class[0];
    Object[] args = new Object[0];
    final TaskAttemptID taskAttemptId = new TaskAttemptID();
    if (superType.isInterface()) {
      factory.setInterfaces(new Class[] { superType });
    } else {
      // Hadoop 1.x: TaskInputOutputContext is a class, so proxy via subclassing and
      // pass the minimal constructor arguments.
      types = new Class[] { Configuration.class, TaskAttemptID.class, RecordWriter.class, OutputCommitter.class,
          StatusReporter.class };
      args = new Object[] { conf, taskAttemptId, null, null, null };
      factory.setSuperclass(superType);
    }
    // Only these methods are intercepted; any other call fails fast below.
    final Set<String> handledMethods = ImmutableSet.of("getConfiguration", "getCounter",
        "progress", "getNumReduceTasks", "getTaskAttemptID");
    factory.setFilter(new MethodFilter() {
      @Override
      public boolean isHandled(Method m) {
        return handledMethods.contains(m.getName());
      }
    });
    MethodHandler handler = new MethodHandler() {
      @Override
      public Object invoke(Object arg0, Method m, Method arg2, Object[] args) throws Throwable {
        String name = m.getName();
        if ("getConfiguration".equals(name)) {
          return conf;
        } else if ("progress".equals(name)) {
          // no-op
          return null;
        } else if ("getTaskAttemptID".equals(name)) {
          return taskAttemptId;
        } else if ("getNumReduceTasks".equals(name)) {
          return 1;
        } else if ("getCounter".equals(name)) {
          // Counters are routed to the shared MemPipeline counter store.
          if (args.length == 1) {
            return MemPipeline.getCounters().findCounter((Enum<?>) args[0]);
          } else {
            return MemPipeline.getCounters().findCounter((String) args[0], (String) args[1]);
          }
        } else {
          throw new IllegalStateException("Unhandled method " + name);
        }
      }
    };
    try {
      Object newInstance = factory.create(types, args, handler);
      return (TaskInputOutputContext<?, ?, ?, ?>) newInstance;
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException(e);
    }
  }
}
| 2,728 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mem/emit/InMemoryEmitter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mem.emit;
import java.util.List;
import org.apache.crunch.Emitter;
import com.google.common.collect.Lists;
import org.apache.crunch.Pair;
/**
 * An {@code Emitter} implementation that collects every emitted record into a
 * backing {@code List}, primarily for in-memory pipeline execution.
 *
 * @param <T> the type of the emitted records
 */
public class InMemoryEmitter<T> implements Emitter<T> {

  /** Live backing list that receives every emitted record. */
  private final List<T> output;

  /** Creates an emitter backed by a brand-new {@code ArrayList}. */
  public static <T> InMemoryEmitter<T> create() {
    return new InMemoryEmitter<T>();
  }

  public InMemoryEmitter() {
    this(Lists.<T> newArrayList());
  }

  /**
   * Creates an emitter that appends to the supplied list. The list is used
   * directly (not copied), so the caller observes emitted values immediately.
   */
  public InMemoryEmitter(List<T> output) {
    this.output = output;
  }

  /** Appends the record to the backing list. */
  @Override
  public void emit(T emitted) {
    this.output.add(emitted);
  }

  /**
   * Discards everything collected so far. Note that, unlike a typical stream
   * flush, this clears the backing list rather than forwarding its contents.
   */
  @Override
  public void flush() {
    this.output.clear();
  }

  /** Returns the live backing list of emitted records. */
  public List<T> getOutput() {
    return output;
  }
}
| 2,729 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PGroupedTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.Iterator;
import java.util.List;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.Pair;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.collect.Iterables;
/**
 * The {@code PType} instance for {@link PGroupedTable} instances. Its settings
 * are derived from the {@code PTableType} that was grouped to create the
 * {@code PGroupedTable} instance.
 */
public abstract class PGroupedTableType<K, V> implements PType<Pair<K, Iterable<V>>> {

  /**
   * An {@code Iterable} that lazily maps each raw grouped value through a
   * {@code MapFn} as it is iterated.
   * <p>
   * NOTE(review): the same {@link HoldLastIterator} instance is reused by every
   * call to {@link #iterator()}, so only one iteration may be active at a time;
   * this matches the single-pass nature of reducer value iterables.
   */
  protected static class PTypeIterable<V> implements Iterable<V> {
    private final Iterable<Object> iterable;
    private final HoldLastIterator<V> holdLastIter;

    public PTypeIterable(MapFn<Object, V> mapFn, Iterable<Object> iterable) {
      this.iterable = iterable;
      this.holdLastIter = new HoldLastIterator<V>(mapFn);
    }

    @Override
    public Iterator<V> iterator() {
      return holdLastIter.reset(iterable.iterator());
    }

    @Override
    public String toString() {
      return holdLastIter.toString();
    }
  }

  /**
   * An {@code Iterator} that applies a {@code MapFn} to each raw element and
   * remembers the most recently returned value so {@link #toString()} can show
   * iteration progress.
   */
  protected static class HoldLastIterator<V> implements Iterator<V> {
    private Iterator<Object> iter;
    private V lastReturned = null;
    private final MapFn<Object, V> mapFn;

    public HoldLastIterator(MapFn<Object, V> mapFn) {
      this.mapFn = mapFn;
    }

    /** Re-points this iterator at a new underlying iterator and returns {@code this}. */
    public HoldLastIterator<V> reset(Iterator<Object> iter) {
      this.iter = iter;
      return this;
    }

    @Override
    public boolean hasNext() {
      return iter.hasNext();
    }

    @Override
    public V next() {
      lastReturned = mapFn.map(iter.next());
      return lastReturned;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder().append('[');
      if (lastReturned != null) {
        sb.append(lastReturned).append(", ...]");
      } else if (iter != null) {
        sb.append("...]");
      }
      return sb.toString();
    }
  }

  /**
   * Maps a raw {@code Pair<Object, Iterable<Object>>} coming off the shuffle
   * into a typed {@code Pair<K, Iterable<V>>} by applying the key and value
   * input functions of the underlying table type.
   */
  public static class PairIterableMapFn<K, V> extends MapFn<Pair<Object, Iterable<Object>>, Pair<K, Iterable<V>>> {
    private final MapFn<Object, K> keys;
    private final MapFn<Object, V> values;

    public PairIterableMapFn(MapFn<Object, K> keys, MapFn<Object, V> values) {
      this.keys = keys;
      this.values = values;
    }

    @Override
    public void configure(Configuration conf) {
      keys.configure(conf);
      values.configure(conf);
    }

    @Override
    public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
      keys.setContext(context);
      values.setContext(context);
    }

    @Override
    public void initialize() {
      keys.initialize();
      values.initialize();
    }

    @Override
    public Pair<K, Iterable<V>> map(Pair<Object, Iterable<Object>> input) {
      // Only the key is converted eagerly; the values iterable is mapped lazily.
      return Pair.<K, Iterable<V>> of(keys.map(input.first()), new PTypeIterable<V>(values, input.second()));
    }
  }

  protected final PTableType<K, V> tableType;

  public PGroupedTableType(PTableType<K, V> tableType) {
    this.tableType = tableType;
  }

  /** Returns the {@code PTableType} that was grouped to produce this type. */
  public PTableType<K, V> getTableType() {
    return tableType;
  }

  @Override
  public PTypeFamily getFamily() {
    return tableType.getFamily();
  }

  @Override
  public List<PType> getSubTypes() {
    return tableType.getSubTypes();
  }

  @Override
  public Converter getConverter() {
    return tableType.getConverter();
  }

  /** Returns the {@code Converter} used during the shuffle phase of a grouping. */
  public abstract Converter getGroupingConverter();

  /** Configures the given MapReduce job's shuffle according to the grouping options. */
  public abstract void configureShuffle(Job job, GroupingOptions options);

  @Override
  public ReadableSourceTarget<Pair<K, Iterable<V>>> getDefaultFileSource(Path path) {
    throw new UnsupportedOperationException("Grouped tables cannot be written out directly");
  }
}
| 2,730 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/TupleDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.List;
import org.apache.crunch.Tuple;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.Lists;
/**
 * Performs deep copies (based on underlying PType deep copying) of Tuple-based
 * objects.
 *
 * @param <T> The type of Tuple implementation being copied
 */
public class TupleDeepCopier<T extends Tuple> implements DeepCopier<T> {

  private final TupleFactory<T> tupleFactory;
  private final List<PType> elementTypes;

  public TupleDeepCopier(Class<T> tupleClass, PType... elementTypes) {
    this.tupleFactory = TupleFactory.getTupleFactory(tupleClass);
    this.elementTypes = Lists.newArrayList(elementTypes);
  }

  @Override
  public void initialize(Configuration conf) {
    // Each element type must be initialized before it can detach values.
    for (PType elementType : elementTypes) {
      elementType.initialize(conf);
    }
  }

  @Override
  public T deepCopy(T source) {
    if (source == null) {
      return null;
    }
    Object[] copied = new Object[source.size()];
    for (int i = 0; i < elementTypes.size(); i++) {
      copied[i] = elementTypes.get(i).getDetachedValue(source.get(i));
    }
    return tupleFactory.makeTuple(copied);
  }
}
| 2,731 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PTypeFamily.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
/**
 * An abstract factory for creating {@code PType} instances that have the same
 * serialization/storage backing format.
 *
 */
public interface PTypeFamily {
  /** Returns the {@code PType} for null/void values. */
  PType<Void> nulls();
  /** Returns the {@code PType} for strings. */
  PType<String> strings();
  /** Returns the {@code PType} for longs. */
  PType<Long> longs();
  /** Returns the {@code PType} for ints. */
  PType<Integer> ints();
  /** Returns the {@code PType} for floats. */
  PType<Float> floats();
  /** Returns the {@code PType} for doubles. */
  PType<Double> doubles();
  /** Returns the {@code PType} for booleans. */
  PType<Boolean> booleans();
  /** Returns the {@code PType} for raw bytes. */
  PType<ByteBuffer> bytes();
  /** Returns a {@code PType} for an arbitrary record class in this family. */
  <T> PType<T> records(Class<T> clazz);
  /** Returns a {@code PType} for collections of the given element type. */
  <T> PType<Collection<T>> collections(PType<T> ptype);
  /** Returns a {@code PType} for string-keyed maps with the given value type. */
  <T> PType<Map<String, T>> maps(PType<T> ptype);
  /** Returns a {@code PType} for pairs of the given component types. */
  <V1, V2> PType<Pair<V1, V2>> pairs(PType<V1> p1, PType<V2> p2);
  /** Returns a {@code PType} for 3-tuples of the given component types. */
  <V1, V2, V3> PType<Tuple3<V1, V2, V3>> triples(PType<V1> p1, PType<V2> p2, PType<V3> p3);
  /** Returns a {@code PType} for 4-tuples of the given component types. */
  <V1, V2, V3, V4> PType<Tuple4<V1, V2, V3, V4>> quads(PType<V1> p1, PType<V2> p2, PType<V3> p3, PType<V4> p4);
  /** Returns a {@code PType} for arbitrary-arity tuples of the given component types. */
  PType<TupleN> tuples(PType<?>... ptypes);
  /** Returns a {@code PType} for a custom {@code Tuple} subclass with the given component types. */
  <T extends Tuple> PType<T> tuples(Class<T> clazz, PType<?>... ptypes);
  /** Returns a {@code PType} for {@code T} stored via conversion to and from a base type {@code S}. */
  <S, T> PType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base);
  /**
   * A derived type whose values are immutable. This variaion of derived exists to optimize for the case
   * where deep-copying of data is never needed.
   */
  <S, T> PType<T> derivedImmutable(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base);
  /** Returns a {@code PType} for tagged unions of the given component types. */
  PType<Union> unionOf(PType<?>... ptypes);
  /** Returns a {@code PTableType} with the given key and value types. */
  <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value);
  /**
   * Returns the equivalent of the given ptype for this family, if it exists.
   */
  <T> PType<T> as(PType<T> ptype);
}
| 2,732 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PTypeUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.Collection;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
/**
 * Utilities for converting between {@code PType}s from different
 * {@code PTypeFamily} implementations.
 */
public class PTypeUtils {

  /**
   * Returns the equivalent of {@code ptype} expressed in the target family
   * {@code tf}, recursing into table, tuple, and collection sub-types.
   */
  public static <T> PType<T> convert(PType<T> ptype, PTypeFamily tf) {
    if (ptype instanceof PTableType) {
      PTableType tableType = (PTableType) ptype;
      return tf.tableOf(tf.as(tableType.getKeyType()), tf.as(tableType.getValueType()));
    }
    Class<T> clazz = ptype.getTypeClass();
    if (Tuple.class.isAssignableFrom(clazz)) {
      List<PType> subs = ptype.getSubTypes();
      if (Pair.class.equals(clazz)) {
        return tf.pairs(tf.as(subs.get(0)), tf.as(subs.get(1)));
      }
      if (Tuple3.class.equals(clazz)) {
        return tf.triples(tf.as(subs.get(0)), tf.as(subs.get(1)), tf.as(subs.get(2)));
      }
      if (Tuple4.class.equals(clazz)) {
        return tf.quads(tf.as(subs.get(0)), tf.as(subs.get(1)), tf.as(subs.get(2)), tf.as(subs.get(3)));
      }
      if (TupleN.class.equals(clazz)) {
        PType[] converted = new PType[subs.size()];
        for (int i = 0; i < converted.length; i++) {
          converted[i] = tf.as(subs.get(i));
        }
        return (PType<T>) tf.tuples(converted);
      }
    }
    if (Collection.class.isAssignableFrom(clazz)) {
      return tf.collections(tf.as(ptype.getSubTypes().get(0)));
    }
    // Fall back to treating it as a plain record type.
    return tf.records(clazz);
  }

  private PTypeUtils() {
    // Utility class; no instances.
  }
}
| 2,733 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import org.apache.crunch.DoFn;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
 * A {@code PType} defines a mapping between a data type that is used in a Crunch pipeline and a
 * serialization and storage format that is used to read/write data from/to HDFS. Every
 * {@link PCollection} has an associated {@code PType} that tells Crunch how to read/write data from
 * that {@code PCollection}.
 *
 */
public interface PType<T> extends Serializable {
  /**
   * Returns the Java type represented by this {@code PType}.
   */
  Class<T> getTypeClass();
  /**
   * Returns the {@code PTypeFamily} that this {@code PType} belongs to.
   */
  PTypeFamily getFamily();
  /** Returns the function that maps raw serialized values into instances of {@code T}. */
  MapFn<Object, T> getInputMapFn();
  /** Returns the function that maps instances of {@code T} back to raw serialized values. */
  MapFn<T, Object> getOutputMapFn();
  /** Returns the {@code Converter} that bridges this type to MapReduce key/value pairs. */
  Converter getConverter();
  /**
   * Initialize this PType for use within a DoFn. This generally only needs to be called when using
   * a PType for {@link #getDetachedValue(Object)}.
   *
   * @param conf Configuration object
   * @see PType#getDetachedValue(Object)
   */
  void initialize(Configuration conf);
  /**
   * Returns a copy of a value (or the value itself) that can safely be retained.
   * <p>
   * This is useful when iterable values being processed in a DoFn (via a reducer) need to be held
   * on to for more than the scope of a single iteration, as a reducer (and therefore also a DoFn
   * that has an Iterable as input) re-use deserialized values. More information on object reuse is
   * available in the {@link DoFn} class documentation.
   *
   * @param value The value to be deep-copied
   * @return A deep copy of the input value
   */
  T getDetachedValue(T value);
  /**
   * Returns a {@code SourceTarget} that is able to read/write data using the serialization format
   * specified by this {@code PType}.
   */
  ReadableSourceTarget<T> getDefaultFileSource(Path path);
  /**
   * Returns a {@code ReadableSource} that contains the data in the given {@code Iterable}.
   *
   * @param conf The Configuration to use
   * @param path The path to write the data to
   * @param contents The contents to write
   * @param parallelism The desired parallelism
   * @return A new instance of ReadableSource
   */
  ReadableSource<T> createSourceTarget(Configuration conf, Path path, Iterable<T> contents, int parallelism)
      throws IOException;
  /**
   * Returns the sub-types that make up this PType if it is a composite instance, such as a tuple.
   */
  List<PType> getSubTypes();
}
| 2,734 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/Protos.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.Iterator;
import java.util.List;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Splitter;
import com.google.protobuf.Descriptors.FieldDescriptor;
import com.google.protobuf.Message;
import com.google.protobuf.Message.Builder;
/**
 * Utility functions for working with protocol buffers in Crunch.
 */
public class Protos {
  /**
   * Utility function for creating a default PB Message from a Class object that
   * works with both protoc 2.3.0 and 2.4.x.
   * @param clazz The class of the protocol buffer to create
   * @return An instance of a protocol buffer
   */
  public static <M extends Message> M getDefaultInstance(Class<M> clazz) {
    if (clazz.getConstructors().length > 0) {
      // Protobuf 2.3.0
      return ReflectionUtils.newInstance(clazz, null);
    } else {
      // Protobuf 2.4.x
      try {
        Message.Builder mb = (Message.Builder) clazz.getDeclaredMethod("newBuilder").invoke(null);
        return (M) mb.getDefaultInstanceForType();
      } catch (Exception e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }
  /** Returns a {@code MapFn} that extracts the value of the named field from each message. */
  public static <M extends Message, K> MapFn<M, K> extractKey(String fieldName) {
    return new ExtractKeyFn<M, K>(fieldName);
  }
  /** Returns a {@code DoFn} that parses delimited text lines into messages of the given class. */
  public static <M extends Message> DoFn<String, M> lineParser(String sep, Class<M> msgClass) {
    return new TextToProtoFn<M>(sep, msgClass);
  }
  /**
   * Extracts the value of a single named field from a message. The field
   * descriptor is resolved lazily from the first input and cached.
   */
  private static class ExtractKeyFn<M extends Message, K> extends MapFn<M, K> {
    private final String fieldName;
    // Resolved lazily; FieldDescriptor is not Serializable, hence transient.
    private transient FieldDescriptor fd;
    public ExtractKeyFn(String fieldName) {
      this.fieldName = fieldName;
    }
    @Override
    public K map(M input) {
      if (input == null) {
        throw new IllegalArgumentException("Null inputs not supported by Protos.ExtractKeyFn");
      } else if (fd == null) {
        fd = input.getDescriptorForType().findFieldByName(fieldName);
        if (fd == null) {
          throw new IllegalStateException("Could not find field: " + fieldName + " in message: " + input);
        }
      }
      return (K) input.getField(fd);
    }
  }
  /**
   * Parses separator-delimited text lines into protobuf messages, assigning
   * values positionally to the message's fields. Lines with unparseable
   * numeric values are counted (see {@link ParseErrors}) and dropped rather
   * than emitted.
   * <p>
   * NOTE(review): only STRING/INT/LONG/FLOAT/DOUBLE/BOOLEAN/ENUM field types
   * are handled by the switch below; a message with a BYTE_STRING or MESSAGE
   * field would reach setField with a null value — confirm inputs never
   * include such fields.
   */
  private static class TextToProtoFn<M extends Message> extends DoFn<String, M> {
    private final String sep;
    private final Class<M> msgClass;
    // Rebuilt in initialize(); none of these are Serializable.
    private transient M msgInstance;
    private transient List<FieldDescriptor> fields;
    private transient Splitter splitter;
    // Counters for lines that failed to parse.
    enum ParseErrors {
      TOTAL,
      NUMBER_FORMAT
    };
    public TextToProtoFn(String sep, Class<M> msgClass) {
      this.sep = sep;
      this.msgClass = msgClass;
    }
    @Override
    public void initialize() {
      this.msgInstance = getDefaultInstance(msgClass);
      this.fields = msgInstance.getDescriptorForType().getFields();
      this.splitter = Splitter.on(sep);
    }
    @Override
    public void process(String input, Emitter<M> emitter) {
      if (input != null && !input.isEmpty()) {
        Builder b = msgInstance.newBuilderForType();
        Iterator<String> iter = splitter.split(input).iterator();
        boolean parseError = false;
        // Fields are filled positionally; extra fields beyond the tokens are left unset.
        for (FieldDescriptor fd : fields) {
          if (iter.hasNext()) {
            String value = iter.next();
            if (value != null && !value.isEmpty()) {
              Object parsedValue = null;
              try {
                switch (fd.getJavaType()) {
                case STRING:
                  parsedValue = value;
                  break;
                case INT:
                  parsedValue = Integer.valueOf(value);
                  break;
                case LONG:
                  parsedValue = Long.valueOf(value);
                  break;
                case FLOAT:
                  parsedValue = Float.valueOf(value);
                  break;
                case DOUBLE:
                  parsedValue = Double.valueOf(value);
                  break;
                case BOOLEAN:
                  parsedValue = Boolean.valueOf(value);
                  break;
                case ENUM:
                  parsedValue = fd.getEnumType().findValueByName(value);
                  break;
                }
                b.setField(fd, parsedValue);
              } catch (NumberFormatException nfe) {
                increment(ParseErrors.NUMBER_FORMAT);
                parseError = true;
                break;
              }
            }
          }
        }
        if (parseError) {
          increment(ParseErrors.TOTAL);
        } else {
          emitter.emit((M) b.build());
        }
      }
    }
  }
}
| 2,735 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/CollectionDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.Lists;
/**
 * Performs deep copies (based on underlying PType deep copying) of Collections.
 *
 * @param <T> The type of the collection elements being copied
 */
public class CollectionDeepCopier<T> implements DeepCopier<Collection<T>> {

  private final PType<T> elementType;

  public CollectionDeepCopier(PType<T> elementType) {
    this.elementType = elementType;
  }

  @Override
  public void initialize(Configuration conf) {
    this.elementType.initialize(conf);
  }

  @Override
  public Collection<T> deepCopy(Collection<T> source) {
    if (source == null) {
      return null;
    }
    List<T> copiedCollection = Lists.newArrayListWithCapacity(source.size());
    for (T value : source) {
      copiedCollection.add(elementType.getDetachedValue(value));
    }
    return copiedCollection;
  }
}
| 2,736 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/MapDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.Maps;
/**
 * Performs deep copies (based on underlying PType deep copying) of
 * string-keyed maps.
 *
 * @param <T> the type of the map values being copied
 */
public class MapDeepCopier<T> implements DeepCopier<Map<String, T>> {

  private final PType<T> ptype;

  public MapDeepCopier(PType<T> ptype) {
    this.ptype = ptype;
  }

  @Override
  public void initialize(Configuration conf) {
    ptype.initialize(conf);
  }

  @Override
  public Map<String, T> deepCopy(Map<String, T> source) {
    if (source == null) {
      return null;
    }
    Map<String, T> copy = Maps.newHashMap();
    for (Entry<String, T> e : source.entrySet()) {
      copy.put(e.getKey(), ptype.getDetachedValue(e.getValue()));
    }
    return copy;
  }
}
| 2,737 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/UnionDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import com.google.common.collect.Lists;
import org.apache.crunch.Union;
import org.apache.hadoop.conf.Configuration;
import java.util.List;
/**
 * Performs deep copies of {@link Union} values by delegating to the
 * {@code PType} matching the union's index.
 */
public class UnionDeepCopier implements DeepCopier<Union> {

  private final List<PType> elementTypes;

  public UnionDeepCopier(PType... elementTypes) {
    this.elementTypes = Lists.newArrayList(elementTypes);
  }

  @Override
  public void initialize(Configuration conf) {
    for (PType elementType : elementTypes) {
      elementType.initialize(conf);
    }
  }

  @Override
  public Union deepCopy(Union source) {
    if (source == null) {
      return null;
    }
    // Only the branch that is actually populated needs to be copied.
    int index = source.getIndex();
    Object copiedValue = elementTypes.get(index).getDetachedValue(source.getValue());
    return new Union(index, copiedValue);
  }
}
| 2,738 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/Converter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.io.Serializable;
import org.apache.crunch.DoFn;
/**
 * Converts the input key/value from a MapReduce task into the input to a
 * {@link DoFn}, or takes the output of a {@code DoFn} and write it to the
 * output key/values.
 */
public interface Converter<K, V, S, T> extends Serializable {
  /** Converts a single MapReduce key/value pair into a {@code DoFn} input value. */
  S convertInput(K key, V value);
  /** Converts a key and an iterable of values (as seen by a reducer) into a {@code DoFn} input. */
  T convertIterableInput(K key, Iterable<V> value);
  /** Extracts the MapReduce output key from a {@code DoFn} output value. */
  K outputKey(S value);
  /** Extracts the MapReduce output value from a {@code DoFn} output value. */
  V outputValue(S value);
  /** Returns the MapReduce key class this converter reads/writes. */
  Class<K> getKeyClass();
  /** Returns the MapReduce value class this converter reads/writes. */
  Class<V> getValueClass();
  /**
   * If true, convert the inputs or outputs from this {@code Converter} instance
   * before (for outputs) or after (for inputs) using the associated PType#getInputMapFn
   * and PType#getOutputMapFn calls.
   */
  boolean applyPTypeTransforms();
}
| 2,739 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
/**
 * An extension of {@code PType} specifically for {@link PTable} objects. It
 * allows separate access to the {@code PType}s of the key and value for the
 * {@code PTable}.
 *
 */
public interface PTableType<K, V> extends PType<Pair<K, V>> {
  /**
   * Returns the key type for the table.
   */
  PType<K> getKeyType();
  /**
   * Returns the value type for the table.
   */
  PType<V> getValueType();
  /**
   * Returns the grouped table version of this type.
   */
  PGroupedTableType<K, V> getGroupedTableType();
}
| 2,740 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/TupleFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;

import java.io.Serializable;
import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;

import com.google.common.collect.Maps;
/**
 * Creates {@link Tuple} instances from arrays of component values, and acts as a
 * registry of factories for user-defined {@code Tuple} subclasses.
 */
public abstract class TupleFactory<T extends Tuple> implements Serializable {

  /**
   * Hook for one-time setup this factory needs (e.g., reflective constructor lookup)
   * before {@link #makeTuple(Object...)} is called. No-op by default.
   */
  public void initialize() {
  }

  /**
   * Creates a tuple of type {@code T} from the given component values.
   */
  public abstract T makeTuple(Object... values);

  // Registry of factories for custom Tuple subclasses, populated by create().
  // A ConcurrentMap makes registration and lookup safe under concurrent access
  // (the previous plain HashMap could lose registrations or corrupt state when
  // create() and getTupleFactory() raced), and avoids the raw Class key type.
  private static final ConcurrentMap<Class<?>, TupleFactory<?>> customTupleFactories =
      new ConcurrentHashMap<Class<?>, TupleFactory<?>>();

  /**
   * Get the {@link TupleFactory} for a given Tuple implementation.
   *
   * @param tupleClass
   *          The class for which the factory is to be retrieved
   * @return The appropriate TupleFactory
   * @throws IllegalArgumentException if no factory is registered for {@code tupleClass}
   */
  public static <T extends Tuple> TupleFactory<T> getTupleFactory(Class<T> tupleClass) {
    if (tupleClass == Pair.class) {
      return (TupleFactory<T>) PAIR;
    } else if (tupleClass == Tuple3.class) {
      return (TupleFactory<T>) TUPLE3;
    } else if (tupleClass == Tuple4.class) {
      return (TupleFactory<T>) TUPLE4;
    } else if (tupleClass == TupleN.class) {
      return (TupleFactory<T>) TUPLEN;
    }
    // Single get() instead of containsKey()+get(): one lookup, and atomic with
    // respect to concurrent create() calls.
    TupleFactory<?> custom = customTupleFactories.get(tupleClass);
    if (custom != null) {
      return (TupleFactory<T>) custom;
    }
    throw new IllegalArgumentException("Can't create TupleFactory for " + tupleClass);
  }

  /** Factory for {@link Pair} tuples. */
  public static final TupleFactory<Pair> PAIR = new TupleFactory<Pair>() {
    @Override
    public Pair makeTuple(Object... values) {
      return Pair.of(values[0], values[1]);
    }
  };

  /** Factory for {@link Tuple3} tuples. */
  public static final TupleFactory<Tuple3> TUPLE3 = new TupleFactory<Tuple3>() {
    @Override
    public Tuple3 makeTuple(Object... values) {
      return Tuple3.of(values[0], values[1], values[2]);
    }
  };

  /** Factory for {@link Tuple4} tuples. */
  public static final TupleFactory<Tuple4> TUPLE4 = new TupleFactory<Tuple4>() {
    @Override
    public Tuple4 makeTuple(Object... values) {
      return Tuple4.of(values[0], values[1], values[2], values[3]);
    }
  };

  /** Factory for arbitrary-arity {@link TupleN} tuples. */
  public static final TupleFactory<TupleN> TUPLEN = new TupleFactory<TupleN>() {
    @Override
    public TupleN makeTuple(Object... values) {
      return new TupleN(values);
    }
  };

  /**
   * Registers (or returns the already-registered) factory for a custom {@code Tuple}
   * class whose constructor takes the given argument types.
   *
   * @param clazz the custom Tuple class
   * @param typeArgs the constructor argument types used to build instances
   * @return the factory registered for {@code clazz}
   */
  public static <T extends Tuple> TupleFactory<T> create(Class<T> clazz, Class... typeArgs) {
    TupleFactory<?> existing = customTupleFactories.get(clazz);
    if (existing == null) {
      // putIfAbsent guarantees a single winner when multiple threads register
      // the same class concurrently; losers adopt the winner's factory.
      TupleFactory<T> candidate = new CustomTupleFactory<T>(clazz, typeArgs);
      existing = customTupleFactories.putIfAbsent(clazz, candidate);
      if (existing == null) {
        existing = candidate;
      }
    }
    return (TupleFactory<T>) existing;
  }

  /** Reflection-based factory for user-defined Tuple subclasses. */
  private static class CustomTupleFactory<T extends Tuple> extends TupleFactory<T> {

    private final Class<T> clazz;
    private final Class[] typeArgs;

    // Looked up lazily in initialize(): Constructor is not Serializable, so it
    // cannot be captured at construction time and shipped with the factory.
    private transient Constructor<T> constructor;

    public CustomTupleFactory(Class<T> clazz, Class[] typeArgs) {
      this.clazz = clazz;
      this.typeArgs = typeArgs;
    }

    @Override
    public void initialize() {
      try {
        constructor = clazz.getConstructor(typeArgs);
      } catch (Exception e) {
        throw new CrunchRuntimeException(e);
      }
    }

    @Override
    public T makeTuple(Object... values) {
      try {
        return constructor.newInstance(values);
      } catch (Exception e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }
}
| 2,741 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/DeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.io.Serializable;
import org.apache.hadoop.conf.Configuration;
/**
 * Performs deep copies of values.
 *
 * @param <T> The type of value that will be copied
 */
public interface DeepCopier<T> extends Serializable {

  /**
   * Initialize the deep copier with a job-specific configuration
   *
   * <p>NOTE(review): presumably called once before any {@link #deepCopy(Object)}
   * calls — confirm against callers.
   *
   * @param conf Job-specific configuration
   */
  void initialize(Configuration conf);

  /**
   * Create a deep copy of a value.
   *
   * @param source The value to be copied
   * @return The deep copy of the value
   */
  T deepCopy(T source);
}
| 2,742 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/PTypes.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.UUID;
import com.google.protobuf.ExtensionRegistry;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.util.SerializableSupplier;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
/**
 * Utility functions for creating common types of derived PTypes, e.g., for JSON
 * data, protocol buffers, and Thrift records.
 *
 */
public class PTypes {

  /**
   * A PType for Java's {@link BigInteger} type.
   */
  public static PType<BigInteger> bigInt(PTypeFamily typeFamily) {
    return typeFamily.derivedImmutable(BigInteger.class, BYTE_TO_BIGINT, BIGINT_TO_BYTE, typeFamily.bytes());
  }

  /**
   * A PType for Java's {@link BigDecimal} type.
   */
  public static PType<BigDecimal> bigDecimal(PTypeFamily typeFamily) {
    return typeFamily.derivedImmutable(BigDecimal.class, BYTE_TO_BIGDECIMAL, BIGDECIMAL_TO_BYTE, typeFamily.bytes());
  }

  /**
   * A PType for Java's {@link UUID} type.
   */
  public static PType<UUID> uuid(PTypeFamily ptf) {
    return ptf.derivedImmutable(UUID.class, BYTE_TO_UUID, UUID_TO_BYTE, ptf.bytes());
  }

  /**
   * Constructs a PType for reading a Java type from a JSON string using Jackson's {@link ObjectMapper}.
   */
  public static <T> PType<T> jsonString(Class<T> clazz, PTypeFamily typeFamily) {
    return typeFamily
        .derived(clazz, new JacksonInputMapFn<T>(clazz), new JacksonOutputMapFn<T>(), typeFamily.strings());
  }

  /**
   * Constructs a PType for the given protocol buffer.
   */
  public static <T extends Message> PType<T> protos(Class<T> clazz, PTypeFamily typeFamily) {
    return typeFamily.derivedImmutable(clazz, new ProtoInputMapFn<T>(clazz), new ProtoOutputMapFn<T>(), typeFamily.bytes());
  }

  /**
   * Constructs a PType for a protocol buffer, using the given {@code SerializableSupplier} to provide
   * an {@link ExtensionRegistry} to use in reading the given protobuf.
   */
  public static <T extends Message> PType<T> protos(
      Class<T> clazz,
      PTypeFamily typeFamily,
      SerializableSupplier<ExtensionRegistry> supplier) {
    return typeFamily.derivedImmutable(clazz,
        new ProtoInputMapFn<T>(clazz, supplier),
        new ProtoOutputMapFn<T>(),
        typeFamily.bytes());
  }

  /**
   * Constructs a PType for a Thrift record.
   */
  public static <T extends TBase> PType<T> thrifts(Class<T> clazz, PTypeFamily typeFamily) {
    return typeFamily.derived(clazz, new ThriftInputMapFn<T>(clazz), new ThriftOutputMapFn<T>(), typeFamily.bytes());
  }

  /**
   * Constructs a PType for a Java {@code Enum} type.
   */
  public static <T extends Enum> PType<T> enums(Class<T> type, PTypeFamily typeFamily) {
    return typeFamily.derivedImmutable(type, new EnumInputMapper<T>(type), new EnumOutputMapper<T>(),
        typeFamily.strings());
  }

  /** Decodes a big-endian two's-complement byte payload into a {@code BigInteger}. */
  public static final MapFn<ByteBuffer, BigInteger> BYTE_TO_BIGINT = new MapFn<ByteBuffer, BigInteger>() {
    @Override
    public BigInteger map(ByteBuffer input) {
      if (input == null) {
        return null;
      }
      // Fix: copy only the readable window. Calling input.array() directly reads the
      // entire backing array and breaks for sliced/offset buffers (position/limit/
      // arrayOffset were ignored). duplicate() keeps the caller's position intact.
      byte[] bytes = new byte[input.remaining()];
      input.duplicate().get(bytes);
      return new BigInteger(bytes);
    }
  };

  /** Encodes a {@code BigInteger} as its big-endian two's-complement bytes. */
  public static final MapFn<BigInteger, ByteBuffer> BIGINT_TO_BYTE = new MapFn<BigInteger, ByteBuffer>() {
    @Override
    public ByteBuffer map(BigInteger input) {
      return input == null ? null : ByteBuffer.wrap(input.toByteArray());
    }
  };

  /** Decodes the (scale, unscaled-digits) layout written by {@link #BIGDECIMAL_TO_BYTE}. */
  public static final MapFn<ByteBuffer, BigDecimal> BYTE_TO_BIGDECIMAL = new MapFn<ByteBuffer, BigDecimal>() {
    @Override
    public BigDecimal map(ByteBuffer input) {
      return input == null ? null : byteBufferToBigDecimal(input);
    }
  };

  /** Encodes a {@code BigDecimal} as a 4-byte scale followed by its unscaled digits. */
  public static final MapFn<BigDecimal, ByteBuffer> BIGDECIMAL_TO_BYTE = new MapFn<BigDecimal, ByteBuffer>() {
    @Override
    public ByteBuffer map(BigDecimal input) {
      return input == null ? null : bigDecimalToByteBuffer(input);
    }
  };

  /** Deserializes a JSON string into an instance of {@code clazz} via Jackson. */
  private static class JacksonInputMapFn<T> extends MapFn<String, T> {

    private final Class<T> clazz;
    // Created in initialize(): ObjectMapper is not Serializable.
    private transient ObjectMapper mapper;

    JacksonInputMapFn(Class<T> clazz) {
      this.clazz = clazz;
    }

    @Override
    public void initialize() {
      this.mapper = new ObjectMapper();
    }

    @Override
    public T map(String input) {
      try {
        return mapper.readValue(input, clazz);
      } catch (Exception e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }

  /** Serializes a value to its JSON string representation via Jackson. */
  private static class JacksonOutputMapFn<T> extends MapFn<T, String> {

    private transient ObjectMapper mapper;

    @Override
    public void initialize() {
      this.mapper = new ObjectMapper();
    }

    @Override
    public String map(T input) {
      try {
        return mapper.writeValueAsString(input);
      } catch (Exception e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }

  /** Parses serialized protobuf bytes into a message of type {@code T}. */
  private static class ProtoInputMapFn<T extends Message> extends MapFn<ByteBuffer, T> {

    private final Class<T> clazz;
    private final SerializableSupplier<ExtensionRegistry> extensionSupplier;
    private transient T instance;
    private transient ExtensionRegistry registry;

    ProtoInputMapFn(Class<T> clazz) {
      this(clazz, null);
    }

    ProtoInputMapFn(Class<T> clazz, SerializableSupplier<ExtensionRegistry> extensionSupplier) {
      this.clazz = clazz;
      this.extensionSupplier = extensionSupplier;
    }

    @Override
    public void initialize() {
      this.instance = Protos.getDefaultInstance(clazz);
      if (this.extensionSupplier != null) {
        this.registry = extensionSupplier.get();
      } else {
        this.registry = ExtensionRegistry.getEmptyRegistry();
      }
    }

    @Override
    public T map(ByteBuffer bb) {
      try {
        // Fix: mergeFrom(byte[], off, len) takes a *length*, not an end offset.
        // Passing bb.limit() as len was wrong whenever bb.position() > 0, and
        // arrayOffset() must be included for buffers backed by a larger array.
        return (T) instance.newBuilderForType()
            .mergeFrom(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining(), registry)
            .build();
      } catch (InvalidProtocolBufferException e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }

  /** Serializes a protobuf message to its wire-format bytes. */
  private static class ProtoOutputMapFn<T extends Message> extends MapFn<T, ByteBuffer> {

    ProtoOutputMapFn() {
    }

    @Override
    public ByteBuffer map(T proto) {
      return ByteBuffer.wrap(proto.toByteArray());
    }
  }

  /** Deserializes Thrift binary-protocol bytes into a record of type {@code T}. */
  private static class ThriftInputMapFn<T extends TBase> extends MapFn<ByteBuffer, T> {

    private final Class<T> clazz;
    private transient T instance;
    private transient TDeserializer deserializer;
    // Scratch buffer, reused across calls when the payload size is unchanged.
    private transient byte[] bytes;

    ThriftInputMapFn(Class<T> clazz) {
      this.clazz = clazz;
    }

    @Override
    public void initialize() {
      this.instance = ReflectionUtils.newInstance(clazz, null);
      this.deserializer = new TDeserializer(new TBinaryProtocol.Factory());
      this.bytes = new byte[0];
    }

    @Override
    public T map(ByteBuffer bb) {
      T next = (T) instance.deepCopy();
      int len = bb.limit() - bb.position();
      if (len != bytes.length) {
        bytes = new byte[len];
      }
      // Fix: include arrayOffset() so buffers backed by a larger array copy correctly.
      System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), bytes, 0, len);
      try {
        deserializer.deserialize(next, bytes);
      } catch (TException e) {
        throw new CrunchRuntimeException(e);
      }
      return next;
    }
  }

  /** Serializes a Thrift record to binary-protocol bytes. */
  private static class ThriftOutputMapFn<T extends TBase> extends MapFn<T, ByteBuffer> {

    private transient TSerializer serializer;

    ThriftOutputMapFn() {
    }

    @Override
    public void initialize() {
      this.serializer = new TSerializer(new TBinaryProtocol.Factory());
    }

    @Override
    public ByteBuffer map(T t) {
      try {
        return ByteBuffer.wrap(serializer.serialize(t));
      } catch (TException e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }

  /** Maps an enum constant's name back to the constant itself. */
  private static class EnumInputMapper<T extends Enum> extends MapFn<String, T> {
    private final Class<T> type;

    EnumInputMapper(Class<T> type) {
      this.type = type;
    }

    @Override
    public T map(String input) {
      return (T) Enum.valueOf(type, input);
    }
  }

  /** Maps an enum constant to its declared name. */
  private static class EnumOutputMapper<T extends Enum> extends MapFn<T, String> {
    @Override
    public String map(T input) {
      return input.name();
    }
  }

  /** Decodes the 16-byte (msb, lsb) layout written by {@link #UUID_TO_BYTE}. */
  private static final MapFn<ByteBuffer, UUID> BYTE_TO_UUID = new MapFn<ByteBuffer, UUID>() {
    @Override
    public UUID map(ByteBuffer input) {
      if (input == null) {
        return null; // null pass-through, consistent with the other byte decoders
      }
      // Fix: read via a duplicate so the caller's buffer position is not advanced,
      // making this MapFn safe to apply to the same buffer more than once.
      ByteBuffer bb = input.duplicate();
      return new UUID(bb.getLong(), bb.getLong());
    }
  };

  /** Encodes a {@code UUID} as 16 bytes: most-significant then least-significant long. */
  private static final MapFn<UUID, ByteBuffer> UUID_TO_BYTE = new MapFn<UUID, ByteBuffer>() {
    @Override
    public ByteBuffer map(UUID input) {
      ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
      bb.asLongBuffer().put(input.getMostSignificantBits()).put(input.getLeastSignificantBits());
      return bb;
    }
  };

  /**
   * Decodes a 4-byte scale followed by big-endian unscaled digits into a BigDecimal.
   * Reads through a duplicate so the caller's buffer position is left untouched.
   */
  private static BigDecimal byteBufferToBigDecimal(ByteBuffer input) {
    ByteBuffer bb = input.duplicate();
    int scale = bb.getInt();
    byte[] bytes = new byte[bb.remaining()];
    bb.get(bytes, 0, bytes.length);
    BigInteger bi = new BigInteger(bytes);
    BigDecimal bigDecValue = new BigDecimal(bi, scale);
    return bigDecValue;
  }

  /** Encodes a BigDecimal as its 4-byte scale followed by the unscaled digit bytes. */
  private static ByteBuffer bigDecimalToByteBuffer(BigDecimal input) {
    byte[] unScaledBytes = input.unscaledValue().toByteArray();
    byte[] scaleBytes = ByteBuffer.allocate(4).putInt(input.scale()).array();
    byte[] bytes = new byte[scaleBytes.length + unScaledBytes.length];
    System.arraycopy(scaleBytes, 0, bytes, 0, scaleBytes.length);
    System.arraycopy(unScaledBytes, 0, bytes, scaleBytes.length, unScaledBytes.length);
    return ByteBuffer.wrap(bytes);
  }
}
| 2,743 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Common functionality for business object serialization.
*/
package org.apache.crunch.types;
| 2,744 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/NoOpDeepCopier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types;
import org.apache.hadoop.conf.Configuration;
/**
* A {@code DeepCopier} that does nothing, and just returns the input value without copying anything.
*/
public class NoOpDeepCopier<T> implements DeepCopier<T> {

  /**
   * Static factory method.
   *
   * @param <T> the type handled by the returned copier
   * @return a copier whose {@link #deepCopy(Object)} is the identity function
   */
  public static <T> NoOpDeepCopier<T> create() {
    return new NoOpDeepCopier<T>();
  }

  // Instances are only obtainable through create().
  private NoOpDeepCopier() {}

  @Override
  public void initialize(Configuration conf) {
    // Stateless; there is nothing to configure.
  }

  @Override
  public T deepCopy(T source) {
    // Identity: the caller receives the exact same reference back.
    return source;
  }
}
| 2,745 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableValueConverter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import org.apache.crunch.types.Converter;
import org.apache.hadoop.io.NullWritable;
class WritableValueConverter<W> implements Converter<Object, W, W, Iterable<W>> {
private final Class<W> serializationClass;
public WritableValueConverter(Class<W> serializationClass) {
this.serializationClass = serializationClass;
}
@Override
public W convertInput(Object key, W value) {
return value;
}
@Override
public Object outputKey(W value) {
return NullWritable.get();
}
@Override
public W outputValue(W value) {
return value;
}
@Override
public Class<Object> getKeyClass() {
return (Class<Object>) (Class<?>) NullWritable.class;
}
@Override
public Class<W> getValueClass() {
return serializationClass;
}
@Override
public boolean applyPTypeTransforms() {
return true;
}
@Override
public Iterable<W> convertIterableInput(Object key, Iterable<W> value) {
return value;
}
} | 2,746 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritablePairConverter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.Converter;
class WritablePairConverter<K, V> implements Converter<K, V, Pair<K, V>, Pair<K, Iterable<V>>> {

  // Writable classes handed to the input/output format for keys and values.
  private final Class<K> keyType;
  private final Class<V> valueType;

  public WritablePairConverter(Class<K> keyClass, Class<V> valueClass) {
    this.keyType = keyClass;
    this.valueType = valueClass;
  }

  /** Wraps a single key/value pair from the input format as a Pair. */
  @Override
  public Pair<K, V> convertInput(K key, V value) {
    return Pair.of(key, value);
  }

  /** Wraps a key and its grouped values (reducer-side input) as a Pair. */
  @Override
  public Pair<K, Iterable<V>> convertIterableInput(K key, Iterable<V> values) {
    return Pair.of(key, values);
  }

  /** The first element of an output pair becomes the emitted key. */
  @Override
  public K outputKey(Pair<K, V> pair) {
    return pair.first();
  }

  /** The second element of an output pair becomes the emitted value. */
  @Override
  public V outputValue(Pair<K, V> pair) {
    return pair.second();
  }

  @Override
  public Class<K> getKeyClass() {
    return keyType;
  }

  @Override
  public Class<V> getValueClass() {
    return valueType;
  }

  @Override
  public boolean applyPTypeTransforms() {
    return true;
  }
}
| 2,747 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/TextMapWritable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import com.google.common.collect.Maps;
class TextMapWritable implements Writable {

  // Backing map; readFields() replaces its contents wholesale.
  private final Map<Text, BytesWritable> instance;

  public TextMapWritable() {
    this.instance = Maps.newHashMap();
  }

  public void put(Text txt, BytesWritable value) {
    instance.put(txt, value);
  }

  public Set<Map.Entry<Text, BytesWritable>> entrySet() {
    return instance.entrySet();
  }

  /** Serializes as a vint entry count followed by each (key, value) pair. */
  @Override
  public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, instance.size());
    for (Map.Entry<Text, BytesWritable> entry : instance.entrySet()) {
      entry.getKey().write(out);
      entry.getValue().write(out);
    }
  }

  /** Reads the layout produced by {@link #write(DataOutput)}, replacing current contents. */
  @Override
  public void readFields(DataInput in) throws IOException {
    instance.clear();
    int entryCount = WritableUtils.readVInt(in);
    for (int i = 0; i < entryCount; i++) {
      Text key = new Text();
      key.readFields(in);
      BytesWritable value = new BytesWritable();
      value.readFields(in);
      instance.put(key, value);
    }
  }
}
| 2,748 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/TupleWritable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A serialization format for {@link org.apache.crunch.Tuple}.
*
* <pre>
* tuple_writable ::= card field+
* card ::= vint
* field ::= code [body_size body]
* code ::= vint
* body_size ::= vint
* body ::= byte[]
* </pre>
*/
public class TupleWritable extends Configured implements WritableComparable<TupleWritable> {
private int[] written;
private Writable[] values;
private boolean comparablesLoaded = false;
/**
* Create an empty tuple with no allocated storage for writables.
*/
public TupleWritable() {
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf == null) return;
// only reload comparables if this particular writable hasn't already done so;
// While there's also some caching within the static `reloadWritableComparableCodes`,
// it has to hit the configuration, which runs some embarrassing regexes and stuff.
// As for why SequenceFile$Reader calls `setConf` every single time it reads a new value
// (thus trigering this expensive path), I don't know.
if (!comparablesLoaded) {
try {
Writables.reloadWritableComparableCodes(conf);
} catch (Exception e) {
throw new CrunchRuntimeException("Error reloading writable comparable codes", e);
}
comparablesLoaded = true;
}
}
private static int[] getCodes(Writable[] writables) {
int[] b = new int[writables.length];
for (int i = 0; i < b.length; i++) {
if (writables[i] != null) {
b[i] = getCode(writables[i].getClass());
}
}
return b;
}
public TupleWritable(Writable[] values) {
this(values, getCodes(values));
}
/**
* Initialize tuple with storage; unknown whether any of them contain
* "written" values.
*/
public TupleWritable(Writable[] values, int[] written) {
Preconditions.checkArgument(values.length == written.length);
this.written = written;
this.values = values;
}
/**
* Return true if tuple has an element at the position provided.
*/
public boolean has(int i) {
return written[i] != 0;
}
/**
* Get ith Writable from Tuple.
*/
public Writable get(int i) {
return values[i];
}
/**
* The number of children in this Tuple.
*/
public int size() {
return values.length;
}
/**
* {@inheritDoc}
*/
public boolean equals(Object other) {
if (other instanceof TupleWritable) {
TupleWritable that = (TupleWritable) other;
if (this.size() != that.size()) {
return false;
}
for (int i = 0; i < values.length; ++i) {
if (!has(i))
continue;
if (written[i] != that.written[i] || !values[i].equals(that.values[i])) {
return false;
}
}
return true;
}
return false;
}
public int hashCode() {
HashCodeBuilder builder = new HashCodeBuilder();
builder.append(written);
for (Writable v : values) {
builder.append(v);
}
return builder.toHashCode();
}
/**
* Convert Tuple to String as in the following.
* <tt>[<child1>,<child2>,...,<childn>]</tt>
*/
public String toString() {
StringBuffer buf = new StringBuffer("[");
for (int i = 0; i < values.length; ++i) {
if (has(i)) {
buf.append(values[i].toString());
}
buf.append(",");
}
if (values.length != 0)
buf.setCharAt(buf.length() - 1, ']');
else
buf.append(']');
return buf.toString();
}
public void clear() {
Arrays.fill(written, (byte) 0);
}
public void set(int index, Writable w) {
written[index] = getCode(w.getClass());
values[index] = w;
}
/**
* Writes each Writable to <code>out</code>.
*/
public void write(DataOutput out) throws IOException {
DataOutputBuffer tmp = new DataOutputBuffer();
WritableUtils.writeVInt(out, values.length);
for (int i = 0; i < values.length; ++i) {
WritableUtils.writeVInt(out, written[i]);
if (written[i] != 0) {
tmp.reset();
values[i].write(tmp);
WritableUtils.writeVInt(out, tmp.getLength());
out.write(tmp.getData(), 0, tmp.getLength());
}
}
}
/**
* {@inheritDoc}
*/
public void readFields(DataInput in) throws IOException {
int card = WritableUtils.readVInt(in);
values = new Writable[card];
written = new int[card];
for (int i = 0; i < card; ++i) {
written[i] = WritableUtils.readVInt(in);
if (written[i] != 0) {
values[i] = getWritable(written[i], getConf());
WritableUtils.readVInt(in); // skip "bodySize"
values[i].readFields(in);
}
}
}
static int getCode(Class<? extends Writable> clazz) {
if (Writables.WRITABLE_CODES.inverse().containsKey(clazz)) {
return Writables.WRITABLE_CODES.inverse().get(clazz);
} else {
return 1; // default for BytesWritable
}
}
static Writable getWritable(int code, Configuration conf) {
Class<? extends Writable> clazz = Writables.WRITABLE_CODES.get(code);
if (clazz != null) {
return WritableFactories.newInstance(clazz, conf);
} else {
throw new IllegalStateException("Unknown Writable code: " + code);
}
}
@Override
public int compareTo(TupleWritable that) {
for (int i = 0; i < Math.min(this.size(), that.size()); i++) {
if (!this.has(i) && !that.has(i)) {
continue;
}
if (this.has(i) && !that.has(i)) {
return 1;
}
if (!this.has(i) && that.has(i)) {
return -1;
}
if (this.written[i] != that.written[i]) {
return this.written[i] - that.written[i];
}
Writable v1 = this.values[i];
Writable v2 = that.values[i];
int cmp;
if (v1 instanceof WritableComparable && v2 instanceof WritableComparable) {
cmp = ((WritableComparable) v1).compareTo(v2);
} else {
cmp = v1.hashCode() - v2.hashCode();
}
if (cmp != 0) {
return cmp;
}
}
return this.size() - that.size();
}
/**
 * A raw-bytes {@link WritableComparator} for {@code TupleWritable} that compares
 * serialized tuples field-by-field without deserializing registered
 * {@link WritableComparable} fields.
 */
public static class Comparator extends WritableComparator implements Configurable {

  private static final Comparator INSTANCE = new Comparator();

  public static Comparator getInstance() {
    return INSTANCE;
  }

  public Comparator() {
    super(TupleWritable.class);
  }

  @Override
  public void setConf(Configuration conf) {
    if (conf == null) {
      return;
    }
    try {
      // Pick up any WritableComparable codes that were registered on the client
      // and shipped to this task via the job configuration.
      Writables.reloadWritableComparableCodes(conf);
    } catch (Exception e) {
      throw new CrunchRuntimeException("Error reloading writable comparable codes", e);
    }
  }

  @Override
  public Configuration getConf() {
    // The Configuration is only consumed in setConf to refresh the code
    // registry; it is intentionally not retained.
    return null;
  }

  /**
   * Compares two serialized tuples: first by shared fields (see
   * {@link #compareField}), then by cardinality.
   */
  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    DataInputBuffer buffer1 = new DataInputBuffer();
    DataInputBuffer buffer2 = new DataInputBuffer();
    try {
      buffer1.reset(b1, s1, l1);
      buffer2.reset(b2, s2, l2);
      int card1 = WritableUtils.readVInt(buffer1);
      int card2 = WritableUtils.readVInt(buffer2);
      int minCard = Math.min(card1, card2);
      for (int i = 0; i < minCard; i++) {
        int cmp = compareField(buffer1, buffer2);
        if (cmp != 0) {
          return cmp;
        }
      }
      // All shared fields compare equal: the shorter tuple sorts first.
      return Integer.compare(card1, card2);
    } catch (IOException e) {
      throw new CrunchRuntimeException(e);
    }
  }

  /**
   * Compares a single serialized field from each buffer, advancing both buffers
   * past the field. Field layout: vint code (0 = absent), then vint body size,
   * then the serialized body.
   */
  private int compareField(DataInputBuffer buffer1, DataInputBuffer buffer2) throws IOException {
    int written1 = WritableUtils.readVInt(buffer1);
    int written2 = WritableUtils.readVInt(buffer2);
    boolean hasValue1 = (written1 != 0);
    boolean hasValue2 = (written2 != 0);
    if (!hasValue1 && !hasValue2) {
      return 0;
    }
    if (hasValue1 && !hasValue2) {
      return 1;
    }
    if (!hasValue1 && hasValue2) {
      return -1;
    }
    // Both sides have a value. Different codes: order by code (overflow-safe).
    if (written1 != written2) {
      return Integer.compare(written1, written2);
    }
    int bodySize1 = WritableUtils.readVInt(buffer1);
    int bodySize2 = WritableUtils.readVInt(buffer2);
    Class<? extends Writable> clazz = Writables.WRITABLE_CODES.get(written1);
    if (WritableComparable.class.isAssignableFrom(clazz)) {
      // Raw comparison straight over the serialized bytes.
      int cmp = WritableComparator.get(clazz.asSubclass(WritableComparable.class)).compare(
          buffer1.getData(), buffer1.getPosition(), bodySize1,
          buffer2.getData(), buffer2.getPosition(), bodySize2);
      long skipped1 = buffer1.skip(bodySize1);
      long skipped2 = buffer2.skip(bodySize2);
      Preconditions.checkState(skipped1 == bodySize1);
      Preconditions.checkState(skipped2 == bodySize2);
      return cmp;
    } else {
      // Fallback: deserialize and use an arbitrary-but-consistent hash ordering.
      Writable w1 = ReflectionUtils.newInstance(clazz, null);
      Writable w2 = ReflectionUtils.newInstance(clazz, null);
      w1.readFields(buffer1);
      w2.readFields(buffer2);
      return Integer.compare(w1.hashCode(), w2.hashCode());
    }
  }

  @Override
  public int compare(WritableComparable a, WritableComparable b) {
    return super.compare(a, b);
  }
}
static {
  // Register the raw-bytes comparator with Hadoop so that TupleWritable keys can be
  // compared directly over serialized buffers, avoiding deserialization overhead
  // during shuffle-side sorting.
  WritableComparator.define(TupleWritable.class, Comparator.getInstance());
}
}
| 2,749 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.MapFn;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.crunch.io.seq.SeqFileSource;
import org.apache.crunch.io.seq.SeqFileSourceTarget;
import org.apache.crunch.io.text.NLineFileSource;
import org.apache.crunch.io.text.TextFileSource;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.DeepCopier;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A {@link PType} implementation backed by a Hadoop {@link Writable} serialization
 * class, with map functions converting between the user-facing type {@code T} and
 * the serialized form {@code W}.
 */
public class WritableType<T, W extends Writable> implements PType<T> {

  private static final Logger LOG = LoggerFactory.getLogger(WritableType.class);

  private final Class<T> typeClass;
  private final Class<W> writableClass;
  private final Converter converter;
  private final MapFn<W, T> inputFn;
  private final MapFn<T, W> outputFn;
  // Null for immutable types, which signals that deep copies can be skipped.
  private final DeepCopier<W> deepCopier;
  private final List<PType> subTypes;
  private boolean initialized = false;

  /**
   * Factory method for a new WritableType instance whose type class is immutable.
   * <p/>
   * No checking is done to ensure that instances of the type class are immutable, but deep copying will be skipped
   * for instances denoted by the created PType.
   */
  public static <T, W extends Writable> WritableType<T, W> immutableType(Class<T> typeClass, Class<W> writableClass,
                                                                         MapFn<W, T> inputDoFn, MapFn<T, W> outputDoFn,
                                                                         PType... subTypes) {
    return new WritableType<T, W>(typeClass, writableClass, inputDoFn, outputDoFn,
        null, subTypes);
  }

  public WritableType(Class<T> typeClass, Class<W> writableClass, MapFn<W, T> inputDoFn,
      MapFn<T, W> outputDoFn, PType... subTypes) {
    this(typeClass, writableClass, inputDoFn, outputDoFn, new WritableDeepCopier<W>(writableClass), subTypes);
  }

  private WritableType(Class<T> typeClass, Class<W> writableClass, MapFn<W, T> inputDoFn,
      MapFn<T, W> outputDoFn, DeepCopier<W> deepCopier, PType... subTypes) {
    this.typeClass = typeClass;
    this.writableClass = writableClass;
    this.inputFn = inputDoFn;
    this.outputFn = outputDoFn;
    this.converter = new WritableValueConverter(writableClass);
    this.deepCopier = deepCopier;
    this.subTypes = ImmutableList.<PType> builder().add(subTypes).build();
  }

  @Override
  public PTypeFamily getFamily() {
    return WritableTypeFamily.getInstance();
  }

  @Override
  public Class<T> getTypeClass() {
    return typeClass;
  }

  @Override
  public Converter getConverter() {
    return converter;
  }

  @Override
  public MapFn getInputMapFn() {
    return inputFn;
  }

  @Override
  public MapFn getOutputMapFn() {
    return outputFn;
  }

  @Override
  public List<PType> getSubTypes() {
    return subTypes;
  }

  /** Returns the {@link Writable} class used to serialize instances of this type. */
  public Class<W> getSerializationClass() {
    return writableClass;
  }

  @Override
  public ReadableSourceTarget<T> getDefaultFileSource(Path path) {
    return new SeqFileSourceTarget<T>(path, this);
  }

  /**
   * Materializes {@code contents} at {@code path} and returns a source for reading
   * them back. Text contents with parallelism > 1 are written as newline-delimited
   * records (for NLine splitting); everything else is round-robined across
   * {@code parallelism} sequence files.
   */
  @Override
  public ReadableSource<T> createSourceTarget(Configuration conf, Path path, Iterable<T> contents, int parallelism)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    outputFn.setConfiguration(conf);
    outputFn.initialize();
    if (Text.class.equals(writableClass) && parallelism > 1) {
      FSDataOutputStream out = fs.create(path);
      double contentSize = 0;
      try {
        byte[] newLine = "\r\n".getBytes(Charsets.UTF_8);
        for (T value : contents) {
          Text txt = (Text) outputFn.map(value);
          out.write(txt.toString().getBytes(Charsets.UTF_8));
          out.write(newLine);
          contentSize++;
        }
      } finally {
        // Close even if a write throws, so the stream handle is not leaked.
        out.close();
      }
      return new NLineFileSource<T>(path, this, (int) Math.ceil(contentSize / parallelism));
    } else { // Use sequence files
      fs.mkdirs(path);
      List<SequenceFile.Writer> writers = Lists.newArrayListWithExpectedSize(parallelism);
      try {
        for (int i = 0; i < parallelism; i++) {
          Path out = new Path(path, "out" + i);
          writers.add(SequenceFile.createWriter(fs, conf, out, NullWritable.class, writableClass));
        }
        // Round-robin values across the writers to balance the resulting splits.
        int target = 0;
        for (T value : contents) {
          writers.get(target).append(NullWritable.get(), outputFn.map(value));
          target = (target + 1) % parallelism;
        }
      } finally {
        // Close all writers that were successfully created, even on failure.
        for (SequenceFile.Writer writer : writers) {
          writer.close();
        }
      }
      ReadableSource<T> ret = new SeqFileSource<T>(path, this);
      ret.inputConf(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
      return ret;
    }
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof WritableType)) { // instanceof is null-safe
      return false;
    }
    WritableType wt = (WritableType) obj;
    return (typeClass.equals(wt.typeClass) && writableClass.equals(wt.writableClass) && subTypes
        .equals(wt.subTypes));
  }

  @Override
  public void initialize(Configuration conf) {
    this.inputFn.setConfiguration(conf);
    this.outputFn.setConfiguration(conf);
    this.inputFn.initialize();
    this.outputFn.initialize();
    for (PType subType : subTypes) {
      subType.initialize(conf);
    }
    this.initialized = true;
  }

  /**
   * Returns a value detached from any shared serialization state, by serializing
   * and deserializing it. Immutable types (null deepCopier) are returned as-is.
   *
   * @throws IllegalStateException if the type has not been initialized
   */
  @Override
  public T getDetachedValue(T value) {
    if (deepCopier == null) {
      return value;
    }
    if (!initialized) {
      throw new IllegalStateException("Cannot call getDetachedValue on an uninitialized PType");
    }
    W writableValue = outputFn.map(value);
    W deepCopy = this.deepCopier.deepCopy(writableValue);
    return inputFn.map(deepCopy);
  }

  @Override
  public int hashCode() {
    HashCodeBuilder hcb = new HashCodeBuilder();
    hcb.append(typeClass).append(writableClass).append(subTypes);
    return hcb.toHashCode();
  }
}
| 2,750 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.types.DeepCopier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
/**
* Performs deep copies of Writable values.
*
* @param <T> The type of Writable that can be copied
*/
/**
 * Performs deep copies of Writable values by round-tripping them through their own
 * serialization: write to bytes, then read into a freshly constructed instance.
 *
 * @param <T> The type of Writable that can be copied; must have a no-arg constructor
 */
public class WritableDeepCopier<T extends Writable> implements DeepCopier<T> {

  private final Class<T> writableClass;

  public WritableDeepCopier(Class<T> writableClass) {
    this.writableClass = writableClass;
  }

  @Override
  public void initialize(Configuration conf) {
    // No configuration state is needed for serialization-based copying.
  }

  /**
   * Returns a deep copy of {@code source}, or {@code null} if {@code source} is null.
   *
   * @throws CrunchRuntimeException if serialization or instantiation fails
   */
  @Override
  public T deepCopy(T source) {
    if (source == null) {
      return null;
    }
    ByteArrayOutputStream byteOutStream = new ByteArrayOutputStream();
    DataOutputStream dataOut = new DataOutputStream(byteOutStream);
    try {
      source.write(dataOut);
      dataOut.flush();
      ByteArrayInputStream byteInStream = new ByteArrayInputStream(byteOutStream.toByteArray());
      DataInput dataInput = new DataInputStream(byteInStream);
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance(), which propagated checked exceptions unchecked.
      T copiedValue = writableClass.getDeclaredConstructor().newInstance();
      copiedValue.readFields(dataInput);
      return copiedValue;
    } catch (Exception e) {
      throw new CrunchRuntimeException("Error while deep copying " + source, e);
    }
  }
}
| 2,751 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableTypeFamily.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.PTypeUtils;
import org.apache.hadoop.io.Writable;
/**
* The {@link Writable}-based implementation of the
* {@link org.apache.crunch.types.PTypeFamily} interface.
*/
public class WritableTypeFamily implements PTypeFamily {
// Stateless singleton; all methods delegate to the static factories in Writables.
private static final WritableTypeFamily INSTANCE = new WritableTypeFamily();
public static WritableTypeFamily getInstance() {
return INSTANCE;
}
// Disallow construction
private WritableTypeFamily() {
}
// --- Primitive and simple types: thin delegations to Writables ---
public PType<Void> nulls() {
return Writables.nulls();
}
public PType<String> strings() {
return Writables.strings();
}
public PType<Long> longs() {
return Writables.longs();
}
public PType<Integer> ints() {
return Writables.ints();
}
public PType<Float> floats() {
return Writables.floats();
}
public PType<Double> doubles() {
return Writables.doubles();
}
public PType<Boolean> booleans() {
return Writables.booleans();
}
public PType<ByteBuffer> bytes() {
return Writables.bytes();
}
public <T> PType<T> records(Class<T> clazz) {
return Writables.records(clazz);
}
// Exposes a raw Writable class directly as a PType.
public <W extends Writable> PType<W> writables(Class<W> clazz) {
return Writables.writables(clazz);
}
// --- Composite types ---
public <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value) {
return Writables.tableOf(key, value);
}
public <V1, V2> PType<Pair<V1, V2>> pairs(PType<V1> p1, PType<V2> p2) {
return Writables.pairs(p1, p2);
}
public <V1, V2, V3> PType<Tuple3<V1, V2, V3>> triples(PType<V1> p1, PType<V2> p2, PType<V3> p3) {
return Writables.triples(p1, p2, p3);
}
public <V1, V2, V3, V4> PType<Tuple4<V1, V2, V3, V4>> quads(PType<V1> p1, PType<V2> p2, PType<V3> p3, PType<V4> p4) {
return Writables.quads(p1, p2, p3, p4);
}
public PType<TupleN> tuples(PType<?>... ptypes) {
return Writables.tuples(ptypes);
}
public <T> PType<Collection<T>> collections(PType<T> ptype) {
return Writables.collections(ptype);
}
public <T> PType<Map<String, T>> maps(PType<T> ptype) {
return Writables.maps(ptype);
}
/**
 * Converts an arbitrary PType to its equivalent in this type family. PTypes
 * already belonging to the Writable family are returned unchanged; grouped
 * table types are rebuilt from their converted table type; known primitives
 * are mapped directly, and everything else goes through the generic converter.
 */
@Override
public <T> PType<T> as(PType<T> ptype) {
if (ptype instanceof WritableType || ptype instanceof WritableTableType
|| ptype instanceof WritableGroupedTableType) {
return ptype;
}
if (ptype instanceof PGroupedTableType) {
PTableType ptt = ((PGroupedTableType) ptype).getTableType();
return new WritableGroupedTableType((WritableTableType) as(ptt));
}
PType<T> prim = Writables.getPrimitiveType(ptype.getTypeClass());
if (prim != null) {
return prim;
}
return PTypeUtils.convert(ptype, this);
}
@Override
public <T extends Tuple> PType<T> tuples(Class<T> clazz, PType<?>... ptypes) {
return Writables.tuples(clazz, ptypes);
}
@Override
public <S, T> PType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
return Writables.derived(clazz, inputFn, outputFn, base);
}
@Override
public <S, T> PType<T> derivedImmutable(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
return Writables.derivedImmutable(clazz, inputFn, outputFn, base);
}
@Override
public PType<Union> unionOf(PType<?>... ptypes) {
return Writables.unionOf(ptypes);
}
}
| 2,752 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.IOException;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.fn.PairMapFn;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.crunch.io.seq.SeqFileSource;
import org.apache.crunch.io.seq.SeqFileTableSource;
import org.apache.crunch.io.seq.SeqFileTableSourceTarget;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import com.google.common.collect.ImmutableList;
class WritableTableType<K, V> implements PTableType<K, V> {
private final WritableType<K, Writable> keyType;
private final WritableType<V, Writable> valueType;
private final MapFn inputFn;
private final MapFn outputFn;
private final Converter converter;
public WritableTableType(WritableType<K, Writable> keyType, WritableType<V, Writable> valueType) {
this.keyType = keyType;
this.valueType = valueType;
this.inputFn = new PairMapFn(keyType.getInputMapFn(), valueType.getInputMapFn());
this.outputFn = new PairMapFn(keyType.getOutputMapFn(), valueType.getOutputMapFn());
this.converter = new WritablePairConverter(keyType.getSerializationClass(),
valueType.getSerializationClass());
}
@Override
public Class<Pair<K, V>> getTypeClass() {
return (Class<Pair<K, V>>) Pair.of(null, null).getClass();
}
@Override
public List<PType> getSubTypes() {
return ImmutableList.<PType> of(keyType, valueType);
}
@Override
public MapFn getInputMapFn() {
return inputFn;
}
@Override
public MapFn getOutputMapFn() {
return outputFn;
}
@Override
public Converter getConverter() {
return converter;
}
@Override
public PTypeFamily getFamily() {
return WritableTypeFamily.getInstance();
}
public PType<K> getKeyType() {
return keyType;
}
public PType<V> getValueType() {
return valueType;
}
@Override
public PGroupedTableType<K, V> getGroupedTableType() {
return new WritableGroupedTableType<K, V>(this);
}
@Override
public ReadableSourceTarget<Pair<K, V>> getDefaultFileSource(Path path) {
return new SeqFileTableSourceTarget<K, V>(path, this);
}
@Override
public ReadableSource<Pair<K, V>> createSourceTarget(
Configuration conf, Path path, Iterable<Pair<K, V>> contents, int parallelism) throws IOException {
FileSystem fs = FileSystem.get(conf);
outputFn.setConfiguration(conf);
outputFn.initialize();
fs.mkdirs(path);
List<SequenceFile.Writer> writers = Lists.newArrayListWithExpectedSize(parallelism);
for (int i = 0; i < parallelism; i++) {
Path out = new Path(path, "out" + i);
writers.add(SequenceFile.createWriter(fs, conf, out, keyType.getSerializationClass(),
valueType.getSerializationClass()));
}
int target = 0;
for (Pair<K, V> value : contents) {
Pair writablePair = (Pair) outputFn.map(value);
writers.get(target).append(writablePair.first(), writablePair.second());
target = (target + 1) % parallelism;
}
for (SequenceFile.Writer writer : writers) {
writer.close();
}
ReadableSource<Pair<K, V>> ret = new SeqFileTableSource<K, V>(path, this);
ret.inputConf(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
return ret;
}
@Override
public void initialize(Configuration conf) {
keyType.initialize(conf);
valueType.initialize(conf);
}
@Override
public Pair<K, V> getDetachedValue(Pair<K, V> value) {
return PTables.getDetachedValue(this, value);
}
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof WritableTableType)) {
return false;
}
WritableTableType that = (WritableTableType) obj;
return keyType.equals(that.keyType) && valueType.equals(that.valueType);
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(keyType).append(valueType).toHashCode();
}
} | 2,753 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/WritableGroupedTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import java.io.IOException;
/**
 * The grouped (post-shuffle) counterpart of a {@link WritableTableType}: keys map
 * to iterables of values, and shuffle serialization is configured from the
 * underlying table type's key and value Writable classes.
 */
class WritableGroupedTableType<K, V> extends PGroupedTableType<K, V> {

  private final MapFn inputFn;
  private final MapFn outputFn;
  private final Converter converter;

  WritableGroupedTableType(WritableTableType<K, V> tableType) {
    super(tableType);
    WritableType wkt = (WritableType) tableType.getKeyType();
    WritableType wvt = (WritableType) tableType.getValueType();
    this.converter = new WritablePairConverter(wkt.getSerializationClass(), wvt.getSerializationClass());
    this.inputFn = new PairIterableMapFn(wkt.getInputMapFn(), wvt.getInputMapFn());
    this.outputFn = tableType.getOutputMapFn();
  }

  @Override
  public Class<Pair<K, Iterable<V>>> getTypeClass() {
    return (Class<Pair<K, Iterable<V>>>) Pair.of(null, null).getClass();
  }

  @Override
  public Converter getGroupingConverter() {
    return converter;
  }

  @Override
  public MapFn getInputMapFn() {
    return inputFn;
  }

  @Override
  public MapFn getOutputMapFn() {
    return outputFn;
  }

  @Override
  public void initialize(Configuration conf) {
    this.tableType.initialize(conf);
  }

  @Override
  public Pair<K, Iterable<V>> getDetachedValue(Pair<K, Iterable<V>> value) {
    return PTables.getGroupedDetachedValue(this, value);
  }

  /** Grouped table types are shuffle artifacts and cannot be materialized directly. */
  @Override
  public ReadableSource<Pair<K, Iterable<V>>> createSourceTarget(
      Configuration conf,
      Path path,
      Iterable<Pair<K, Iterable<V>>> contents,
      int parallelism) throws IOException {
    throw new UnsupportedOperationException("GroupedTableTypes do not support creating ReadableSources");
  }

  /**
   * Configures the shuffle phase of {@code job}: applies any grouping options,
   * sets the map output key/value classes, and installs the raw TupleWritable
   * comparator when no custom sort comparator was requested.
   */
  @Override
  public void configureShuffle(Job job, GroupingOptions options) {
    if (options != null) {
      options.configure(job);
    }
    WritableType wkt = (WritableType) tableType.getKeyType();
    WritableType wvt = (WritableType) tableType.getValueType();
    job.setMapOutputKeyClass(wkt.getSerializationClass());
    job.setMapOutputValueClass(wvt.getSerializationClass());
    boolean noCustomComparator = (options == null || options.getSortComparatorClass() == null);
    if (noCustomComparator && TupleWritable.class.equals(wkt.getSerializationClass())) {
      job.setSortComparatorClass(TupleWritable.Comparator.class);
    }
  }
}
| 2,754 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/UnionWritable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * A Writable that tags a serialized payload ({@link BytesWritable}) with the
 * integer index of the union branch it belongs to. Ordering is by index first,
 * then by the payload bytes.
 */
public class UnionWritable implements WritableComparable<UnionWritable> {

  private int index;
  private BytesWritable value;

  public UnionWritable() {
    // no-arg constructor for writables
  }

  public UnionWritable(int index, BytesWritable value) {
    this.index = index;
    this.value = value;
  }

  public int getIndex() {
    return index;
  }

  public BytesWritable getValue() {
    return value;
  }

  @Override
  public int compareTo(UnionWritable other) {
    if (index == other.getIndex()) {
      return value.compareTo(other.getValue());
    }
    // Integer.compare avoids the sign inversion that plain subtraction can
    // produce when the indices are far apart (overflow).
    return Integer.compare(index, other.getIndex());
  }

  @Override
  public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, index);
    value.write(out);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    this.index = WritableUtils.readVInt(in);
    if (value == null) {
      // Lazily allocate the payload holder so instances can be reused across reads.
      value = new BytesWritable();
    }
    value.readFields(in);
  }
}
| 2,755 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/Writables.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.ImmutableBiMap;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
import org.apache.crunch.fn.CompositeMapFn;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypes;
import org.apache.crunch.types.TupleFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Defines static methods that are analogous to the methods defined in
* {@link WritableTypeFamily} for convenient static importing.
*
*/
public class Writables {
private static final Logger LOG = LoggerFactory.getLogger(Writables.class);
// Bi-directional registry mapping small integer codes to the Writable classes used to tag
// serialized fields inside tuple types. Codes 1-10 are reserved for the built-in types
// listed here; additional WritableComparable classes may be added via registerComparable(...).
static BiMap<Integer, Class<? extends Writable>> WRITABLE_CODES = HashBiMap.create(ImmutableBiMap.<Integer, Class<? extends Writable>>builder()
    .put(1, BytesWritable.class)
    .put(2, Text.class)
    .put(3, IntWritable.class)
    .put(4, LongWritable.class)
    .put(5, FloatWritable.class)
    .put(6, DoubleWritable.class)
    .put(7, BooleanWritable.class)
    .put(8, TupleWritable.class)
    .put(9, TextMapWritable.class)
    .put(10, UnionWritable.class)
    .build());
/**
 * Registers a {@code WritableComparable} class so that it can be used for comparing the fields inside of
 * tuple types (e.g., {@code pairs}, {@code trips}, {@code tupleN}, etc.) for use in sorts and
 * secondary sorts. The serialization code is derived from the class's hash code; if the derived
 * code collides with a different already-registered class, the two-argument overload throws
 * {@link IllegalArgumentException}.
 *
 * @param clazz The WritableComparable class to register
 */
public static void registerComparable(Class<? extends WritableComparable> clazz) {
  int code = clazz.hashCode();
  if (code < 0) {
    // -Integer.MIN_VALUE == Integer.MIN_VALUE (still negative), so map that
    // pathological case to 0 before negating.
    code = (code == Integer.MIN_VALUE) ? 0 : -code;
  }
  if (code < WRITABLE_CODES.size()) {
    // Shift derived codes out of the low range reserved for the built-in types.
    code += WRITABLE_CODES.size();
  }
  registerComparable(clazz, code);
}
/**
 * Registers a {@code WritableComparable} class with a given integer code to use for serializing
 * and deserializing instances of this class that are defined inside of tuple types (e.g., {@code pairs},
 * {@code trips}, {@code tupleN}, etc.) Unregistered Writables are always serialized to bytes and
 * cannot be used in comparisons (e.g., sorts and secondary sorts) according to their underlying types.
 *
 * @param clazz The class to register
 * @param code The unique registration code for the class; it must not collide with a code that is
 *             already assigned to a different class (the built-in types occupy the low codes in
 *             {@code WRITABLE_CODES})
 * @throws IllegalArgumentException if {@code code} is already assigned to a different class
 */
public static void registerComparable(Class<? extends WritableComparable> clazz, int code) {
// Re-registering the same class under the same code is a no-op by design.
if (WRITABLE_CODES.containsKey(code) && !clazz.equals(WRITABLE_CODES.get(code))) {
throw new IllegalArgumentException(String.format(
"Already have writable class %s assigned to code = %d",
clazz,
code));
}
WRITABLE_CODES.put(code, clazz);
}
// Job-configuration key under which the code->class registry is shipped to tasks.
private static final String WRITABLE_COMPARABLE_CODES = "crunch.writable.comparable.codes";
// Hash of the registry string last loaded from a Configuration; used to skip redundant reloads.
private static int WRITABLE_CODES_LOADED = 0;
/**
 * Serializes the current code registry into {@code conf} as a
 * {@code "code:className;code:className"} string so that task-side comparators
 * can reload it via {@link #reloadWritableComparableCodes(Configuration)}.
 */
static void serializeWritableComparableCodes(Configuration conf) throws IOException {
  StringBuilder encoded = new StringBuilder();
  for (Map.Entry<Integer, Class<? extends Writable>> entry : WRITABLE_CODES.entrySet()) {
    if (encoded.length() > 0) {
      encoded.append(';');
    }
    encoded.append(entry.getKey()).append(':').append(entry.getValue().getName());
  }
  conf.set(WRITABLE_COMPARABLE_CODES, encoded.toString());
}
// Rebuilds the code -> class registry from the serialized form stored in the job
// configuration. Skips the (potentially expensive) reflection work when the exact
// same codes string has already been loaded, using its hash as a cheap marker.
static void reloadWritableComparableCodes(Configuration conf) throws Exception {
  // Read the configuration value once; the original code checked conf.get() twice,
  // which was redundant.
  String writableCodes = conf.get(WRITABLE_COMPARABLE_CODES);
  if (writableCodes != null && writableCodes.hashCode() != WRITABLE_CODES_LOADED) {
    Map<String, String> codeToClassName = Splitter.on(';').withKeyValueSeparator(":").split(writableCodes);
    for (Map.Entry<String, String> entry : codeToClassName.entrySet()) {
      WRITABLE_CODES.put(
          Integer.parseInt(entry.getKey()),
          (Class<? extends Writable>) Class.forName(entry.getValue()));
    }
    WRITABLE_CODES_LOADED = writableCodes.hashCode();
  }
}
// ---------------------------------------------------------------------------
// MapFns converting between plain Java values and their Hadoop Writable
// wrappers. Each pair below backs one of the primitive WritableType singletons
// defined further down (nulls, strings, ints, longs, floats, doubles,
// booleans, bytes).
// ---------------------------------------------------------------------------

// NullWritable <-> Void
private static final MapFn<NullWritable, Void> NULL_WRITABLE_TO_VOID = new MapFn<NullWritable, Void>() {
  @Override
  public Void map(NullWritable input) {
    return null;
  }
};
private static final MapFn<Void, NullWritable> VOID_TO_NULL_WRITABLE = new MapFn<Void, NullWritable>() {
  @Override
  public NullWritable map(Void input) {
    return NullWritable.get();
  }
};
// Text <-> String
private static final MapFn<Text, String> TEXT_TO_STRING = new MapFn<Text, String>() {
  @Override
  public String map(Text input) {
    return input.toString();
  }
};
private static final MapFn<String, Text> STRING_TO_TEXT = new MapFn<String, Text>() {
  @Override
  public Text map(String input) {
    return new Text(input);
  }
};
// IntWritable <-> Integer
private static final MapFn<IntWritable, Integer> IW_TO_INT = new MapFn<IntWritable, Integer>() {
  @Override
  public Integer map(IntWritable input) {
    return input.get();
  }
};
private static final MapFn<Integer, IntWritable> INT_TO_IW = new MapFn<Integer, IntWritable>() {
  @Override
  public IntWritable map(Integer input) {
    return new IntWritable(input);
  }
};
// LongWritable <-> Long
private static final MapFn<LongWritable, Long> LW_TO_LONG = new MapFn<LongWritable, Long>() {
  @Override
  public Long map(LongWritable input) {
    return input.get();
  }
};
private static final MapFn<Long, LongWritable> LONG_TO_LW = new MapFn<Long, LongWritable>() {
  @Override
  public LongWritable map(Long input) {
    return new LongWritable(input);
  }
};
// FloatWritable <-> Float
private static final MapFn<FloatWritable, Float> FW_TO_FLOAT = new MapFn<FloatWritable, Float>() {
  @Override
  public Float map(FloatWritable input) {
    return input.get();
  }
};
private static final MapFn<Float, FloatWritable> FLOAT_TO_FW = new MapFn<Float, FloatWritable>() {
  @Override
  public FloatWritable map(Float input) {
    return new FloatWritable(input);
  }
};
// DoubleWritable <-> Double
private static final MapFn<DoubleWritable, Double> DW_TO_DOUBLE = new MapFn<DoubleWritable, Double>() {
  @Override
  public Double map(DoubleWritable input) {
    return input.get();
  }
};
private static final MapFn<Double, DoubleWritable> DOUBLE_TO_DW = new MapFn<Double, DoubleWritable>() {
  @Override
  public DoubleWritable map(Double input) {
    return new DoubleWritable(input);
  }
};
// BooleanWritable <-> Boolean
private static final MapFn<BooleanWritable, Boolean> BW_TO_BOOLEAN = new MapFn<BooleanWritable, Boolean>() {
  @Override
  public Boolean map(BooleanWritable input) {
    return input.get();
  }
};
// Only two boolean values exist, so the output writables are shared singletons
// rather than allocated per record.
private static final BooleanWritable TRUE = new BooleanWritable(true);
private static final BooleanWritable FALSE = new BooleanWritable(false);
private static final MapFn<Boolean, BooleanWritable> BOOLEAN_TO_BW = new MapFn<Boolean, BooleanWritable>() {
  @Override
  public BooleanWritable map(Boolean input) {
    return input ? TRUE : FALSE;
  }
};
// BytesWritable -> ByteBuffer: wraps the writable's backing array without copying,
// bounded by the writable's valid length (getBytes() may be over-allocated).
private static final MapFn<BytesWritable, ByteBuffer> BW_TO_BB = new MapFn<BytesWritable, ByteBuffer>() {
  @Override
  public ByteBuffer map(BytesWritable input) {
    return ByteBuffer.wrap(input.getBytes(), 0, input.getLength());
  }
};
// ByteBuffer -> BytesWritable. Copies the readable region of the buffer into the
// writable. Assumes the buffer is array-backed (array() throws for direct or
// read-only buffers) -- TODO confirm callers only supply heap buffers.
private static final MapFn<ByteBuffer, BytesWritable> BB_TO_BW = new MapFn<ByteBuffer, BytesWritable>() {
  @Override
  public BytesWritable map(ByteBuffer input) {
    BytesWritable bw = new BytesWritable();
    // Fixed: the original used (arrayOffset(), limit()), which reads the wrong bytes
    // when the buffer's position is non-zero. Use the buffer's readable window:
    // offset = arrayOffset() + position(), length = remaining(). For the common case
    // of position == 0 this is identical to the old behavior.
    bw.set(input.array(), input.arrayOffset() + input.position(), input.remaining());
    return bw;
  }
};
// Singleton WritableType instances for the primitive Java types, built from the
// MapFn pairs above. immutableType marks values as safe to cache/reuse.
private static final WritableType<Void, NullWritable> nulls = WritableType.immutableType(
    Void.class, NullWritable.class, NULL_WRITABLE_TO_VOID, VOID_TO_NULL_WRITABLE);
private static final WritableType<String, Text> strings = WritableType.immutableType(
    String.class, Text.class, TEXT_TO_STRING, STRING_TO_TEXT);
private static final WritableType<Long, LongWritable> longs = WritableType.immutableType(
    Long.class, LongWritable.class, LW_TO_LONG, LONG_TO_LW);
private static final WritableType<Integer, IntWritable> ints = WritableType.immutableType(
    Integer.class, IntWritable.class, IW_TO_INT, INT_TO_IW);
private static final WritableType<Float, FloatWritable> floats = WritableType.immutableType(
    Float.class, FloatWritable.class, FW_TO_FLOAT, FLOAT_TO_FW);
private static final WritableType<Double, DoubleWritable> doubles = WritableType.immutableType(
    Double.class, DoubleWritable.class, DW_TO_DOUBLE, DOUBLE_TO_DW);
private static final WritableType<Boolean, BooleanWritable> booleans = WritableType.immutableType(
    Boolean.class, BooleanWritable.class, BW_TO_BOOLEAN, BOOLEAN_TO_BW);
// NOTE(review): unlike the others, bytes uses the plain constructor rather than
// immutableType -- presumably because BW_TO_BB wraps the writable's backing array
// without copying, so values are not safely cacheable. Confirm before changing.
private static final WritableType<ByteBuffer, BytesWritable> bytes = new WritableType(
    ByteBuffer.class, BytesWritable.class, BW_TO_BB, BB_TO_BW);
// Lookup table from Java class to its primitive PType; see getPrimitiveType(Class).
private static final Map<Class<?>, PType<?>> PRIMITIVES = ImmutableMap.<Class<?>, PType<?>> builder()
    .put(String.class, strings).put(Long.class, longs).put(Integer.class, ints).put(Float.class, floats)
    .put(Double.class, doubles).put(Boolean.class, booleans).put(ByteBuffer.class, bytes).build();
// Mutable registry of user-supplied WritableTypes keyed by record class; see register(...).
private static final Map<Class<?>, WritableType<?, ?>> EXTENSIONS = Maps.newHashMap();
/**
 * Returns the primitive {@code PType} for the given Java class, or {@code null}
 * if the class is not one of the supported primitive types.
 */
public static <T> PType<T> getPrimitiveType(Class<T> clazz) {
  return (PType<T>) PRIMITIVES.get(clazz);
}
/**
 * Registers a custom {@code WritableType} to be returned by {@link #records(Class)}
 * for the given class.
 */
public static <T> void register(Class<T> clazz, WritableType<T, ? extends Writable> ptype) {
  EXTENSIONS.put(clazz, ptype);
}
/** Returns the singleton type for {@code Void}/{@code NullWritable}. */
public static final WritableType<Void, NullWritable> nulls() {
  return nulls;
}
/** Returns the singleton type for {@code String}/{@code Text}. */
public static final WritableType<String, Text> strings() {
  return strings;
}
/** Returns the singleton type for {@code Long}/{@code LongWritable}. */
public static final WritableType<Long, LongWritable> longs() {
  return longs;
}
/** Returns the singleton type for {@code Integer}/{@code IntWritable}. */
public static final WritableType<Integer, IntWritable> ints() {
  return ints;
}
/** Returns the singleton type for {@code Float}/{@code FloatWritable}. */
public static final WritableType<Float, FloatWritable> floats() {
  return floats;
}
/** Returns the singleton type for {@code Double}/{@code DoubleWritable}. */
public static final WritableType<Double, DoubleWritable> doubles() {
  return doubles;
}
/** Returns the singleton type for {@code Boolean}/{@code BooleanWritable}. */
public static final WritableType<Boolean, BooleanWritable> booleans() {
  return booleans;
}
/** Returns the singleton type for {@code ByteBuffer}/{@code BytesWritable}. */
public static final WritableType<ByteBuffer, BytesWritable> bytes() {
  return bytes;
}
/**
 * Returns the {@code WritableType} for the given record class: a registered
 * extension type if one exists (see {@link #register}), otherwise an identity
 * type for classes that are themselves {@code Writable}.
 *
 * @param clazz the record class
 * @return the corresponding writable type
 * @throws IllegalArgumentException if the class is neither registered nor a {@code Writable}
 */
public static final <T, W extends Writable> WritableType<T, W> records(Class<T> clazz) {
  if (EXTENSIONS.containsKey(clazz)) {
    return (WritableType<T, W>) EXTENSIONS.get(clazz);
  }
  if (Writable.class.isAssignableFrom(clazz)) {
    return (WritableType<T, W>) writables(clazz.asSubclass(Writable.class));
  } else {
    // Fixed: the original message was missing the space before the class name.
    throw new IllegalArgumentException(
        "Cannot create Writable records from non-Writable class " + clazz.getCanonicalName());
  }
}
/**
 * Creates an identity {@code WritableType} for a class that is itself a
 * {@code Writable}: the same instance is used for both input and output mapping.
 */
public static <W extends Writable> WritableType<W, W> writables(Class<W> clazz) {
  MapFn identity = IdentityFn.getInstance();
  return new WritableType(clazz, clazz, identity, identity);
}
/**
 * Creates a table type from the given key and value types. A {@code WritableTableType}
 * argument is first converted into an equivalent pair type so it can nest inside the table.
 *
 * @param key the key type; must be a {@code WritableType} or {@code WritableTableType}
 * @param value the value type; must be a {@code WritableType} or {@code WritableTableType}
 * @return a table type over the given key and value types
 * @throws IllegalArgumentException if either argument is not a writable-family type
 */
public static <K, V> WritableTableType<K, V> tableOf(PType<K> key, PType<V> value) {
  // The key and value normalization logic was duplicated; factored into a helper.
  key = asWritableType(key, "Key");
  value = asWritableType(value, "Value");
  return new WritableTableType((WritableType) key, (WritableType) value);
}

// Normalizes a PType for use inside a table: table types are re-wrapped as pair
// types, plain WritableTypes pass through, and anything else is rejected. The
// role string ("Key"/"Value") is used in the error message.
private static <T> PType<T> asWritableType(PType<T> ptype, String role) {
  if (ptype instanceof WritableTableType) {
    WritableTableType wtt = (WritableTableType) ptype;
    return pairs(wtt.getKeyType(), wtt.getValueType());
  } else if (!(ptype instanceof WritableType)) {
    throw new IllegalArgumentException(role + " type must be of class WritableType");
  }
  return ptype;
}
// Passes BytesWritable instances through untouched; any other Writable is
// serialized into a fresh BytesWritable via its write() method.
private static BytesWritable asBytesWritable(Writable w) {
  return (w instanceof BytesWritable)
      ? (BytesWritable) w
      : new BytesWritable(WritableUtils.toByteArray(w));
}
// Decodes a raw Writable into an instance of the requested class. If the raw value
// is already of the requested class it is returned as-is; otherwise it is assumed
// to be a BytesWritable (the generic byte-serialized form produced by
// asBytesWritable) and its payload is read into a freshly constructed instance.
// The unchecked cast to BytesWritable will throw ClassCastException for any other
// mismatched type -- callers guarantee this cannot happen.
private static <W extends Writable> W create(Class<W> clazz, Writable writable) {
  if (clazz.equals(writable.getClass())) {
    return (W) writable;
  } else {
    W instance = (W) WritableFactories.newInstance(clazz);
    BytesWritable bytes = (BytesWritable) writable;
    try {
      instance.readFields(new DataInputStream(new ByteArrayInputStream(bytes.getBytes())));
    } catch (IOException e) {
      throw new CrunchRuntimeException(e);
    }
    return instance;
  }
}
/**
 * For mapping from {@link TupleWritable} instances to {@link Tuple}s.
 *
 * <p>Each tuple position carries its own input {@code MapFn} plus the expected
 * {@code Writable} class used to decode that field's raw value.
 */
private static class TWTupleMapFn extends MapFn<TupleWritable, Tuple> {
  private final TupleFactory<?> tupleFactory;
  // Per-position input conversion functions.
  private final List<MapFn> fns;
  // Per-position serialization classes, parallel to fns.
  private final List<Class<Writable>> writableClasses;
  // Scratch buffer reused across map() calls; rebuilt in initialize().
  private transient Object[] values;
  public TWTupleMapFn(TupleFactory<?> tupleFactory, WritableType<?, ?>... ptypes) {
    this.tupleFactory = tupleFactory;
    this.fns = Lists.newArrayList();
    this.writableClasses = Lists.newArrayList();
    for (WritableType ptype : ptypes) {
      fns.add(ptype.getInputMapFn());
      Class<Writable> clazz = ptype.getSerializationClass();
      if (WritableComparable.class.isAssignableFrom(clazz)) {
        // Unregistered comparables fall back to opaque byte comparison, which breaks
        // type-aware sorts; warn so the user can register the class.
        if (!WRITABLE_CODES.inverse().containsKey(clazz)) {
          LOG.warn("WritableComparable class {} in tuple type should be registered with Writables.registerComparable",
              clazz.toString());
        }
      }
      writableClasses.add(clazz);
    }
  }
  @Override
  public void configure(Configuration conf) {
    // Ship the comparable-code registry with the job so remote tasks decode fields
    // consistently.
    try {
      serializeWritableComparableCodes(conf);
    } catch (IOException e) {
      throw new CrunchRuntimeException("Error serializing writable comparable codes", e);
    }
    for (MapFn fn : fns) {
      fn.configure(conf);
    }
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    for (MapFn fn : fns) {
      fn.setContext(context);
    }
  }
  @Override
  public void initialize() {
    for (MapFn fn : fns) {
      fn.initialize();
    }
    // The rest of the methods allocate new
    // objects each time. However this one
    // uses Tuple.tuplify which does a copy
    this.values = new Object[fns.size()];
    tupleFactory.initialize();
  }
  @Override
  public Tuple map(TupleWritable in) {
    // Absent positions map to null; present ones are decoded with the position's
    // expected Writable class and then converted by its input MapFn.
    for (int i = 0; i < values.length; i++) {
      if (in.has(i)) {
        Writable w = create(writableClasses.get(i), in.get(i));
        values[i] = fns.get(i).map(w);
      } else {
        values[i] = null;
      }
    }
    return tupleFactory.makeTuple(values);
  }
}
/**
 * For mapping from {@code Tuple}s to {@code TupleWritable}s.
 *
 * <p>Each non-null field is converted with its output {@code MapFn}. Fields whose
 * Writable class has a registered comparison code keep their native form; all
 * other fields are serialized into a {@code BytesWritable}.
 */
private static class TupleTWMapFn extends MapFn<Tuple, TupleWritable> {
  private final List<MapFn> fns;
  // Per-field writable codes for the tuple currently being mapped (0 = absent).
  private transient int[] written;
  // Per-field converted writable values for the tuple currently being mapped.
  private transient Writable[] values;
  public TupleTWMapFn(PType<?>... ptypes) {
    this.fns = Lists.newArrayList();
    for (PType<?> ptype : ptypes) {
      fns.add(ptype.getOutputMapFn());
    }
    // written/values are transient, so initialize() recreates them after
    // the fn is deserialized on a task node.
    this.written = new int[fns.size()];
    this.values = new Writable[fns.size()];
  }
  @Override
  public void configure(Configuration conf) {
    // Ship the comparable-code registry with the job so remote tasks encode fields
    // consistently.
    try {
      serializeWritableComparableCodes(conf);
    } catch (IOException e) {
      throw new CrunchRuntimeException("Error serializing writable comparable codes", e);
    }
    for (MapFn fn : fns) {
      fn.configure(conf);
    }
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    for (MapFn fn : fns) {
      fn.setContext(context);
    }
  }
  @Override
  public void initialize() {
    for (MapFn fn : fns) {
      fn.initialize();
    }
    this.written = new int[fns.size()];
    this.values = new Writable[fns.size()];
  }
  @Override
  public TupleWritable map(Tuple input) {
    // Fixed idiom: written is an int[], but the original filled it with "(byte) 0",
    // relying on a silent widening conversion. Use a plain int zero.
    Arrays.fill(written, 0);
    Arrays.fill(values, null);
    for (int i = 0; i < input.size(); i++) {
      Object value = input.get(i);
      if (value != null) {
        Writable w = (Writable) fns.get(i).map(value);
        if (WRITABLE_CODES.inverse().containsKey(w.getClass())) {
          values[i] = w;
          written[i] = WRITABLE_CODES.inverse().get(w.getClass());
        } else {
          values[i] = asBytesWritable(w);
          written[i] = 1; // code for BytesWritable
        }
      }
    }
    return new TupleWritable(values, written);
  }
}
/**
 * Creates a {@code WritableType} for {@code Pair}s of the two given component types.
 */
public static <V1, V2> WritableType<Pair<V1, V2>, TupleWritable> pairs(PType<V1> p1, PType<V2> p2) {
  TWTupleMapFn inputFn = new TWTupleMapFn(TupleFactory.PAIR, (WritableType) p1, (WritableType) p2);
  TupleTWMapFn outputFn = new TupleTWMapFn(p1, p2);
  return new WritableType(Pair.class, TupleWritable.class, inputFn, outputFn, p1, p2);
}
/**
 * Creates a {@code WritableType} for {@code Tuple3}s of the three given component types.
 */
public static <V1, V2, V3> WritableType<Tuple3<V1, V2, V3>, TupleWritable> triples(PType<V1> p1, PType<V2> p2,
    PType<V3> p3) {
  TWTupleMapFn inputFn = new TWTupleMapFn(TupleFactory.TUPLE3,
      (WritableType) p1, (WritableType) p2, (WritableType) p3);
  TupleTWMapFn outputFn = new TupleTWMapFn(p1, p2, p3);
  return new WritableType(Tuple3.class, TupleWritable.class, inputFn, outputFn, p1, p2, p3);
}
/**
 * Creates a {@code WritableType} for {@code Tuple4}s of the four given component types.
 */
public static <V1, V2, V3, V4> WritableType<Tuple4<V1, V2, V3, V4>, TupleWritable> quads(PType<V1> p1, PType<V2> p2,
    PType<V3> p3, PType<V4> p4) {
  TWTupleMapFn inputFn = new TWTupleMapFn(TupleFactory.TUPLE4,
      (WritableType) p1, (WritableType) p2, (WritableType) p3, (WritableType) p4);
  TupleTWMapFn outputFn = new TupleTWMapFn(p1, p2, p3, p4);
  return new WritableType(Tuple4.class, TupleWritable.class, inputFn, outputFn, p1, p2, p3, p4);
}
/**
 * Creates a {@code WritableType} for arbitrary-arity {@code TupleN} values over the
 * given component types.
 */
public static WritableType<TupleN, TupleWritable> tuples(PType... ptypes) {
  WritableType[] writableTypes = new WritableType[ptypes.length];
  for (int i = 0; i < ptypes.length; i++) {
    writableTypes[i] = (WritableType) ptypes[i];
  }
  TWTupleMapFn inputFn = new TWTupleMapFn(TupleFactory.TUPLEN, writableTypes);
  TupleTWMapFn outputFn = new TupleTWMapFn(ptypes);
  return new WritableType(TupleN.class, TupleWritable.class, inputFn, outputFn, ptypes);
}
/**
 * Creates a {@code PType} for a custom {@code Tuple} subclass whose constructor takes
 * the given component types, using reflection to build instances.
 */
public static <T extends Tuple> PType<T> tuples(Class<T> clazz, PType... ptypes) {
  int arity = ptypes.length;
  Class[] typeArgs = new Class[arity];
  WritableType[] writableTypes = new WritableType[arity];
  for (int i = 0; i < arity; i++) {
    typeArgs[i] = ptypes[i].getTypeClass();
    writableTypes[i] = (WritableType) ptypes[i];
  }
  TupleFactory<T> factory = TupleFactory.create(clazz, typeArgs);
  TWTupleMapFn inputFn = new TWTupleMapFn(factory, writableTypes);
  TupleTWMapFn outputFn = new TupleTWMapFn(ptypes);
  return new WritableType(clazz, TupleWritable.class, inputFn, outputFn, ptypes);
}
/**
 * For mapping from {@link UnionWritable} instances to {@link Union}s.
 * (The previous javadoc was copy-pasted from the tuple fn and mentioned the
 * wrong types.)
 */
private static class UWInputFn extends MapFn<UnionWritable, Union> {
  // Per-branch input conversion functions, indexed by union tag.
  private final List<MapFn> fns;
  // Per-branch serialization classes, parallel to fns.
  private final List<Class<Writable>> writableClasses;
  public UWInputFn(WritableType<?, ?>... ptypes) {
    this.fns = Lists.newArrayList();
    this.writableClasses = Lists.newArrayList();
    for (WritableType ptype : ptypes) {
      fns.add(ptype.getInputMapFn());
      writableClasses.add(ptype.getSerializationClass());
    }
  }
  @Override
  public void configure(Configuration conf) {
    for (MapFn fn : fns) {
      fn.configure(conf);
    }
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    for (MapFn fn : fns) {
      fn.setContext(context);
    }
  }
  @Override
  public void initialize() {
    for (MapFn fn : fns) {
      fn.initialize();
    }
  }
  @Override
  public Union map(UnionWritable in) {
    // Decode only the branch selected by the union's index.
    int index = in.getIndex();
    Writable w = create(writableClasses.get(index), in.getValue());
    return new Union(index, fns.get(index).map(w));
  }
}
/**
 * For mapping from {@link Union}s to {@link UnionWritable}s.
 * (The previous javadoc was copy-pasted from the tuple fn and mentioned the
 * wrong types.)
 */
private static class UWOutputFn extends MapFn<Union, UnionWritable> {
  // Per-branch output conversion functions, indexed by union tag.
  private final List<MapFn> fns;
  public UWOutputFn(PType<?>... ptypes) {
    this.fns = Lists.newArrayList();
    for (PType<?> ptype : ptypes) {
      fns.add(ptype.getOutputMapFn());
    }
  }
  @Override
  public void configure(Configuration conf) {
    for (MapFn fn : fns) {
      fn.configure(conf);
    }
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    for (MapFn fn : fns) {
      fn.setContext(context);
    }
  }
  @Override
  public void initialize() {
    for (MapFn fn : fns) {
      fn.initialize();
    }
  }
  @Override
  public UnionWritable map(Union input) {
    // The selected branch's value is always byte-serialized, unlike tuples where
    // registered comparables keep their native form.
    int index = input.getIndex();
    Writable w = (Writable) fns.get(index).map(input.getValue());
    return new UnionWritable(index, asBytesWritable(w));
  }
}
/**
 * Creates a {@code PType} for {@link Union} values over the given component types.
 */
public static PType<Union> unionOf(PType<?>... ptypes) {
  WritableType[] writableTypes = new WritableType[ptypes.length];
  for (int i = 0; i < ptypes.length; i++) {
    writableTypes[i] = (WritableType) ptypes[i];
  }
  UWInputFn inputFn = new UWInputFn(writableTypes);
  UWOutputFn outputFn = new UWOutputFn(ptypes);
  return new WritableType(Union.class, UnionWritable.class, inputFn, outputFn, ptypes);
}
/**
 * Creates a derived {@code PType} for {@code T} backed by the serialization of an
 * existing base type {@code S}, composing the supplied conversion functions with
 * the base type's own input/output MapFns.
 */
public static <S, T> PType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
  WritableType<S, ?> wt = (WritableType<S, ?>) base;
  MapFn input = new CompositeMapFn(wt.getInputMapFn(), inputFn);
  MapFn output = new CompositeMapFn(outputFn, wt.getOutputMapFn());
  return new WritableType(clazz, wt.getSerializationClass(), input, output, base.getSubTypes().toArray(new PType[0]));
}
/**
 * Like {@link #derived}, but produces an immutable type whose values may be safely
 * cached and reused by the framework.
 */
public static <S, T> PType<T> derivedImmutable(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
  WritableType<S, ?> wt = (WritableType<S, ?>) base;
  MapFn input = new CompositeMapFn(wt.getInputMapFn(), inputFn);
  MapFn output = new CompositeMapFn(outputFn, wt.getOutputMapFn());
  return WritableType.immutableType(clazz, wt.getSerializationClass(), input, output, base.getSubTypes().toArray(new PType[0]));
}
/**
 * Decodes a {@link GenericArrayWritable} into a {@code Collection} by converting
 * each element with the element type's input MapFn.
 */
private static class ArrayCollectionMapFn<T> extends MapFn<GenericArrayWritable, Collection<T>> {
  // Made final: the element class is fixed at construction time and never reassigned.
  private final Class<Writable> clazz;
  private final MapFn<Object, T> mapFn;
  public ArrayCollectionMapFn(Class<Writable> clazz, MapFn<Object, T> mapFn) {
    this.clazz = clazz;
    this.mapFn = mapFn;
  }
  @Override
  public void configure(Configuration conf) {
    mapFn.configure(conf);
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    mapFn.setContext(context);
  }
  @Override
  public void initialize() {
    mapFn.initialize();
  }
  @Override
  public Collection<T> map(GenericArrayWritable input) {
    Collection<T> collection = Lists.newArrayList();
    for (BytesWritable raw : input.get()) {
      // Each element arrives byte-serialized; decode it into the element class first.
      Writable w = create(clazz, raw);
      collection.add(mapFn.map(w));
    }
    return collection;
  }
}
/**
 * Encodes a {@code Collection} into a {@link GenericArrayWritable} by converting
 * each element with the element type's output MapFn and byte-serializing it.
 */
private static class CollectionArrayMapFn<T> extends MapFn<Collection<T>, GenericArrayWritable> {
  private final MapFn<T, Object> mapFn;
  public CollectionArrayMapFn(MapFn<T, Object> mapFn) {
    this.mapFn = mapFn;
  }
  @Override
  public void configure(Configuration conf) {
    mapFn.configure(conf);
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    mapFn.setContext(context);
  }
  @Override
  public void initialize() {
    mapFn.initialize();
  }
  @Override
  public GenericArrayWritable map(Collection<T> input) {
    GenericArrayWritable arrayWritable = new GenericArrayWritable();
    BytesWritable[] w = new BytesWritable[input.size()];
    int index = 0;
    for (T in : input) {
      // Every element is stored in its generic byte-serialized form.
      w[index++] = asBytesWritable((Writable) mapFn.map(in));
    }
    arrayWritable.set(w);
    return arrayWritable;
  }
}
/**
 * Creates a {@code WritableType} for {@code Collection}s of the given element type.
 * Note that element order is not preserved by the underlying serialization.
 */
public static <T> WritableType<Collection<T>, GenericArrayWritable> collections(PType<T> ptype) {
  WritableType<T, ?> elementType = (WritableType<T, ?>) ptype;
  ArrayCollectionMapFn inputFn =
      new ArrayCollectionMapFn(elementType.getSerializationClass(), elementType.getInputMapFn());
  CollectionArrayMapFn outputFn = new CollectionArrayMapFn(elementType.getOutputMapFn());
  return new WritableType(Collection.class, GenericArrayWritable.class, inputFn, outputFn, ptype);
}
/**
 * Decodes a {@link TextMapWritable} into a {@code Map<String, T>} by converting
 * each byte-serialized value with the value type's input MapFn.
 */
private static class MapInputMapFn<T> extends MapFn<TextMapWritable, Map<String, T>> {
  // Writable class the map's values are decoded into.
  private final Class<Writable> clazz;
  private final MapFn<Writable, T> mapFn;
  public MapInputMapFn(Class<Writable> clazz, MapFn<Writable, T> mapFn) {
    this.clazz = clazz;
    this.mapFn = mapFn;
  }
  @Override
  public void configure(Configuration conf) {
    mapFn.configure(conf);
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    mapFn.setContext(context);
  }
  @Override
  public void initialize() {
    mapFn.initialize();
  }
  @Override
  public Map<String, T> map(TextMapWritable input) {
    Map<String, T> out = Maps.newHashMap();
    for (Map.Entry<Text, BytesWritable> e : input.entrySet()) {
      Writable v = create(clazz, e.getValue());
      out.put(e.getKey().toString(), mapFn.map(v));
    }
    return out;
  }
}
/**
 * Encodes a {@code Map<String, T>} into a {@link TextMapWritable} by converting each
 * value with the value type's output MapFn and byte-serializing it.
 */
private static class MapOutputMapFn<T> extends MapFn<Map<String, T>, TextMapWritable> {
  private final MapFn<T, Writable> mapFn;
  public MapOutputMapFn(MapFn<T, Writable> mapFn) {
    this.mapFn = mapFn;
  }
  @Override
  public void configure(Configuration conf) {
    mapFn.configure(conf);
  }
  @Override
  public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
    mapFn.setContext(context);
  }
  @Override
  public void initialize() {
    mapFn.initialize();
  }
  @Override
  public TextMapWritable map(Map<String, T> input) {
    TextMapWritable tmw = new TextMapWritable();
    for (Map.Entry<String, T> e : input.entrySet()) {
      Writable w = mapFn.map(e.getValue());
      // Values are stored in their generic byte-serialized form.
      tmw.put(new Text(e.getKey()), asBytesWritable(w));
    }
    return tmw;
  }
}
/**
 * Creates a {@code WritableType} for maps from {@code String} keys to the given value type.
 *
 * <p>NOTE(review): the declared serialization type parameter is {@code MapWritable}, but
 * the instance is actually constructed with {@code TextMapWritable.class}; the raw-typed
 * constructor hides the mismatch. Confirm before relying on the generic signature.
 */
public static <T> WritableType<Map<String, T>, MapWritable> maps(PType<T> ptype) {
  WritableType<T, ?> wt = (WritableType<T, ?>) ptype;
  return new WritableType(Map.class, TextMapWritable.class,
      new MapInputMapFn(wt.getSerializationClass(), wt.getInputMapFn()),
      new MapOutputMapFn(wt.getOutputMapFn()), ptype);
}
/**
 * Creates a {@code PType} that stores instances of {@code clazz} as JSON strings
 * within the writable type family.
 */
public static <T> PType<T> jsons(Class<T> clazz) {
  return PTypes.jsonString(clazz, WritableTypeFamily.getInstance());
}
// Not instantiable: static utility class.
private Writables() {
}
}
| 2,756 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Business object serialization using Hadoop's Writables framework.
*/
package org.apache.crunch.types.writable;
| 2,757 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/writable/GenericArrayWritable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* A {@link Writable} for marshalling/unmarshalling Collections. Note that
* element order is <em>undefined</em>!
*
*/
class GenericArrayWritable implements Writable {
private BytesWritable[] values;
private Class<? extends Writable> valueClass;
public GenericArrayWritable(Class<? extends Writable> valueClass) {
this.valueClass = valueClass;
}
public GenericArrayWritable() {
// for deserialization
}
public void set(BytesWritable[] values) {
this.values = values;
}
public BytesWritable[] get() {
return values;
}
public void readFields(DataInput in) throws IOException {
values = new BytesWritable[WritableUtils.readVInt(in)]; // construct values
if (values.length > 0) {
int nulls = WritableUtils.readVInt(in);
if (nulls == values.length) {
return;
}
for (int i = 0; i < values.length - nulls; i++) {
BytesWritable value = new BytesWritable();
value.readFields(in); // read a value
values[i] = value; // store it in values
}
}
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, values.length);
if (values.length > 0) {
int nulls = 0;
for (int i = 0; i < values.length; i++) {
if (values[i] == null) {
nulls++;
}
}
WritableUtils.writeVInt(out, nulls);
if (values.length - nulls > 0) {
for (int i = 0; i < values.length; i++) {
if (values[i] != null) {
values[i].write(out);
}
}
}
}
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(values).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
GenericArrayWritable other = (GenericArrayWritable) obj;
if (!Arrays.equals(values, other.values))
return false;
return true;
}
@Override
public String toString() {
return Arrays.toString(values);
}
}
| 2,758 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroKeyConverter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.crunch.types.Converter;
import org.apache.hadoop.io.NullWritable;
/**
 * Converts between Crunch's plain key values and Avro's {@code AvroKey} wrapper for
 * jobs whose MapReduce value slot is always {@code NullWritable}.
 */
class AvroKeyConverter<K> implements Converter<AvroWrapper<K>, NullWritable, K, Iterable<K>> {
  @Override
  public K convertInput(AvroWrapper<K> key, NullWritable value) {
    // The value slot is always NullWritable; only the wrapped datum matters.
    return key.datum();
  }
  @Override
  public AvroWrapper<K> outputKey(K value) {
    return new AvroKey<K>(value);
  }
  @Override
  public NullWritable outputValue(K value) {
    return NullWritable.get();
  }
  @Override
  public Class<AvroWrapper<K>> getKeyClass() {
    // Double cast works around generics: AvroKey.class cannot be expressed as
    // Class<AvroWrapper<K>> directly.
    return (Class<AvroWrapper<K>>) (Class) AvroKey.class;
  }
  @Override
  public Class<NullWritable> getValueClass() {
    return NullWritable.class;
  }
  @Override
  public boolean applyPTypeTransforms() {
    return true;
  }
  @Override
  public Iterable<K> convertIterableInput(AvroWrapper<K> key, Iterable<NullWritable> value) {
    // Grouped input is never delivered as (AvroKey, Iterable<NullWritable>) here.
    throw new UnsupportedOperationException("Should not be possible");
  }
}
| 2,759 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/SafeAvroSerialization.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.mapred.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Serializer;
/** The {@link Serialization} used by jobs configured with {@link AvroJob}. */
/** The {@link Serialization} used by jobs configured with {@link AvroJob}. */
class SafeAvroSerialization<T> extends Configured implements Serialization<AvroWrapper<T>> {
  public boolean accept(Class<?> c) {
    // Handles AvroKey, AvroValue, and plain AvroWrapper instances.
    return AvroWrapper.class.isAssignableFrom(c);
  }
  /**
   * Returns the specified map output deserializer. Defaults to the final output
   * deserializer if no map output schema was specified.
   */
  public Deserializer<AvroWrapper<T>> getDeserializer(Class<AvroWrapper<T>> c) {
    boolean isKey = AvroKey.class.isAssignableFrom(c);
    Configuration conf = getConf();
    // The map output schema is a Pair schema; pick the key or value half.
    Schema schema = isKey ? Pair.getKeySchema(AvroJob.getMapOutputSchema(conf)) : Pair.getValueSchema(AvroJob
        .getMapOutputSchema(conf));
    DatumReader<T> datumReader = null;
    if (conf.getBoolean(AvroJob.MAP_OUTPUT_IS_REFLECT, false)) {
      // Reflect-based map output forces the reflect reader regardless of shuffle mode.
      datumReader = AvroMode.REFLECT.withFactoryFromConfiguration(conf).getReader(schema);
    } else {
      datumReader = AvroMode.fromShuffleConfiguration(conf).getReader(schema);
    }
    return new AvroWrapperDeserializer(datumReader, isKey);
  }
  private static final DecoderFactory FACTORY = DecoderFactory.get();
  private class AvroWrapperDeserializer implements Deserializer<AvroWrapper<T>> {
    private DatumReader<T> reader;
    private BinaryDecoder decoder;
    private boolean isKey;
    public AvroWrapperDeserializer(DatumReader<T> reader, boolean isKey) {
      this.reader = reader;
      this.isKey = isKey;
    }
    public void open(InputStream in) {
      // Reuse the existing decoder's buffers when re-opened on a new stream.
      this.decoder = FACTORY.directBinaryDecoder(in, decoder);
    }
    public AvroWrapper<T> deserialize(AvroWrapper<T> wrapper) throws IOException {
      // Reuse the previous datum when the framework hands back an existing wrapper.
      T datum = reader.read(wrapper == null ? null : wrapper.datum(), decoder);
      if (wrapper == null) {
        wrapper = isKey ? new AvroKey<T>(datum) : new AvroValue<T>(datum);
      } else {
        wrapper.datum(datum);
      }
      return wrapper;
    }
    public void close() throws IOException {
      decoder.inputStream().close();
    }
  }
  /** Returns the specified output serializer. */
  public Serializer<AvroWrapper<T>> getSerializer(Class<AvroWrapper<T>> c) {
    // AvroWrapper used for final output, AvroKey or AvroValue for map output
    boolean isFinalOutput = c.equals(AvroWrapper.class);
    Configuration conf = getConf();
    Schema schema = isFinalOutput ? AvroJob.getOutputSchema(conf) : (AvroKey.class.isAssignableFrom(c) ? Pair
        .getKeySchema(AvroJob.getMapOutputSchema(conf)) : Pair.getValueSchema(AvroJob.getMapOutputSchema(conf)));
    ReaderWriterFactory factory = AvroMode.fromShuffleConfiguration(conf);
    DatumWriter<T> writer = factory.getWriter(schema);
    return new AvroWrapperSerializer(writer);
  }
  private class AvroWrapperSerializer implements Serializer<AvroWrapper<T>> {
    private DatumWriter<T> writer;
    private OutputStream out;
    private BinaryEncoder encoder;
    public AvroWrapperSerializer(DatumWriter<T> writer) {
      this.writer = writer;
    }
    public void open(OutputStream out) {
      this.out = out;
      this.encoder = new EncoderFactory().directBinaryEncoder(out, null);
    }
    public void serialize(AvroWrapper<T> wrapper) throws IOException {
      writer.write(wrapper.datum(), encoder);
      // would be a lot faster if the Serializer interface had a flush()
      // method and the Hadoop framework called it when needed rather
      // than for every record.
      encoder.flush();
    }
    public void close() throws IOException {
      out.close();
    }
  }
}
| 2,760 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/ReflectDataFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.Schema;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;
/**
* A Factory class for constructing Avro reflection-related objects.
*/
public class ReflectDataFactory implements ReaderWriterFactory {

  /** Returns the reflect-data instance that treats all fields as nullable. */
  @Override
  public ReflectData getData() {
    return ReflectData.AllowNull.get();
  }

  /** Returns a reflection-based reader for {@code schema}. */
  @Override
  public <T> ReflectDatumReader<T> getReader(Schema schema) {
    return new ReflectDatumReader<T>(schema);
  }

  /** Returns a reflection-based writer for {@code schema}. */
  @Override
  public <T> ReflectDatumWriter<T> getWriter(Schema schema) {
    return new ReflectDatumWriter<T>(schema);
  }
}
| 2,761 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificRecord;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.MapFn;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.crunch.io.avro.AvroFileSource;
import org.apache.crunch.io.avro.AvroFileSourceTarget;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.DeepCopier;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* The implementation of the PType interface for Avro-based serialization.
*
*/
public class AvroType<T> implements PType<T> {

  /** How records of this type are represented in Avro. */
  public enum AvroRecordType {
    REFLECT,
    SPECIFIC,
    GENERIC
  }

  private static final Logger LOG = LoggerFactory.getLogger(AvroType.class);

  private static final Converter AVRO_CONVERTER = new AvroKeyConverter();

  private final Class<T> typeClass;
  // Avro's Schema is not Serializable, so the schema is carried as its JSON
  // form and re-parsed lazily (see getSchema()).
  private final String schemaString;
  private transient Schema schema;
  private final MapFn baseInputMapFn;
  private final MapFn baseOutputMapFn;
  private final List<PType> subTypes;
  private AvroRecordType recordType;
  private DeepCopier<T> deepCopier;
  private transient boolean initialized = false;

  public AvroType(Class<T> typeClass, Schema schema, DeepCopier<T> deepCopier, PType... ptypes) {
    this(typeClass, schema, IdentityFn.getInstance(), IdentityFn.getInstance(), deepCopier, null, ptypes);
  }

  public AvroType(Class<T> typeClass, Schema schema, MapFn inputMapFn, MapFn outputMapFn,
      DeepCopier<T> deepCopier, AvroRecordType recordType, PType... ptypes) {
    this.typeClass = typeClass;
    this.schema = Preconditions.checkNotNull(schema);
    this.schemaString = schema.toString();
    this.baseInputMapFn = inputMapFn;
    this.baseOutputMapFn = outputMapFn;
    this.deepCopier = deepCopier;
    this.subTypes = Lists.newArrayList(ptypes);
    this.recordType = recordType;
  }

  /** Classifies this type as REFLECT, SPECIFIC or GENERIC from the wrapped class and sub-types. */
  private AvroRecordType determineRecordType() {
    if (checkReflect()) {
      return AvroRecordType.REFLECT;
    } else if (checkSpecific()) {
      return AvroRecordType.SPECIFIC;
    }
    return AvroRecordType.GENERIC;
  }

  public AvroRecordType getRecordType() {
    if (recordType == null) {
      // Computed lazily and cached; determineRecordType() is deterministic.
      recordType = determineRecordType();
    }
    return recordType;
  }

  @Override
  public Class<T> getTypeClass() {
    return typeClass;
  }

  @Override
  public PTypeFamily getFamily() {
    return AvroTypeFamily.getInstance();
  }

  @Override
  public List<PType> getSubTypes() {
    // Defensive copy so callers cannot mutate our internal list.
    return Lists.<PType> newArrayList(subTypes);
  }

  /** Returns the Avro schema for this type, parsing it from JSON on first use. */
  public Schema getSchema() {
    if (schema == null) {
      schema = new Schema.Parser().parse(schemaString);
    }
    return schema;
  }

  /**
   * Determine if the wrapped type is a specific data avro type or wraps one.
   *
   * @return true if the wrapped type is a specific data type or wraps one
   */
  public boolean hasSpecific() {
    return getRecordType() == AvroRecordType.SPECIFIC;
  }

  private boolean checkSpecific() {
    if (Avros.isPrimitive(typeClass)) {
      return false;
    }
    if (!subTypes.isEmpty()) {
      // A composite type is "specific" if any of its components is.
      for (PType<?> subType : subTypes) {
        if (((AvroType<?>) subType).hasSpecific()) {
          return true;
        }
      }
      return false;
    }
    return SpecificRecord.class.isAssignableFrom(typeClass);
  }

  /**
   * Determine if the wrapped type is a generic data avro type.
   *
   * @return true if the wrapped type is a generic type
   */
  public boolean isGeneric() {
    return GenericData.Record.class.equals(typeClass);
  }

  /**
   * Determine if the wrapped type is a reflection-based avro type or wraps one.
   *
   * @return true if the wrapped type is a reflection-based type or wraps one.
   */
  public boolean hasReflect() {
    return getRecordType() == AvroRecordType.REFLECT;
  }

  private boolean checkReflect() {
    if (Avros.isPrimitive(typeClass)) {
      return false;
    }
    if (!subTypes.isEmpty()) {
      // A composite type is reflection-based if any of its components is.
      for (PType<?> subType : subTypes) {
        if (((AvroType<?>) subType).hasReflect()) {
          return true;
        }
      }
      return false;
    }
    // Anything that is neither a generic record nor a SpecificRecord is
    // handled via reflection.
    return !(typeClass.equals(GenericData.Record.class) || SpecificRecord.class
        .isAssignableFrom(typeClass));
  }

  public MapFn<Object, T> getInputMapFn() {
    return baseInputMapFn;
  }

  public MapFn<T, Object> getOutputMapFn() {
    return baseOutputMapFn;
  }

  @Override
  public Converter getConverter() {
    return AVRO_CONVERTER;
  }

  @Override
  public ReadableSourceTarget<T> getDefaultFileSource(Path path) {
    return new AvroFileSourceTarget<T>(path, this);
  }

  /**
   * Writes {@code contents} round-robin into {@code parallelism} Avro files
   * under {@code path} and returns a readable source over them.
   *
   * @throws IOException if the files cannot be created or written
   */
  @Override
  public ReadableSource<T> createSourceTarget(Configuration conf, Path path, Iterable<T> contents, int parallelism)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    baseOutputMapFn.setConfiguration(conf);
    baseOutputMapFn.initialize();
    fs.mkdirs(path);
    List<FSDataOutputStream> streams = Lists.newArrayListWithExpectedSize(parallelism);
    List<DataFileWriter> writers = Lists.newArrayListWithExpectedSize(parallelism);
    try {
      for (int i = 0; i < parallelism; i++) {
        Path out = new Path(path, "out" + i);
        FSDataOutputStream stream = fs.create(out);
        streams.add(stream);
        DatumWriter datumWriter = Avros.newWriter(this);
        DataFileWriter writer = new DataFileWriter(datumWriter);
        writer.create(getSchema(), stream);
        writers.add(writer);
      }
      int target = 0;
      for (T value : contents) {
        writers.get(target).append(baseOutputMapFn.map(value));
        target = (target + 1) % parallelism;
      }
    } finally {
      // Close in a finally block so partially-written files and streams are
      // released even if appending throws.
      for (DataFileWriter writer : writers) {
        writer.close();
      }
      for (FSDataOutputStream stream : streams) {
        stream.close();
      }
    }
    ReadableSource<T> ret = new AvroFileSource<T>(path, this);
    ret.inputConf(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
    return ret;
  }

  @Override
  public void initialize(Configuration conf) {
    baseInputMapFn.setConfiguration(conf);
    baseInputMapFn.initialize();
    baseOutputMapFn.setConfiguration(conf);
    baseOutputMapFn.initialize();
    deepCopier.initialize(conf);
    for (PType ptype : subTypes) {
      ptype.initialize(conf);
    }
    initialized = true;
  }

  @Override
  public T getDetachedValue(T value) {
    if (!initialized) {
      throw new IllegalStateException("Cannot call getDetachedValue on an uninitialized PType");
    }
    return deepCopier.deepCopy(value);
  }

  @Override
  public boolean equals(Object other) {
    // instanceof is false for null, so no separate null check is needed.
    if (!(other instanceof AvroType)) {
      return false;
    }
    AvroType at = (AvroType) other;
    return (typeClass.equals(at.typeClass) && subTypes.equals(at.subTypes));
  }

  @Override
  public int hashCode() {
    HashCodeBuilder hcb = new HashCodeBuilder();
    hcb.append(typeClass).append(subTypes);
    return hcb.toHashCode();
  }
}
| 2,762 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroPathPerKeyOutputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.mapred.Pair;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
* A {@link FileOutputFormat} that takes in a {@link Utf8} and an Avro record and writes the Avro records to
* a sub-directory of the output path whose name is equal to the string-form of the {@code Utf8}.
*
* This {@code OutputFormat} only keeps one {@code RecordWriter} open at a time, so it's a very good idea to write
* out all of the records for the same key at the same time within each partition so as not to be frequently opening
* and closing files.
*/
public class AvroPathPerKeyOutputFormat<T> extends FileOutputFormat<AvroWrapper<Pair<Utf8, T>>, NullWritable> {

  @Override
  public RecordWriter<AvroWrapper<Pair<Utf8, T>>, NullWritable> getRecordWriter(TaskAttemptContext taskAttemptContext)
      throws IOException, InterruptedException {
    Configuration conf = taskAttemptContext.getConfiguration();
    Path basePath = new Path(getOutputPath(taskAttemptContext), conf.get("mapreduce.output.basename", "out0"));
    return new AvroFilePerKeyRecordWriter<T>(basePath, getUniqueFile(taskAttemptContext, "part", ".avro"), conf);
  }

  /**
   * Writes each record to {@code basePath/<key>/<uniqueFileName>}, keeping at
   * most one file open at a time: when the key changes, the current file is
   * closed and a new one is opened.
   *
   * <p>Declared {@code static}: it declares its own type parameter (which
   * previously shadowed the enclosing class's {@code T}) and never uses the
   * enclosing instance, so a non-static inner class only carried a useless
   * hidden reference.
   */
  private static class AvroFilePerKeyRecordWriter<T> extends RecordWriter<AvroWrapper<Pair<Utf8, T>>, NullWritable> {

    private final Path basePath;
    private final String uniqueFileName;
    private final Configuration conf;
    private String currentKey;
    private DataFileWriter<T> currentWriter;

    public AvroFilePerKeyRecordWriter(Path basePath, String uniqueFileName, Configuration conf) {
      this.basePath = basePath;
      this.uniqueFileName = uniqueFileName;
      this.conf = conf;
    }

    @Override
    public void write(AvroWrapper<Pair<Utf8, T>> record, NullWritable n) throws IOException, InterruptedException {
      String key = record.datum().key().toString();
      if (!key.equals(currentKey)) {
        // Key changed: roll over to a file in the sub-directory for the new key.
        if (currentWriter != null) {
          currentWriter.close();
        }
        currentKey = key;
        Path dir = new Path(basePath, key);
        FileSystem fs = dir.getFileSystem(conf);
        if (!fs.exists(dir)) {
          fs.mkdirs(dir);
        }
        currentWriter = AvroOutputFormat.getDataFileWriter(new Path(dir, uniqueFileName), conf);
      }
      currentWriter.append(record.datum().value());
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
      if (currentWriter != null) {
        currentWriter.close();
        currentKey = null;
        currentWriter = null;
      }
    }
  }
}
| 2,763 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroTextOutputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class AvroTextOutputFormat<K, V> extends TextOutputFormat<K, V> {

  /**
   * Delegating writer that unwraps {@link AvroWrapper} keys/values to their
   * underlying datums before handing them to the line record writer.
   */
  class DatumRecordTextWriter extends RecordWriter<K, V> {
    private final RecordWriter delegate;

    DatumRecordTextWriter(RecordWriter recordWriter) {
      this.delegate = recordWriter;
    }

    @Override
    public void write(K key, V value) throws IOException, InterruptedException {
      delegate.write(unwrap(key), unwrap(value));
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
      delegate.close(context);
    }

    /** Returns the wrapped Avro datum, or the object itself if it is not an AvroWrapper. */
    private Object unwrap(Object o) {
      return (o instanceof AvroWrapper) ? ((AvroWrapper) o).datum() : o;
    }
  }

  @Override
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
    return new DatumRecordTextWriter(super.getRecordWriter(context));
  }
}
| 2,764 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroTypeFamily.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.PTypeUtils;
/**
 * The Avro implementation of {@link PTypeFamily}: a singleton factory for
 * Avro-serialized {@link PType}s. Nearly every method delegates to the static
 * factory methods on {@link Avros}.
 */
public class AvroTypeFamily implements PTypeFamily {
  private static final AvroTypeFamily INSTANCE = new AvroTypeFamily();

  /** Returns the singleton instance of this type family. */
  public static AvroTypeFamily getInstance() {
    return INSTANCE;
  }

  // There can only be one instance.
  private AvroTypeFamily() {
  }

  // -- Primitive and basic types: delegate to Avros. --

  @Override
  public PType<Void> nulls() {
    return Avros.nulls();
  }

  @Override
  public PType<String> strings() {
    return Avros.strings();
  }

  @Override
  public PType<Long> longs() {
    return Avros.longs();
  }

  @Override
  public PType<Integer> ints() {
    return Avros.ints();
  }

  @Override
  public PType<Float> floats() {
    return Avros.floats();
  }

  @Override
  public PType<Double> doubles() {
    return Avros.doubles();
  }

  @Override
  public PType<Boolean> booleans() {
    return Avros.booleans();
  }

  @Override
  public PType<ByteBuffer> bytes() {
    return Avros.bytes();
  }

  @Override
  public <T> PType<T> records(Class<T> clazz) {
    return Avros.records(clazz);
  }

  /** Returns a PType for Avro generic records with the given schema. */
  public PType<GenericData.Record> generics(Schema schema) {
    return Avros.generics(schema);
  }

  /** Returns a PType for the given Avro container class. */
  public <T> PType<T> containers(Class<T> clazz) {
    return Avros.containers(clazz);
  }

  // -- Collection and tuple types: delegate to Avros. --

  @Override
  public <T> PType<Collection<T>> collections(PType<T> ptype) {
    return Avros.collections(ptype);
  }

  @Override
  public <T> PType<Map<String, T>> maps(PType<T> ptype) {
    return Avros.maps(ptype);
  }

  @Override
  public <V1, V2> PType<Pair<V1, V2>> pairs(PType<V1> p1, PType<V2> p2) {
    return Avros.pairs(p1, p2);
  }

  @Override
  public <V1, V2, V3> PType<Tuple3<V1, V2, V3>> triples(PType<V1> p1, PType<V2> p2, PType<V3> p3) {
    return Avros.triples(p1, p2, p3);
  }

  @Override
  public <V1, V2, V3, V4> PType<Tuple4<V1, V2, V3, V4>> quads(PType<V1> p1, PType<V2> p2, PType<V3> p3, PType<V4> p4) {
    return Avros.quads(p1, p2, p3, p4);
  }

  @Override
  public PType<TupleN> tuples(PType<?>... ptypes) {
    return Avros.tuples(ptypes);
  }

  @Override
  public <K, V> PTableType<K, V> tableOf(PType<K> key, PType<V> value) {
    return Avros.tableOf(key, value);
  }

  /**
   * Converts an arbitrary {@link PType} into its equivalent in this family:
   * Avro types pass through unchanged, grouped-table types are rebuilt around
   * the converted table type, known primitives map directly, and anything else
   * is converted structurally via {@link PTypeUtils}.
   */
  @Override
  public <T> PType<T> as(PType<T> ptype) {
    if (ptype instanceof AvroType || ptype instanceof AvroGroupedTableType) {
      return ptype;
    }
    if (ptype instanceof PGroupedTableType) {
      PTableType ptt = ((PGroupedTableType) ptype).getTableType();
      return new AvroGroupedTableType((AvroTableType) as(ptt));
    }
    Class<T> typeClass = ptype.getTypeClass();
    PType<T> prim = Avros.getPrimitiveType(typeClass);
    if (prim != null) {
      return prim;
    }
    return PTypeUtils.convert(ptype, this);
  }

  @Override
  public <T extends Tuple> PType<T> tuples(Class<T> clazz, PType<?>... ptypes) {
    return Avros.tuples(clazz, ptypes);
  }

  @Override
  public <S, T> PType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
    return Avros.derived(clazz, inputFn, outputFn, base);
  }

  @Override
  public <S, T> PType<T> derivedImmutable(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn, PType<S> base) {
    return Avros.derivedImmutable(clazz, inputFn, outputFn, base);
  }

  @Override
  public PType<Union> unionOf(PType<?>... ptypes) {
    return Avros.unionOf(ptypes);
  }
}
| 2,765 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.TupleDeepCopier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
/**
* The implementation of the PTableType interface for Avro-based serialization.
*
*/
class AvroTableType<K, V> extends BaseAvroTableType<K, V> implements PTableType<K, V> {

  /**
   * Output-side map function: converts a Crunch {@link Pair} into an
   * {@code org.apache.avro.mapred.Pair} built against the combined
   * key/value pair schema.
   */
  private static class PairToAvroPair extends MapFn<Pair, org.apache.avro.mapred.Pair> {
    private final MapFn keyMapFn;
    private final MapFn valueMapFn;
    // Avro's Schema is not Serializable, so the key/value schemas travel as
    // JSON strings and are re-parsed after deserialization.
    private final String firstJson;
    private final String secondJson;
    private String pairSchemaJson;
    // Transient: re-parsed lazily from pairSchemaJson in map().
    private transient Schema pairSchema;

    public PairToAvroPair(AvroType keyType, AvroType valueType) {
      this.keyMapFn = keyType.getOutputMapFn();
      this.firstJson = keyType.getSchema().toString();
      this.valueMapFn = valueType.getOutputMapFn();
      this.secondJson = valueType.getSchema().toString();
    }

    @Override
    public void configure(Configuration conf) {
      keyMapFn.configure(conf);
      valueMapFn.configure(conf);
    }

    @Override
    public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
      keyMapFn.setContext(context);
      valueMapFn.setContext(context);
    }

    @Override
    public void initialize() {
      keyMapFn.initialize();
      valueMapFn.initialize();
      // The value schema is wrapped with allowNulls so null values can be
      // represented; the resulting pair schema is kept in JSON form.
      pairSchemaJson = org.apache.avro.mapred.Pair.getPairSchema(
          new Schema.Parser().parse(firstJson),
          Avros.allowNulls(new Schema.Parser().parse(secondJson))).toString();
    }

    @Override
    public org.apache.avro.mapred.Pair map(Pair input) {
      if (pairSchema == null) {
        // pairSchema is transient, so rebuild it from its JSON form on first use.
        pairSchema = new Schema.Parser().parse(pairSchemaJson);
      }
      org.apache.avro.mapred.Pair avroPair = new org.apache.avro.mapred.Pair(pairSchema);
      avroPair.key(keyMapFn.map(input.first()));
      avroPair.value(valueMapFn.map(input.second()));
      return avroPair;
    }
  }

  /**
   * Input-side map function: converts a two-field Avro {@link IndexedRecord}
   * (field 0 = key, field 1 = value) back into a Crunch {@link Pair}.
   */
  private static class IndexedRecordToPair extends MapFn<IndexedRecord, Pair> {

    private final MapFn firstMapFn;
    private final MapFn secondMapFn;

    public IndexedRecordToPair(MapFn firstMapFn, MapFn secondMapFn) {
      this.firstMapFn = firstMapFn;
      this.secondMapFn = secondMapFn;
    }

    @Override
    public void configure(Configuration conf) {
      firstMapFn.configure(conf);
      secondMapFn.configure(conf);
    }

    @Override
    public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
      firstMapFn.setContext(context);
      secondMapFn.setContext(context);
    }

    @Override
    public void initialize() {
      firstMapFn.initialize();
      secondMapFn.initialize();
    }

    @Override
    public Pair map(IndexedRecord input) {
      return Pair.of(firstMapFn.map(input.get(0)), secondMapFn.map(input.get(1)));
    }
  }

  private final AvroType<K> keyType;
  private final AvroType<V> valueType;

  public AvroTableType(AvroType<K> keyType, AvroType<V> valueType, Class<Pair<K, V>> pairClass) {
    // The table is serialized as an Avro pair schema whose value side allows
    // nulls; the two inner MapFns convert between Crunch Pairs and Avro records.
    super(pairClass, org.apache.avro.mapred.Pair.getPairSchema(keyType.getSchema(),
        Avros.allowNulls(valueType.getSchema())),
        new IndexedRecordToPair(keyType.getInputMapFn(),
            valueType.getInputMapFn()), new PairToAvroPair(keyType, valueType),
        new TupleDeepCopier(Pair.class, keyType, valueType), null, keyType, valueType);
    this.keyType = keyType;
    this.valueType = valueType;
  }

  @Override
  public PType<K> getKeyType() {
    return keyType;
  }

  @Override
  public PType<V> getValueType() {
    return valueType;
  }

  @Override
  public PGroupedTableType<K, V> getGroupedTableType() {
    return new AvroGroupedTableType<K, V>(this);
  }

  @Override
  public Pair<K, V> getDetachedValue(Pair<K, V> value) {
    return PTables.getDetachedValue(this, value);
  }
}
| 2,766 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroDeepCopier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.ByteArrayOutputStream;
import java.io.Serializable;
import java.lang.reflect.Constructor;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificData;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.types.DeepCopier;
import org.apache.hadoop.conf.Configuration;
/**
* Performs deep copies of Avro-serializable objects.
* <p>
* <b>Warning:</b> Methods in this class are not thread-safe. This shouldn't be a problem when
* running in a map-reduce context where each mapper/reducer is running in its own JVM, but it may
* well be a problem in any other kind of multi-threaded context.
*/
abstract class AvroDeepCopier<T> implements DeepCopier<T>, Serializable {

  // Avro's Schema is not Serializable, so only its JSON form is stored; the
  // Schema object is re-parsed lazily in getSchema().
  private String jsonSchema;
  protected transient Configuration conf;
  private transient Schema schema;

  public AvroDeepCopier(Schema schema) {
    this.jsonSchema = schema.toString();
  }

  /** Returns the schema of the values being copied, parsing it on first use. */
  protected Schema getSchema() {
    if (schema == null) {
      schema = new Schema.Parser().parse(jsonSchema);
    }
    return schema;
  }

  @Override
  public void initialize(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Deep copier for Avro specific data objects.
   */
  public static class AvroSpecificDeepCopier<T> extends AvroDeepCopier<T> {

    public AvroSpecificDeepCopier(Schema schema) {
      super(schema);
    }

    @Override
    public T deepCopy(T source) {
      return SpecificData.get().deepCopy(getSchema(), source);
    }
  }

  /**
   * Deep copier for Avro generic data objects.
   */
  public static class AvroGenericDeepCopier extends AvroDeepCopier<Record> {

    public AvroGenericDeepCopier(Schema schema) {
      super(schema);
    }

    @Override
    public Record deepCopy(Record source) {
      return GenericData.get().deepCopy(getSchema(), source);
    }
  }

  /**
   * Deep copier for Avro reflect data objects: copies by serializing the
   * source to bytes and deserializing into a fresh instance.
   */
  public static class AvroReflectDeepCopier<T> extends AvroDeepCopier<T> {

    // All transient: rebuilt lazily per JVM and reused across calls.
    private transient DatumReader<T> datumReader;
    private transient DatumWriter<T> datumWriter;
    private transient BinaryEncoder binaryEncoder;
    private transient BinaryDecoder binaryDecoder;

    public AvroReflectDeepCopier(Schema schema) {
      super(schema);
    }

    protected DatumReader<T> createDatumReader(Configuration conf) {
      return AvroMode.REFLECT.withFactoryFromConfiguration(conf).getReader(getSchema());
    }

    protected DatumWriter<T> createDatumWriter(Configuration conf) {
      return AvroMode.REFLECT.withFactoryFromConfiguration(conf).getWriter(getSchema());
    }

    /**
     * Create a deep copy of an Avro value.
     *
     * @param source The value to be copied
     * @return The deep copy of the value
     */
    @Override
    public T deepCopy(T source) {
      if (source == null) {
        return null;
      }
      if (datumReader == null) {
        datumReader = createDatumReader(conf);
      }
      if (datumWriter == null) {
        datumWriter = createDatumWriter(conf);
      }
      ByteArrayOutputStream byteOutStream = new ByteArrayOutputStream();
      binaryEncoder = EncoderFactory.get().binaryEncoder(byteOutStream, binaryEncoder);
      T target = createNewInstance((Class<T>) source.getClass());
      try {
        datumWriter.write(source, binaryEncoder);
        binaryEncoder.flush();
        binaryDecoder = DecoderFactory.get()
            .binaryDecoder(byteOutStream.toByteArray(), binaryDecoder);
        return datumReader.read(target, binaryDecoder);
      } catch (Exception e) {
        throw new CrunchRuntimeException("Error while deep copying avro value " + source, e);
      }
    }

    /** Instantiates the target class via its (possibly non-public) no-arg constructor. */
    protected T createNewInstance(Class<T> targetClass) {
      try {
        Constructor<T> ctor = targetClass.getDeclaredConstructor();
        ctor.setAccessible(true);
        return ctor.newInstance();
      } catch (ReflectiveOperationException e) {
        throw new CrunchRuntimeException(e);
      }
    }
  }

  /**
   * Copies ByteBuffers that are stored in Avro. A specific case is needed here
   * because ByteBuffers are the one built-in case where the serialization type is different
   * than the output type and the output type isn't immutable.
   */
  public static class AvroByteBufferDeepCopier implements DeepCopier<ByteBuffer> {

    public static final AvroByteBufferDeepCopier INSTANCE = new AvroByteBufferDeepCopier();

    @Override
    public void initialize(Configuration conf) {
      // No-op
    }

    /**
     * Returns a new ByteBuffer holding bytes [0, limit()) of {@code source},
     * or null for a null input. The source buffer's position is not modified.
     */
    @Override
    public ByteBuffer deepCopy(ByteBuffer source) {
      if (source == null) {
        return null;
      }
      // Copy through a duplicate rather than source.array(): array() throws
      // for read-only and direct buffers, and ignoring arrayOffset() copied
      // the wrong bytes for sliced buffers. duplicate() shares content but
      // has independent position/limit, so the source is left untouched.
      ByteBuffer view = source.duplicate();
      view.rewind();
      byte[] copy = new byte[view.limit()];
      view.get(copy);
      return ByteBuffer.wrap(copy);
    }
  }
}
| 2,767 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroUtf8InputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
/**
* An {@link org.apache.hadoop.mapred.InputFormat} for text files. Each line is
* a {@link Utf8} key; values are null.
*/
public class AvroUtf8InputFormat extends FileInputFormat<AvroWrapper<Utf8>, NullWritable> {

  /**
   * Wraps a {@link LineRecordReader}: each line of text becomes a
   * {@link Utf8} key (wrapped in an {@link AvroWrapper}); values are always
   * {@link NullWritable}.
   */
  static class Utf8LineRecordReader extends RecordReader<AvroWrapper<Utf8>, NullWritable> {

    private LineRecordReader lineRecordReader;
    // Reused across records; only the datum is replaced per line.
    private AvroWrapper<Utf8> currentKey = new AvroWrapper<Utf8>();

    public Utf8LineRecordReader() throws IOException {
      this.lineRecordReader = new LineRecordReader();
    }

    @Override
    public void close() throws IOException {
      lineRecordReader.close();
    }

    @Override
    public float getProgress() throws IOException {
      return lineRecordReader.getProgress();
    }

    @Override
    public AvroWrapper<Utf8> getCurrentKey() throws IOException, InterruptedException {
      Text txt = lineRecordReader.getCurrentValue();
      currentKey.datum(new Utf8(txt.toString()));
      return currentKey;
    }

    @Override
    public NullWritable getCurrentValue() throws IOException, InterruptedException {
      return NullWritable.get();
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
      lineRecordReader.initialize(split, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
      return lineRecordReader.nextKeyValue();
    }
  }

  private CompressionCodecFactory compressionCodecs = null;

  /** Old-API style hook; must be called explicitly to enable codec detection. */
  public void configure(Configuration conf) {
    compressionCodecs = new CompressionCodecFactory(conf);
  }

  // NOTE(review): this does not override the mapreduce-API
  // FileInputFormat.isSplitable(JobContext, Path), and configure() is not
  // invoked automatically by the new API — confirm callers wire it up.
  protected boolean isSplitable(FileSystem fs, Path file) {
    // Guard against configure() never having been called (previously an NPE);
    // with no codec factory we cannot detect compression, so treat the file
    // as splittable, matching the "no codec found" case.
    return compressionCodecs == null || compressionCodecs.getCodec(file) == null;
  }

  @Override
  public RecordReader<AvroWrapper<Utf8>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    return new Utf8LineRecordReader();
  }
}
| 2,768 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroDerivedValueDeepCopier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.crunch.MapFn;
import org.apache.crunch.types.DeepCopier;
import org.apache.hadoop.conf.Configuration;
/**
* A DeepCopier specific to Avro derived types.
*/
/**
 * A DeepCopier specific to Avro derived types: a derived value is copied by mapping it
 * to its underlying Avro representation, detaching that Avro value, and mapping the
 * detached copy back to the derived type.
 */
public class AvroDerivedValueDeepCopier<T, S> implements DeepCopier {
  private final MapFn<T,S> derivedToAvroFn;
  private final MapFn<S,T> avroToDerivedFn;
  private final AvroType<S> avroBaseType;
  public AvroDerivedValueDeepCopier(MapFn<T,S> derivedToAvroFn, MapFn<S,T> avroToDerivedFn, AvroType<S> avroBaseType) {
    this.derivedToAvroFn = derivedToAvroFn;
    this.avroToDerivedFn = avroToDerivedFn;
    this.avroBaseType = avroBaseType;
  }
  @Override
  public void initialize(Configuration conf) {
    // Both conversion functions and the underlying Avro type need the job
    // configuration before they can be used.
    derivedToAvroFn.setConfiguration(conf);
    derivedToAvroFn.initialize();
    avroToDerivedFn.setConfiguration(conf);
    avroToDerivedFn.initialize();
    avroBaseType.initialize(conf);
  }
  @Override
  public Object deepCopy(Object source) {
    S asAvro = derivedToAvroFn.map((T) source);
    S detached = avroBaseType.getDetachedValue(asAvro);
    return avroToDerivedFn.map(detached);
  }
}
| 2,769 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroMode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import com.google.common.collect.Maps;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.crunch.io.FormatBundle;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import java.util.Map;
/**
* AvroMode is an immutable object used for configuring the reading and writing of Avro types.
* The mode will not be used or honored unless it has been appropriately configured using one of the supported
* methods. Certain sources might also support specifying a specific mode to use.
*/
public class AvroMode implements ReaderWriterFactory {
  /**
   * Internal enum which represents the various Avro data types.
   */
  public static enum ModeType {
    SPECIFIC, REFLECT, GENERIC
  }
  /**
   * Default mode to use for reading and writing {@link ReflectData Reflect} types.
   */
  public static final AvroMode REFLECT = new AvroMode(ModeType.REFLECT, Avros.REFLECT_DATA_FACTORY_CLASS);
  /**
   * Default mode to use for reading and writing {@link SpecificData Specific} types.
   */
  public static final AvroMode SPECIFIC = new AvroMode(ModeType.SPECIFIC, "crunch.specificfactory");
  /**
   * Default mode to use for reading and writing {@link GenericData Generic} types.
   */
  public static final AvroMode GENERIC = new AvroMode(ModeType.GENERIC, "crunch.genericfactory");
  /** Configuration property naming the mode used for regular input/output. */
  public static final String AVRO_MODE_PROPERTY = "crunch.avro.mode";
  /** Configuration property naming the mode used during the shuffle phase. */
  public static final String AVRO_SHUFFLE_MODE_PROPERTY = "crunch.avro.shuffle.mode";
  /**
   * Creates an AvroMode based on the {@link #AVRO_MODE_PROPERTY} property in the {@code conf}.
   * @param conf The configuration holding the properties for mode to be created.
   * @return an AvroMode based on the {@link #AVRO_MODE_PROPERTY} property in the {@code conf}.
   */
  public static AvroMode fromConfiguration(Configuration conf) {
    // Defaults to REFLECT when the property is absent.
    AvroMode mode = getMode(conf.getEnum(AVRO_MODE_PROPERTY, ModeType.REFLECT));
    return mode.withFactoryFromConfiguration(conf);
  }
  /**
   * Creates an AvroMode based on the {@link #AVRO_SHUFFLE_MODE_PROPERTY} property in the {@code conf}.
   * @param conf The configuration holding the properties for mode to be created.
   * @return an AvroMode based on the {@link #AVRO_SHUFFLE_MODE_PROPERTY} property in the {@code conf}.
   */
  public static AvroMode fromShuffleConfiguration(Configuration conf) {
    AvroMode mode = getMode(conf.getEnum(AVRO_SHUFFLE_MODE_PROPERTY, ModeType.REFLECT));
    return mode.withFactoryFromConfiguration(conf);
  }
  /**
   * Creates an {@link AvroMode} based upon the specified {@code type}.
   * @param type the Avro type which indicates a specific mode.
   * @return an {@link AvroMode} based upon the specified {@code type}.
   */
  public static AvroMode fromType(AvroType<?> type) {
    if (type.hasReflect()) {
      // Reflect wins when both reflect and specific schemas are present, but mixing
      // the two may be disallowed (Avros decides and may throw here).
      if (type.hasSpecific()) {
        Avros.checkCombiningSpecificAndReflectionSchemas();
      }
      return REFLECT;
    } else if (type.hasSpecific()) {
      return SPECIFIC;
    } else {
      return GENERIC;
    }
  }
  /** Maps a {@link ModeType} to its singleton mode; unknown/REFLECT falls back to {@link #REFLECT}. */
  private static AvroMode getMode(ModeType modeType){
    switch(modeType){
    case SPECIFIC:
      return SPECIFIC;
    case GENERIC:
      return GENERIC;
    case REFLECT:
    default:
      return REFLECT;
    }
  }
  // Optional class loader used to resolve specific/reflect datum classes; null means
  // "use the default loader". Shared (static) across all modes.
  private static ClassLoader specificLoader = null;
  /**
   * Set the {@code ClassLoader} that will be used for loading Avro {@code org.apache.avro.specific.SpecificRecord}
   * and reflection implementation classes. It is typically not necessary to call this method -- it should only be used
   * if a specific class loader is needed in order to load the specific datum classes.
   *
   * @param loader the {@code ClassLoader} to be used for loading specific datum classes
   */
  public static void setSpecificClassLoader(ClassLoader loader) {
    specificLoader = loader;
  }
  /**
   * Get the configured {@code ClassLoader} to be used for loading Avro {@code org.apache.specific.SpecificRecord}
   * and reflection implementation classes. The return value may be null.
   *
   * @return the configured {@code ClassLoader} for loading specific or reflection datum classes, may be null
   */
  public static ClassLoader getSpecificClassLoader() {
    return specificLoader;
  }
  /**
   * Internal method for setting the specific class loader if none is already set. If no specific class loader is set,
   * the given class loader will be set as the specific class loader. If a specific class loader is already set, this
   * will be a no-op.
   *
   * @param loader the {@code ClassLoader} to be registered as the specific class loader if no specific class loader
   * is already set
   */
  static void registerSpecificClassLoaderInternal(ClassLoader loader) {
    if (specificLoader == null) {
      setSpecificClassLoader(loader);
    }
  }
  /**
   * the factory methods in this class may be overridden in ReaderWriterFactory
   */
  private final ReaderWriterFactory factory;
  /**
   * The property name used setting property into {@link Configuration}.
   */
  private final String propName;
  /**
   * The mode type representing the Avro data form.
   */
  private final ModeType modeType;
  // Full constructor; factory may be null, in which case the mode's built-in
  // reader/writer/data implementations are used.
  private AvroMode(ModeType modeType, ReaderWriterFactory factory, String propName) {
    this.factory = factory;
    this.propName = propName;
    this.modeType = modeType;
  }
  // Convenience constructor for the default (factory-less) singleton modes.
  private AvroMode(ModeType modeType, String propName) {
    this(modeType, null, propName);
  }
  /**
   * Returns a {@link GenericData} instance based on the mode type.
   * @return a {@link GenericData} instance based on the mode type.
   */
  public GenericData getData() {
    // A custom factory, when present, always takes precedence.
    if (factory != null) {
      return factory.getData();
    }
    switch(this.modeType) {
    case REFLECT:
      return ReflectData.AllowNull.get();
    case SPECIFIC:
      return SpecificData.get();
    default:
      return GenericData.get();
    }
  }
  /**
   * Creates a {@code DatumReader} based on the {@code schema}.
   * @param schema the schema to be read
   * @param <T> the record type created by the reader.
   * @return a {@code DatumReader} based on the {@code schema}.
   */
  public <T> DatumReader<T> getReader(Schema schema) {
    if (factory != null) {
      return factory.getReader(schema);
    }
    switch (this.modeType) {
    case REFLECT:
      // Honor the custom class loader (if any) when resolving datum classes.
      if (specificLoader != null) {
        return new ReflectDatumReader<T>(schema, schema, new ReflectData(specificLoader));
      } else {
        return new ReflectDatumReader<T>(schema);
      }
    case SPECIFIC:
      if (specificLoader != null) {
        return new SpecificDatumReader<T>(
            schema, schema, new SpecificData(specificLoader));
      } else {
        return new SpecificDatumReader<T>(schema);
      }
    default:
      return new GenericDatumReader<T>(schema);
    }
  }
  /**
   * Creates a {@code DatumWriter} based on the {@code schema}.
   * @param schema the schema to be read
   * @param <T> the record type created by the writer.
   * @return a {@code DatumWriter} based on the {@code schema}.
   */
  public <T> DatumWriter<T> getWriter(Schema schema) {
    if (factory != null) {
      return factory.getWriter(schema);
    }
    switch (this.modeType) {
    case REFLECT:
      return new ReflectDatumWriter<T>(schema);
    case SPECIFIC:
      return new SpecificDatumWriter<T>(schema);
    default:
      return new GenericDatumWriter<T>(schema);
    }
  }
  /**
   * Creates a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers.
   *
   * @param factory factory implementation for the mode to use
   * @return a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers.
   * @deprecated use {@link #withFactory(ReaderWriterFactory)} instead.
   */
  @Deprecated
  public AvroMode override(ReaderWriterFactory factory) {
    return withFactory(factory);
  }
  /**
   * Creates a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers. If {@code null} the default factory for the mode
   * will be used.
   *
   * @param factory factory implementation for the mode to use
   * @return a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers.
   */
  public AvroMode withFactory(ReaderWriterFactory factory){
    // An AvroMode is itself a ReaderWriterFactory (getFactory() may return `this`);
    // avoid pointlessly wrapping a mode with itself.
    if (factory != this) {
      return withReaderWriterFactory(factory);
    } else {
      return this;
    }
  }
  /**
   * Populates the {@code conf} with mode specific settings for use during the shuffle phase.
   * @param conf the configuration to populate.
   */
  public void configureShuffle(Configuration conf) {
    conf.setEnum(AVRO_SHUFFLE_MODE_PROPERTY, this.modeType);
    configure(conf);
  }
  /**
   * Populates the {@code bundle} with mode specific settings for the specific {@link FormatBundle}.
   * @param bundle the bundle to populate.
   */
  public void configure(FormatBundle bundle) {
    bundle.set(AVRO_MODE_PROPERTY, this.modeType.toString());
    if (factory != null) {
      bundle.set(propName, factory.getClass().getName());
    }
  }
  /**
   * Populates the {@code conf} with mode specific settings.
   * @param conf the configuration to populate.
   */
  public void configure(Configuration conf) {
    conf.set(AVRO_MODE_PROPERTY, this.modeType.toString());
    if (factory != null) {
      conf.setClass(propName, factory.getClass(), ReaderWriterFactory.class);
    }
  }
  /**
   * Returns the entries that a {@code Configuration} instance needs to enable
   * this AvroMode as a serializable map of key-value pairs.
   */
  public Map<String, String> getModeProperties() {
    Map<String, String> props = Maps.newHashMap();
    props.put(AVRO_MODE_PROPERTY, this.modeType.toString());
    if (factory != null) {
      props.put(propName, factory.getClass().getCanonicalName());
    }
    return props;
  }
  /**
   * Populates the {@code conf} with mode specific settings.
   * @param conf the configuration to populate.
   * @deprecated use {@link #configure(org.apache.hadoop.conf.Configuration)}
   */
  @Deprecated
  public void configureFactory(Configuration conf) {
    configure(conf);
  }
  /**
   * Creates a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers. If {@code null} the default factory for the mode
   * will be used.
   *
   * @param readerWriterFactory factory implementation for the mode to use
   * @return a new {@code AvroMode} instance which will utilize the {@code factory} instance
   * for creating Avro readers and writers.
   */
  private AvroMode withReaderWriterFactory(ReaderWriterFactory readerWriterFactory) {
    return new AvroMode(modeType, readerWriterFactory, propName);
  }
  /**
   * Returns the factory that will be used for the mode.
   *
   * @return the factory that will be used for the mode.
   */
  public ReaderWriterFactory getFactory() {
    // When no explicit factory was supplied the mode serves as its own factory.
    return factory != null ? factory : this;
  }
  /**
   * Two modes are equal when their mode type, property name, and (possibly null)
   * factories are all equal.
   */
  @Override
  public boolean equals(Object o) {
    if(o == null){
      return false;
    }
    if(this == o){
      return true;
    }
    if(!(o instanceof AvroMode)){
      return false;
    }
    AvroMode that = (AvroMode) o;
    if(!this.modeType.equals(that.modeType)){
      return false;
    }
    if(!this.propName.equals(that.propName)){
      return false;
    }
    if(this.factory != null){
      if(that.factory == null){
        return false;
      }else {
        return this.factory.equals(that.factory);
      }
    }else{
      return that.factory == null;
    }
  }
  /** Consistent with {@link #equals(Object)}: combines propName, modeType, and factory. */
  @Override
  public int hashCode() {
    int hash = propName.hashCode();
    hash = 31*hash + modeType.hashCode();
    if(factory != null){
      hash = 31*hash+factory.hashCode();
    }
    return hash;
  }
  @SuppressWarnings("unchecked")
  public AvroMode withFactoryFromConfiguration(Configuration conf) {
    // although the shuffle and input/output use different properties for mode,
    // this is shared - only one ReaderWriterFactory can be used.
    Class<?> factoryClass = conf.getClass(propName, this.getClass());
    if (factoryClass != this.getClass()) {
      return withReaderWriterFactory((ReaderWriterFactory)
          ReflectionUtils.newInstance(factoryClass, conf));
    } else {
      return this;
    }
  }
}
| 2,770 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroGroupedTableType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import java.util.Collection;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroKeyComparator;
import org.apache.avro.mapred.AvroValue;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.fn.PairMapFn;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
/**
 * The Avro implementation of {@link PGroupedTableType}: wires up the input/output map
 * functions for grouped (key, iterable-of-values) pairs and configures a job's shuffle
 * to serialize, compare, and group records using Avro.
 */
class AvroGroupedTableType<K, V> extends PGroupedTableType<K, V> {
  // Stateless converter shared across all instances.
  private static final AvroPairConverter CONVERTER = new AvroPairConverter();
  private final MapFn inputFn;
  private final MapFn outputFn;
  public AvroGroupedTableType(BaseAvroTableType<K, V> tableType) {
    super(tableType);
    AvroType keyType = (AvroType) tableType.getKeyType();
    AvroType valueType = (AvroType) tableType.getValueType();
    // Compose the key/value conversion functions into pair-level functions.
    this.inputFn = new PairIterableMapFn(keyType.getInputMapFn(), valueType.getInputMapFn());
    this.outputFn = new PairMapFn(keyType.getOutputMapFn(), valueType.getOutputMapFn());
  }
  @Override
  public Class<Pair<K, Iterable<V>>> getTypeClass() {
    return (Class<Pair<K, Iterable<V>>>) Pair.of(null, null).getClass();
  }
  @Override
  public Converter getGroupingConverter() {
    return CONVERTER;
  }
  @Override
  public MapFn getInputMapFn() {
    return inputFn;
  }
  @Override
  public MapFn getOutputMapFn() {
    return outputFn;
  }
  @Override
  public void initialize(Configuration conf) {
    // Delegates to the underlying table type (key and value types included).
    getTableType().initialize(conf);
  }
  @Override
  public Pair<K, Iterable<V>> getDetachedValue(Pair<K, Iterable<V>> value) {
    return PTables.getGroupedDetachedValue(this, value);
  }
  @Override
  public void configureShuffle(Job job, GroupingOptions options) {
    // NOTE(review): assumes the wrapped table type is an AvroTableType; a different
    // BaseAvroTableType subclass would fail this cast -- confirm against callers.
    AvroTableType<K, V> att = (AvroTableType<K, V>) tableType;
    String schemaJson = att.getSchema().toString();
    Configuration conf = job.getConfiguration();
    if (att.hasReflect()) {
      // Mixing specific and reflect schemas may be disallowed (Avros decides).
      if (att.hasSpecific()) {
        Avros.checkCombiningSpecificAndReflectionSchemas();
      }
      conf.setBoolean(AvroJob.MAP_OUTPUT_IS_REFLECT, true);
    }
    conf.set(AvroJob.MAP_OUTPUT_SCHEMA, schemaJson);
    // Sort/compare map output by the Avro key.
    job.setSortComparatorClass(AvroKeyComparator.class);
    job.setMapOutputKeyClass(AvroKey.class);
    job.setMapOutputValueClass(AvroValue.class);
    if (options != null) {
      options.configure(job);
    }
    AvroMode.fromType(att).withFactoryFromConfiguration(conf).configureShuffle(conf);
    // Register SafeAvroSerialization exactly once, preserving existing serializations.
    Collection<String> serializations = job.getConfiguration().getStringCollection(
        "io.serializations");
    if (!serializations.contains(SafeAvroSerialization.class.getName())) {
      serializations.add(SafeAvroSerialization.class.getName());
      job.getConfiguration().setStrings("io.serializations", serializations.toArray(new String[0]));
    }
  }
  @Override
  public ReadableSource<Pair<K, Iterable<V>>> createSourceTarget(
      Configuration conf,
      Path path,
      Iterable<Pair<K, Iterable<V>>> contents,
      int parallelism) throws IOException {
    throw new UnsupportedOperationException("GroupedTableTypes do not support creating ReadableSources");
  }
}
| 2,771 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/ReaderWriterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
/**
* Interface for accessing DatumReader, DatumWriter, and Data classes.
*/
public interface ReaderWriterFactory {
  /**
   * Returns the {@link GenericData} implementation to use for Avro data.
   */
  GenericData getData();
  /**
   * Creates a {@link DatumReader} for the given schema.
   *
   * @param schema the schema of the data to be read
   * @param <D> the datum type produced by the reader
   * @return a reader for the schema
   */
  <D> DatumReader<D> getReader(Schema schema);
  /**
   * Creates a {@link DatumWriter} for the given schema.
   *
   * @param schema the schema of the data to be written
   * @param <D> the datum type consumed by the writer
   * @return a writer for the schema
   */
  <D> DatumWriter<D> getWriter(Schema schema);
}
| 2,772 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroOutputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/** An {@link org.apache.hadoop.mapreduce.OutputFormat} for Avro data files. */
public class AvroOutputFormat<T> extends FileOutputFormat<AvroWrapper<T>, NullWritable> {
  /**
   * Opens a {@link DataFileWriter} on {@code path}, honoring the output schema, the
   * compression codec and deflate level, and the sync interval configured on the job.
   * If the target file already exists, records are appended; otherwise it is created.
   */
  public static <S> DataFileWriter<S> getDataFileWriter(Path path, Configuration conf) throws IOException {
    Schema schema = AvroJob.getOutputSchema(conf);
    DataFileWriter<S> dataFileWriter =
        new DataFileWriter<S>(AvroMode.fromConfiguration(conf).<S>getWriter(schema));
    JobConf jobConf = new JobConf(conf);
    /* copied from org.apache.avro.mapred.AvroOutputFormat */
    if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jobConf)) {
      String codecName = conf.get(AvroJob.OUTPUT_CODEC,
          org.apache.avro.file.DataFileConstants.DEFLATE_CODEC);
      CodecFactory codec;
      if (codecName.equals(org.apache.avro.file.DataFileConstants.DEFLATE_CODEC)) {
        // The deflate codec additionally honors the configured compression level.
        int level = conf.getInt(org.apache.avro.mapred.AvroOutputFormat.DEFLATE_LEVEL_KEY,
            org.apache.avro.file.CodecFactory.DEFAULT_DEFLATE_LEVEL);
        codec = CodecFactory.deflateCodec(level);
      } else {
        codec = CodecFactory.fromString(codecName);
      }
      dataFileWriter.setCodec(codec);
    }
    dataFileWriter.setSyncInterval(jobConf.getInt(org.apache.avro.mapred.AvroOutputFormat.SYNC_INTERVAL_KEY,
        org.apache.avro.file.DataFileConstants.DEFAULT_SYNC_INTERVAL));
    FileSystem fs = path.getFileSystem(conf);
    if (fs.exists(path)) {
      dataFileWriter.create(schema, fs.append(path));
    } else {
      dataFileWriter.create(schema, fs.create(path));
    }
    return dataFileWriter;
  }
  @Override
  public RecordWriter<AvroWrapper<T>, NullWritable> getRecordWriter(TaskAttemptContext context) throws IOException,
      InterruptedException {
    Configuration conf = context.getConfiguration();
    Path workFile = getDefaultWorkFile(context, org.apache.avro.mapred.AvroOutputFormat.EXT);
    final DataFileWriter<T> fileWriter = getDataFileWriter(workFile, conf);
    // Values are ignored: only the wrapped Avro datum is appended to the file.
    return new RecordWriter<AvroWrapper<T>, NullWritable>() {
      @Override
      public void write(AvroWrapper<T> wrapper, NullWritable ignore) throws IOException {
        fileWriter.append(wrapper.datum());
      }
      @Override
      public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        fileWriter.close();
      }
    };
  }
}
| 2,773 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroRecordReader.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import static org.apache.avro.file.DataFileConstants.MAGIC;
import java.io.EOFException;
import java.io.IOException;
import java.util.Arrays;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileReader12;
import org.apache.avro.file.FileReader;
import org.apache.avro.file.SeekableInput;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
/** An {@link RecordReader} for Avro data files. */
class AvroRecordReader<T> extends RecordReader<AvroWrapper<T>, NullWritable> {
  private FileReader<T> reader;
  // Byte position of the first sync point at or after the split start.
  private long start;
  // Byte position marking the end of this split.
  private long end;
  private AvroWrapper<T> key;
  private NullWritable value;
  private Schema schema;
  public AvroRecordReader(Schema schema) {
    this.schema = schema;
  }
  @Override
  public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration conf = context.getConfiguration();
    SeekableInput in = new FsInput(split.getPath(), conf);
    DatumReader<T> datumReader = AvroMode
        .fromConfiguration(context.getConfiguration())
        .getReader(schema);
    this.reader = openAvroDataFileReader(in, datumReader);
    reader.sync(split.getStart()); // sync to start
    this.start = reader.tell();
    this.end = split.getStart() + split.getLength();
  }
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    // Stop at end-of-file or once the reader has passed the last sync point in the split.
    if (!reader.hasNext() || reader.pastSync(end)) {
      key = null;
      value = null;
      return false;
    }
    if (key == null) {
      key = new AvroWrapper<T>();
    }
    if (value == null) {
      value = NullWritable.get();
    }
    // Reuse the previous datum object where the reader supports it.
    key.datum(reader.next(key.datum()));
    return true;
  }
  @Override
  public AvroWrapper<T> getCurrentKey() throws IOException, InterruptedException {
    return key;
  }
  @Override
  public NullWritable getCurrentValue() throws IOException, InterruptedException {
    return value;
  }
  @Override
  public float getProgress() throws IOException {
    // Fraction of the split consumed so far, clamped to 1.0; zero-length splits report 0.
    if (end == start) {
      return 0.0f;
    } else {
      return Math.min(1.0f, (getPos() - start) / (float) (end - start));
    }
  }
  public long getPos() throws IOException {
    return reader.tell();
  }
  @Override
  public void close() throws IOException {
    if (reader != null) {
      reader.close();
      reader = null;
    }
  }
  /**
   * Local patch for AVRO-2944.
   */
  private static <D> FileReader<D> openAvroDataFileReader(SeekableInput in, DatumReader<D> reader) throws IOException {
    if (in.length() < MAGIC.length)
      throw new IOException("Not an Avro data file");
    // read magic header
    byte[] magic = new byte[MAGIC.length];
    in.seek(0);
    int offset = 0;
    int length = magic.length;
    // Loop because a single read() may return fewer bytes than requested.
    while (length > 0) {
      int bytesRead = in.read(magic, offset, length);
      if (bytesRead < 0)
        throw new EOFException("Unexpected EOF with " + length + " bytes remaining to read");
      length -= bytesRead;
      offset += bytesRead;
    }
    in.seek(0);
    if (Arrays.equals(MAGIC, magic)) // current format
      return new DataFileReader<>(in, reader);
    if (Arrays.equals(new byte[] { (byte) 'O', (byte) 'b', (byte) 'j', (byte) 0 }, magic)) // 1.2 format
      return new DataFileReader12<>(in, reader);
    throw new IOException("Not an Avro data file");
  }
}
| 2,774 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroInputFormat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
/** An {@link org.apache.hadoop.mapreduce.InputFormat} for Avro data files. */
public class AvroInputFormat<T> extends FileInputFormat<AvroWrapper<T>, NullWritable> {
  /**
   * Creates an {@link AvroRecordReader} for the split, parsing the input schema stored
   * in the job configuration under {@link AvroJob#INPUT_SCHEMA}.
   */
  @Override
  public RecordReader<AvroWrapper<T>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    context.setStatus(split.toString());
    String jsonSchema = context.getConfiguration().get(AvroJob.INPUT_SCHEMA);
    Schema.Parser parser = new Schema.Parser();
    Schema inputSchema = parser.parse(jsonSchema);
    return new AvroRecordReader<T>(inputSchema);
  }
}
| 2,775 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroCapabilities.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;
import com.google.common.collect.Lists;
/**
* Determines the capabilities of the Avro version that is currently being used.
*/
class AvroCapabilities {

  /**
   * A minimal hand-rolled specific record (a single array-of-string field) used
   * only to probe the behavior of the Avro version on the classpath.
   */
  public static class Record extends org.apache.avro.specific.SpecificRecordBase implements
      org.apache.avro.specific.SpecificRecord {
    public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser()
        .parse("{\"type\":\"record\",\"name\":\"Record\",\"namespace\":\"org.apache.crunch.types.avro\",\"fields\":[{\"name\":\"subrecords\",\"type\":{\"type\":\"array\",\"items\":\"string\"}}]}");
    @Deprecated
    public java.util.List<java.lang.CharSequence> subrecords;

    public java.lang.Object get(int field$) {
      switch (field$) {
      case 0:
        return subrecords;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
      }
    }

    // Used by DatumReader. Applications should not call.
    @SuppressWarnings(value = "unchecked")
    public void put(int field$, java.lang.Object value$) {
      switch (field$) {
      case 0:
        subrecords = (java.util.List<java.lang.CharSequence>) value$;
        break;
      default:
        throw new org.apache.avro.AvroRuntimeException("Bad index");
      }
    }

    @Override
    public Schema getSchema() {
      return SCHEMA$;
    }
  }

  /**
   * Determine if the current Avro version can use the ReflectDatumReader to
   * read SpecificData that includes an array. The inability to do this was a
   * bug that was fixed in Avro 1.7.0.
   *
   * @return true if SpecificData can be properly read using a
   *         ReflectDatumReader
   */
  static boolean canDecodeSpecificSchemaWithReflectDatumReader() {
    // Typed construction; the original used raw types, which produced
    // unchecked-conversion warnings for no benefit.
    ReflectDatumReader<Record> datumReader = new ReflectDatumReader<Record>(Record.SCHEMA$);
    ReflectDatumWriter<Record> datumWriter = new ReflectDatumWriter<Record>(Record.SCHEMA$);
    Record record = new Record();
    record.subrecords = Lists.<CharSequence> newArrayList("a", "b");
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(byteArrayOutputStream, null);
    try {
      // Round-trip the record: write it out and read it back reflectively.
      datumWriter.write(record, encoder);
      encoder.flush();
      BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(
          byteArrayOutputStream.toByteArray(), null);
      datumReader.read(record, decoder);
    } catch (IOException ioe) {
      throw new RuntimeException("Error performing specific schema test", ioe);
    } catch (ClassCastException cce) {
      // This indicates that we're using a pre-1.7.0 version of Avro, as the
      // ReflectDatumReader in those versions could not correctly handle an
      // array in a SpecificData value
      return false;
    }
    return true;
  }
}
| 2,776 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/Avros.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.specific.SpecificRecord;
import org.apache.avro.util.Utf8;
import org.apache.commons.codec.binary.Base64;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
import org.apache.crunch.fn.CompositeMapFn;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.types.CollectionDeepCopier;
import org.apache.crunch.types.MapDeepCopier;
import org.apache.crunch.types.NoOpDeepCopier;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypes;
import org.apache.crunch.types.TupleDeepCopier;
import org.apache.crunch.types.TupleFactory;
import org.apache.crunch.types.UnionDeepCopier;
import org.apache.crunch.types.writable.WritableDeepCopier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Defines static methods that are analogous to the methods defined in
* {@link AvroTypeFamily} for convenient static importing.
*
*/
public class Avros {
  /**
   * Older versions of Avro (i.e., before 1.7.0) do not support schemas that are
   * composed of a mix of specific and reflection-based schemas. This bit
   * controls whether or not we allow Crunch jobs to be created that involve
   * mixing specific and reflection-based schemas and can be overridden by the
   * client developer.
   */
  public static final boolean CAN_COMBINE_SPECIFIC_AND_REFLECT_SCHEMAS;
  static {
    // Probe the runtime Avro version once, at class-load time.
    CAN_COMBINE_SPECIFIC_AND_REFLECT_SCHEMAS = AvroCapabilities.canDecodeSpecificSchemaWithReflectDatumReader();
  }
  /**
   * The instance we use for generating reflected schemas. In releases up to
   * 0.8.0, this may be modified by clients (e.g., Scrunch.) to override the
   * reader, writer, and data instances used.
   *
   * Configuring the ReaderWriterFactory by setting this field is deprecated.
   * Instead, use {@link AvroMode#override(ReaderWriterFactory)}.
   *
   * @deprecated as of 0.9.0; use AvroMode.REFLECT.override(ReaderWriterFactory)
   */
  public static ReflectDataFactory REFLECT_DATA_FACTORY = new ReflectDataFactory();
  /**
   * The name of the configuration parameter that tracks which reflection
   * factory to use.
   */
  public static final String REFLECT_DATA_FACTORY_CLASS = "crunch.reflectdatafactory";
/**
* @deprecated as of 0.9.0; use AvroMode.REFLECT.configure(Configuration)
*/
@Deprecated
public static void configureReflectDataFactory(Configuration conf) {
AvroMode.REFLECT.withFactory(REFLECT_DATA_FACTORY).configure(conf);
}
/**
* @deprecated as of 0.9.0; use AvroMode.fromConfiguration(conf)
*/
public static ReflectDataFactory getReflectDataFactory(Configuration conf) {
return (ReflectDataFactory)AvroMode.REFLECT.withFactoryFromConfiguration(conf).getFactory();
}
public static void checkCombiningSpecificAndReflectionSchemas() {
if (!CAN_COMBINE_SPECIFIC_AND_REFLECT_SCHEMAS) {
throw new IllegalStateException("Crunch does not support running jobs that"
+ " contain a mixture of reflection-based and avro-generated data types."
+ " Please consider turning your reflection-based type into an avro-generated"
+ " type and using that generated type instead."
+ " If the version of Avro you are using is 1.7.0 or greater, you can enable"
+ " combined schemas by setting the Avros.CAN_COMBINE_SPECIFIC_AND_REFLECT_SCHEMAS"
+ " field to 'true'.");
}
}
  /** Returns a generic-mode {@link DatumReader} for the given schema. */
  public static <T> DatumReader<T> newReader(Schema schema) {
    return AvroMode.GENERIC.getReader(schema);
  }
  /** Returns a {@link DatumReader} whose mode (generic/specific/reflect) is derived from the type. */
  public static <T> DatumReader<T> newReader(AvroType<T> type) {
    return AvroMode.fromType(type).getReader(type.getSchema());
  }
  /** Returns a generic-mode {@link DatumWriter} for the given schema. */
  public static <T> DatumWriter<T> newWriter(Schema schema) {
    return AvroMode.GENERIC.getWriter(schema);
  }
  /** Returns a {@link DatumWriter} whose mode (generic/specific/reflect) is derived from the type. */
  public static <T> DatumWriter<T> newWriter(AvroType<T> type) {
    return AvroMode.fromType(type).getWriter(type.getSchema());
  }
public static MapFn<CharSequence, String> UTF8_TO_STRING = new MapFn<CharSequence, String>() {
@Override
public String map(CharSequence input) {
if (input == null) {
return null;
}
return input.toString();
}
};
public static MapFn<String, Utf8> STRING_TO_UTF8 = new MapFn<String, Utf8>() {
@Override
public Utf8 map(String input) {
if (input == null) {
return null;
}
return new Utf8(input);
}
};
public static MapFn<Object, ByteBuffer> BYTES_IN = new MapFn<Object, ByteBuffer>() {
@Override
public ByteBuffer map(Object input) {
if (input == null) {
return null;
}
if (input instanceof ByteBuffer) {
return (ByteBuffer) input;
}
return ByteBuffer.wrap((byte[]) input);
}
};
  // Shared singleton PTypes for the Avro primitive schema types. Strings and
  // bytes need conversion functions (Utf8 <-> String, byte[]/ByteBuffer
  // normalization); the rest map directly.
  private static final AvroType<String> strings = new AvroType<String>(String.class, Schema.create(Schema.Type.STRING),
      UTF8_TO_STRING, STRING_TO_UTF8, NoOpDeepCopier.<String>create(), AvroType.AvroRecordType.GENERIC);
  private static final AvroType<Void> nulls = create(Void.class, Schema.Type.NULL);
  private static final AvroType<Long> longs = create(Long.class, Schema.Type.LONG);
  private static final AvroType<Integer> ints = create(Integer.class, Schema.Type.INT);
  private static final AvroType<Float> floats = create(Float.class, Schema.Type.FLOAT);
  private static final AvroType<Double> doubles = create(Double.class, Schema.Type.DOUBLE);
  private static final AvroType<Boolean> booleans = create(Boolean.class, Schema.Type.BOOLEAN);
  private static final AvroType<ByteBuffer> bytes = new AvroType<ByteBuffer>(ByteBuffer.class,
      Schema.create(Schema.Type.BYTES), BYTES_IN, IdentityFn.getInstance(),
      AvroDeepCopier.AvroByteBufferDeepCopier.INSTANCE, AvroType.AvroRecordType.GENERIC);
  // Lookup table from Java wrapper class to its built-in primitive PType.
  private static final Map<Class<?>, PType<?>> PRIMITIVES = ImmutableMap.<Class<?>, PType<?>> builder()
      .put(String.class, strings).put(Long.class, longs).put(Integer.class, ints).put(Float.class, floats)
      .put(Double.class, doubles).put(Boolean.class, booleans).put(ByteBuffer.class, bytes).build();
  // Client-registered overrides consulted by records(Class) before the defaults.
  private static final Map<Class<?>, AvroType<?>> EXTENSIONS = Maps.newHashMap();
public static <T> void register(Class<T> clazz, AvroType<T> ptype) {
EXTENSIONS.put(clazz, ptype);
}
public static <T> PType<T> getPrimitiveType(Class<T> clazz) {
return (PType<T>) PRIMITIVES.get(clazz);
}
static <T> boolean isPrimitive(AvroType<T> avroType) {
return avroType.getTypeClass().isPrimitive() || PRIMITIVES.containsKey(avroType.getTypeClass());
}
static <T> boolean isPrimitive(Class<T> typeClass) {
return typeClass.isPrimitive() || PRIMITIVES.containsKey(typeClass);
}
private static <T> AvroType<T> create(Class<T> clazz, Schema.Type schemaType) {
return new AvroType<T>(clazz, Schema.create(schemaType), NoOpDeepCopier.<T>create());
}
  // Accessors for the shared primitive PType singletons.
  public static final AvroType<Void> nulls() {
    return nulls;
  }
  public static final AvroType<String> strings() {
    return strings;
  }
  public static final AvroType<Long> longs() {
    return longs;
  }
  public static final AvroType<Integer> ints() {
    return ints;
  }
  public static final AvroType<Float> floats() {
    return floats;
  }
  public static final AvroType<Double> doubles() {
    return doubles;
  }
  public static final AvroType<Boolean> booleans() {
    return booleans;
  }
  public static final AvroType<ByteBuffer> bytes() {
    return bytes;
  }
public static final <T> AvroType<T> records(Class<T> clazz) {
if (EXTENSIONS.containsKey(clazz)) {
return (AvroType<T>) EXTENSIONS.get(clazz);
}
return containers(clazz);
}
public static final AvroType<GenericData.Record> generics(Schema schema) {
return new AvroType<GenericData.Record>(
GenericData.Record.class, schema, new AvroDeepCopier.AvroGenericDeepCopier(schema));
}
public static final <T> AvroType<T> containers(Class<T> clazz) {
if (SpecificRecord.class.isAssignableFrom(clazz)) {
return (AvroType<T>) specifics((Class<SpecificRecord>) clazz);
}
return reflects(clazz);
}
public static final <T extends SpecificRecord> AvroType<T> specifics(Class<T> clazz) {
AvroMode.registerSpecificClassLoaderInternal(clazz.getClassLoader());
T t = ReflectionUtils.newInstance(clazz, null);
Schema schema = t.getSchema();
return new AvroType<T>(clazz, schema, new AvroDeepCopier.AvroSpecificDeepCopier<T>(schema));
}
public static final <T> AvroType<T> reflects(Class<T> clazz) {
Schema schema = ((ReflectData) AvroMode.REFLECT.getData()).getSchema(clazz);
return reflects(clazz, schema);
}
public static final <T> AvroType<T> reflects(Class<T> clazz, Schema schema) {
AvroMode.registerSpecificClassLoaderInternal(clazz.getClassLoader());
return new AvroType<T>(clazz, schema, new AvroDeepCopier.AvroReflectDeepCopier<T>(schema));
}
  /**
   * Deserializes an Avro bytes value back into a Hadoop {@link Writable}
   * instance of a fixed class via {@link Writable#readFields}.
   */
  private static class BytesToWritableMapFn<T extends Writable> extends MapFn<Object, T> {
    private static final Logger LOG = LoggerFactory.getLogger(BytesToWritableMapFn.class);
    private final Class<T> writableClazz;
    public BytesToWritableMapFn(Class<T> writableClazz) {
      this.writableClazz = writableClazz;
    }
    @Override
    public T map(Object input) {
      // Normalize byte[]/ByteBuffer input to a ByteBuffer first.
      ByteBuffer byteBuffer = BYTES_IN.map(input);
      T instance = ReflectionUtils.newInstance(writableClazz, null);
      try {
        // NOTE(review): the offset/length arguments ignore the buffer's
        // position() — this assumes an unconsumed buffer (position 0); confirm
        // callers never hand over a partially-read buffer.
        instance.readFields(new DataInputStream(new ByteArrayInputStream(byteBuffer.array(),
            byteBuffer.arrayOffset(), byteBuffer.limit())));
      } catch (IOException e) {
        // Best-effort: a read failure is logged and the (possibly partially
        // populated) instance is still returned.
        LOG.error("Exception thrown reading instance of: {}", writableClazz, e);
      }
      return instance;
    }
  }
private static class WritableToBytesMapFn<T extends Writable> extends MapFn<T, ByteBuffer> {
private static final Logger LOG = LoggerFactory.getLogger(WritableToBytesMapFn.class);
@Override
public ByteBuffer map(T input) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream das = new DataOutputStream(baos);
try {
input.write(das);
} catch (IOException e) {
LOG.error("Exception thrown converting Writable to bytes", e);
}
return ByteBuffer.wrap(baos.toByteArray());
}
}
public static final <T extends Writable> AvroType<T> writables(Class<T> clazz) {
return new AvroType<T>(clazz, Schema.create(Schema.Type.BYTES), new BytesToWritableMapFn<T>(clazz),
new WritableToBytesMapFn<T>(), new WritableDeepCopier<T>(clazz), AvroType.AvroRecordType.GENERIC);
}
private static class GenericDataArrayToCollection<T> extends MapFn<Object, Collection<T>> {
private final MapFn<Object, T> mapFn;
public GenericDataArrayToCollection(MapFn<Object, T> mapFn) {
this.mapFn = mapFn;
}
@Override
public void configure(Configuration conf) {
mapFn.configure(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
mapFn.setContext(context);
}
@Override
public void initialize() {
mapFn.initialize();
}
@Override
public Collection<T> map(Object input) {
Collection<T> ret = Lists.newArrayList();
if (input instanceof Collection) {
for (Object in : (Collection<Object>) input) {
ret.add(mapFn.map(in));
}
} else {
// Assume it is an array
Object[] arr = (Object[]) input;
for (Object in : arr) {
ret.add(mapFn.map(in));
}
}
return ret;
}
}
private static class CollectionToGenericDataArray extends MapFn<Collection<?>, GenericData.Array<?>> {
private final MapFn mapFn;
private final String jsonSchema;
private transient Schema schema;
public CollectionToGenericDataArray(Schema schema, MapFn mapFn) {
this.mapFn = mapFn;
this.jsonSchema = schema.toString();
}
@Override
public void configure(Configuration conf) {
mapFn.configure(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
mapFn.setContext(context);
}
@Override
public void initialize() {
mapFn.initialize();
}
@Override
public GenericData.Array<?> map(Collection<?> input) {
if (schema == null) {
schema = new Schema.Parser().parse(jsonSchema);
}
GenericData.Array array = new GenericData.Array(input.size(), schema);
for (Object in : input) {
array.add(mapFn.map(in));
}
return array;
}
}
  /**
   * Returns a PType for a {@link Collection} of {@code ptype} elements, encoded
   * as an Avro array whose element schema is widened to allow nulls.
   */
  public static final <T> AvroType<Collection<T>> collections(PType<T> ptype) {
    AvroType<T> avroType = (AvroType<T>) ptype;
    Schema collectionSchema = Schema.createArray(allowNulls(avroType.getSchema()));
    GenericDataArrayToCollection<T> input = new GenericDataArrayToCollection<T>(avroType.getInputMapFn());
    CollectionToGenericDataArray output = new CollectionToGenericDataArray(collectionSchema, avroType.getOutputMapFn());
    return new AvroType(Collection.class, collectionSchema, input, output, new CollectionDeepCopier<T>(ptype),
        avroType.getRecordType(), ptype);
  }
private static class AvroMapToMap<T> extends MapFn<Map<CharSequence, Object>, Map<String, T>> {
private final MapFn<Object, T> mapFn;
public AvroMapToMap(MapFn<Object, T> mapFn) {
this.mapFn = mapFn;
}
@Override
public void configure(Configuration conf) {
mapFn.configure(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
mapFn.setContext(context);
}
@Override
public void initialize() {
mapFn.initialize();
}
@Override
public Map<String, T> map(Map<CharSequence, Object> input) {
Map<String, T> out = Maps.newHashMap();
for (Map.Entry<CharSequence, Object> e : input.entrySet()) {
out.put(e.getKey().toString(), mapFn.map(e.getValue()));
}
return out;
}
}
private static class MapToAvroMap<T> extends MapFn<Map<String, T>, Map<Utf8, Object>> {
private final MapFn<T, Object> mapFn;
public MapToAvroMap(MapFn<T, Object> mapFn) {
this.mapFn = mapFn;
}
@Override
public void configure(Configuration conf) {
mapFn.configure(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
mapFn.setContext(context);
}
@Override
public void initialize() {
this.mapFn.initialize();
}
@Override
public Map<Utf8, Object> map(Map<String, T> input) {
Map<Utf8, Object> out = Maps.newHashMap();
for (Map.Entry<String, T> e : input.entrySet()) {
out.put(new Utf8(e.getKey()), mapFn.map(e.getValue()));
}
return out;
}
}
  /**
   * Returns a PType for a {@link Map} with String keys and {@code ptype}
   * values, encoded as an Avro map whose value schema allows nulls.
   */
  public static final <T> AvroType<Map<String, T>> maps(PType<T> ptype) {
    AvroType<T> avroType = (AvroType<T>) ptype;
    Schema mapSchema = Schema.createMap(allowNulls(avroType.getSchema()));
    AvroMapToMap<T> inputFn = new AvroMapToMap<T>(avroType.getInputMapFn());
    MapToAvroMap<T> outputFn = new MapToAvroMap<T>(avroType.getOutputMapFn());
    return new AvroType(Map.class, mapSchema, inputFn, outputFn, new MapDeepCopier<T>(ptype),
        avroType.getRecordType(), ptype);
  }
private static class GenericRecordToTuple extends MapFn<GenericRecord, Tuple> {
private final TupleFactory<?> tupleFactory;
private final List<MapFn> fns;
public GenericRecordToTuple(TupleFactory<?> tupleFactory, PType<?>... ptypes) {
this.tupleFactory = tupleFactory;
this.fns = Lists.newArrayList();
for (PType<?> ptype : ptypes) {
AvroType atype = (AvroType) ptype;
fns.add(atype.getInputMapFn());
}
}
@Override
public void configure(Configuration conf) {
for (MapFn fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (MapFn fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
for (MapFn fn : fns) {
fn.initialize();
}
tupleFactory.initialize();
}
@Override
public Tuple map(GenericRecord input) {
Object[] values = new Object[fns.size()];
for (int i = 0; i < values.length; i++) {
Object v = input.get(i);
if (v == null) {
values[i] = null;
} else {
values[i] = fns.get(i).map(v);
}
}
return tupleFactory.makeTuple(values);
}
}
private static class TupleToGenericRecord extends MapFn<Tuple, GenericRecord> {
private final List<MapFn> fns;
private final List<AvroType> avroTypes;
private final String jsonSchema;
private final boolean isReflect;
private transient Schema schema;
private transient AvroMode mode;
public TupleToGenericRecord(Schema schema, PType<?>... ptypes) {
this.fns = Lists.newArrayList();
this.avroTypes = Lists.newArrayList();
this.jsonSchema = schema.toString();
boolean reflectFound = false;
boolean specificFound = false;
for (PType ptype : ptypes) {
AvroType atype = (AvroType) ptype;
fns.add(atype.getOutputMapFn());
avroTypes.add(atype);
if (atype.hasReflect()) {
reflectFound = true;
}
if (atype.hasSpecific()) {
specificFound = true;
}
}
if (specificFound && reflectFound) {
checkCombiningSpecificAndReflectionSchemas();
}
this.isReflect = reflectFound;
}
@Override
public void configure(Configuration conf) {
for (MapFn fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (MapFn fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
this.schema = new Schema.Parser().parse(jsonSchema);
for (MapFn fn : fns) {
fn.initialize();
}
if (getConfiguration() != null) {
mode = AvroMode.REFLECT.withFactoryFromConfiguration(getConfiguration());
} else {
mode = AvroMode.REFLECT;
}
}
private GenericRecord createRecord() {
if (isReflect) {
return new ReflectGenericRecord(schema, mode);
} else {
return new GenericData.Record(schema);
}
}
@Override
public GenericRecord map(Tuple input) {
GenericRecord record = createRecord();
for (int i = 0; i < input.size(); i++) {
Object v = input.get(i);
if (v == null) {
record.put(i, null);
} else {
record.put(i, fns.get(i).map(v));
}
}
return record;
}
}
  /** Returns a PType for pairs of the given component types, encoded as a two-field Avro record. */
  public static final <V1, V2> AvroType<Pair<V1, V2>> pairs(PType<V1> p1, PType<V2> p2) {
    Schema schema = createTupleSchema(p1, p2);
    GenericRecordToTuple input = new GenericRecordToTuple(TupleFactory.PAIR, p1, p2);
    TupleToGenericRecord output = new TupleToGenericRecord(schema, p1, p2);
    return new AvroType(Pair.class, schema, input, output, new TupleDeepCopier(Pair.class, p1, p2), null, p1, p2);
  }
  /** Returns a PType for 3-tuples of the given component types. */
  public static final <V1, V2, V3> AvroType<Tuple3<V1, V2, V3>> triples(PType<V1> p1, PType<V2> p2, PType<V3> p3) {
    Schema schema = createTupleSchema(p1, p2, p3);
    return new AvroType(Tuple3.class, schema, new GenericRecordToTuple(TupleFactory.TUPLE3, p1, p2, p3),
        new TupleToGenericRecord(schema, p1, p2, p3), new TupleDeepCopier(Tuple3.class, p1, p2, p3), null, p1, p2, p3);
  }
  /** Returns a PType for 4-tuples of the given component types. */
  public static final <V1, V2, V3, V4> AvroType<Tuple4<V1, V2, V3, V4>> quads(PType<V1> p1, PType<V2> p2, PType<V3> p3,
      PType<V4> p4) {
    Schema schema = createTupleSchema(p1, p2, p3, p4);
    return new AvroType(Tuple4.class, schema, new GenericRecordToTuple(TupleFactory.TUPLE4, p1, p2, p3, p4),
        new TupleToGenericRecord(schema, p1, p2, p3, p4), new TupleDeepCopier(Tuple4.class, p1, p2, p3, p4), null,
        p1, p2, p3, p4);
  }
  /** Returns a PType for arbitrary-arity tuples with auto-generated field names. */
  public static final AvroType<TupleN> tuples(PType... ptypes) {
    Schema schema = createTupleSchema(ptypes);
    return new AvroType(TupleN.class, schema, new GenericRecordToTuple(TupleFactory.TUPLEN, ptypes),
        new TupleToGenericRecord(schema, ptypes), new TupleDeepCopier(TupleN.class, ptypes), null, ptypes);
  }
  /** Returns a tuple PType whose Avro record name and field names are supplied by the caller. */
  public static final AvroType<TupleN> namedTuples(String tupleName, String[] fieldNames, PType[] ptypes) {
    Preconditions.checkArgument(fieldNames.length == ptypes.length,
        "Number of field names must match number of ptypes");
    Schema schema = createTupleSchema(tupleName, fieldNames, ptypes);
    return new AvroType(TupleN.class, schema, new GenericRecordToTuple(TupleFactory.TUPLEN, ptypes),
        new TupleToGenericRecord(schema, ptypes), new TupleDeepCopier(TupleN.class, ptypes), null, ptypes);
  }
  /** Returns a PType for a custom Tuple subclass, instantiated via a constructor matching the component classes. */
  public static <T extends Tuple> AvroType<T> tuples(Class<T> clazz, PType... ptypes) {
    Schema schema = createTupleSchema(ptypes);
    Class[] typeArgs = new Class[ptypes.length];
    for (int i = 0; i < typeArgs.length; i++) {
      typeArgs[i] = ptypes[i].getTypeClass();
    }
    TupleFactory<T> factory = TupleFactory.create(clazz, typeArgs);
    return new AvroType<T>(clazz, schema, new GenericRecordToTuple(factory, ptypes), new TupleToGenericRecord(schema,
        ptypes), new TupleDeepCopier(clazz, ptypes), null, ptypes);
  }
private static class UnionRecordToTuple extends MapFn<GenericRecord, Union> {
private final List<MapFn> fns;
public UnionRecordToTuple(PType<?>... ptypes) {
this.fns = Lists.newArrayList();
for (PType<?> ptype : ptypes) {
AvroType atype = (AvroType) ptype;
fns.add(atype.getInputMapFn());
}
}
@Override
public void configure(Configuration conf) {
for (MapFn fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (MapFn fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
for (MapFn fn : fns) {
fn.initialize();
}
}
@Override
public Union map(GenericRecord input) {
int index = (Integer) input.get(0);
return new Union(index, fns.get(index).map(input.get(1)));
}
}
private static class TupleToUnionRecord extends MapFn<Union, GenericRecord> {
private final List<MapFn> fns;
private final List<AvroType> avroTypes;
private final String jsonSchema;
private final boolean isReflect;
private transient Schema schema;
private transient AvroMode mode;
public TupleToUnionRecord(Schema schema, PType<?>... ptypes) {
this.fns = Lists.newArrayList();
this.avroTypes = Lists.newArrayList();
this.jsonSchema = schema.toString();
boolean reflectFound = false;
boolean specificFound = false;
for (PType ptype : ptypes) {
AvroType atype = (AvroType) ptype;
fns.add(atype.getOutputMapFn());
avroTypes.add(atype);
if (atype.hasReflect()) {
reflectFound = true;
}
if (atype.hasSpecific()) {
specificFound = true;
}
}
if (specificFound && reflectFound) {
checkCombiningSpecificAndReflectionSchemas();
}
this.isReflect = reflectFound;
}
@Override
public void configure(Configuration conf) {
for (MapFn fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (MapFn fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
this.schema = new Schema.Parser().parse(jsonSchema);
for (MapFn fn : fns) {
fn.initialize();
}
if (getConfiguration() != null) {
mode = AvroMode.REFLECT.withFactoryFromConfiguration(getConfiguration());
} else {
mode = AvroMode.REFLECT;
}
}
private GenericRecord createRecord() {
if (isReflect) {
return new ReflectGenericRecord(schema, mode);
} else {
return new GenericData.Record(schema);
}
}
@Override
public GenericRecord map(Union input) {
GenericRecord record = createRecord();
int index = input.getIndex();
record.put(0, index);
record.put(1, fns.get(index).map(input.getValue()));
return record;
}
}
  /**
   * Returns a PType for a tagged union of the given component types, encoded as
   * an Avro record containing an integer branch index and a union-typed value.
   */
  public static PType<Union> unionOf(PType<?>... ptypes) {
    List<Schema> schemas = Lists.newArrayList();
    MessageDigest md;
    try {
      md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException(e);
    }
    // Deduplicate component schemas (Avro unions may not repeat a branch
    // schema) and fold each distinct schema into the digest used for naming.
    for (int i = 0; i < ptypes.length; i++) {
      AvroType atype = (AvroType) ptypes[i];
      Schema schema = atype.getSchema();
      if (!schemas.contains(schema)) {
        schemas.add(schema);
        md.update(schema.toString().getBytes(Charsets.UTF_8));
      }
    }
    List<Schema.Field> fields = Lists.newArrayList(
        new Schema.Field("index", Schema.create(Type.INT), "", null),
        new Schema.Field("value", Schema.createUnion(schemas), "", null));
    // URL-safe base64 may contain '-', which is replaced — presumably because
    // it is not a valid character in an Avro record name.
    String schemaName = "union" + Base64.encodeBase64URLSafeString(md.digest()).replace('-', 'x');
    Schema schema = Schema.createRecord(schemaName, "", "crunch", false);
    schema.setFields(fields);
    return new AvroType<Union>(Union.class, schema, new UnionRecordToTuple(ptypes),
        new TupleToUnionRecord(schema, ptypes), new UnionDeepCopier(ptypes), null, ptypes);
  }
private static String[] fieldNames(int len) {
String[] ret = new String[len];
for (int i = 0; i < ret.length; i++) {
ret[i]= "v" + i;
}
return ret;
}
private static Schema createTupleSchema(PType<?>... ptypes) throws RuntimeException {
return createTupleSchema("", fieldNames(ptypes.length), ptypes);
}
  /**
   * Builds the Avro record schema for a tuple type. When {@code tupleName} is
   * empty, a globally unique record name is derived from an MD5 digest of the
   * field names and schemas; otherwise the supplied (possibly dotted) name is
   * split into namespace and record name.
   */
  private static Schema createTupleSchema(String tupleName, String[] fieldNames, PType<?>[] ptypes) throws RuntimeException {
    // Guarantee each tuple schema has a globally unique name
    List<Schema.Field> fields = Lists.newArrayList();
    MessageDigest md;
    try {
      md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException(e);
    }
    for (int i = 0; i < ptypes.length; i++) {
      AvroType atype = (AvroType) ptypes[i];
      // Every field schema is widened to allow null so tuple slots may be empty.
      Schema fieldSchema = allowNulls(atype.getSchema());
      fields.add(new Schema.Field(fieldNames[i], fieldSchema, "", null));
      md.update(fieldNames[i].getBytes(Charsets.UTF_8));
      md.update(fieldSchema.toString().getBytes(Charsets.UTF_8));
    }
    String schemaName, schemaNamespace;
    if (tupleName.isEmpty()) {
      // URL-safe base64 may contain '-', which is replaced — presumably because
      // it is not a valid character in an Avro record name.
      schemaName = "tuple" + Base64.encodeBase64URLSafeString(md.digest()).replace('-', 'x');
      schemaNamespace = "crunch";
    } else {
      int splitIndex = tupleName.lastIndexOf('.');
      if (splitIndex == -1) {
        schemaName = tupleName;
        schemaNamespace = "crunch";
      } else {
        schemaName = tupleName.substring(splitIndex + 1);
        schemaNamespace = tupleName.substring(0, splitIndex);
      }
    }
    Schema schema = Schema.createRecord(schemaName, "", schemaNamespace, false);
    schema.setFields(fields);
    return schema;
  }
  /**
   * Creates a derived PType for {@code clazz} that reuses the Avro
   * representation of {@code base}, converting values with the supplied
   * input/output functions.
   */
  public static final <S, T> AvroType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn,
      PType<S> base) {
    AvroType<S> abase = (AvroType<S>) base;
    return new AvroType<T>(clazz, abase.getSchema(), new CompositeMapFn(abase.getInputMapFn(), inputFn),
        new CompositeMapFn(outputFn, abase.getOutputMapFn()), new AvroDerivedValueDeepCopier(outputFn, inputFn, abase),
        abase.getRecordType(), base.getSubTypes().toArray(new PType[0]));
  }
  /**
   * Like {@link #derived} but uses a no-op deep copier, for derived values that
   * are treated as immutable.
   */
  public static final <S, T> AvroType<T> derivedImmutable(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn,
      PType<S> base) {
    AvroType<S> abase = (AvroType<S>) base;
    return new AvroType<T>(clazz, abase.getSchema(), new CompositeMapFn(abase.getInputMapFn(), inputFn),
        new CompositeMapFn(outputFn, abase.getOutputMapFn()), NoOpDeepCopier.<T>create(), abase.getRecordType(),
        base.getSubTypes().toArray(new PType[0]));
  }
  /** Returns a PType that stores instances of {@code clazz} as JSON strings. */
  public static <T> PType<T> jsons(Class<T> clazz) {
    return PTypes.jsonString(clazz, AvroTypeFamily.getInstance());
  }
/**
 * A table type with an Avro type as key and as value.
 * <p/>
 * The {@code PTableType} returned by this method is also compatible with files containing Avro {@code Pair}s that
 * are created using the {@code org.apache.avro.mapred.AvroJob} class.
 *
 * @param key the PType of the key in the table
 * @param value the PType of the value in the table
 * @return PTableType for reading and writing avro tables
 */
public static final <K, V> AvroTableType<K, V> tableOf(PType<K> key, PType<V> value) {
  // A PTableType supplied as key or value is first flattened into a pair type.
  if (key instanceof PTableType) {
    PTableType keyTable = (PTableType) key;
    key = Avros.pairs(keyTable.getKeyType(), keyTable.getValueType());
  }
  if (value instanceof PTableType) {
    PTableType valueTable = (PTableType) value;
    value = Avros.pairs(valueTable.getKeyType(), valueTable.getValueType());
  }
  return new AvroTableType((AvroType<K>) key, (AvroType<V>) value, Pair.class);
}
/**
 * A table type with an Avro type as key and value. The {@code PTableType} returned by this method is specifically
 * for reading and writing files that are compatible with those created via the
 * {@code org.apache.avro.mapreduce.AvroJob} class. For all other Avro table purposes, the
 * {@link #tableOf(org.apache.crunch.types.PType, org.apache.crunch.types.PType)} method should be used.
 *
 * @param key the PType of the key in the table
 * @param value the PType of the value in the table
 * @return PTableType for reading and writing files compatible with those created via
 *         the {@code org.apache.avro.mapreduce.AvroJob} class
 */
public static final <K, V> AvroKeyValueTableType<K, V> keyValueTableOf(PType<K> key, PType<V> value) {
  // Casting through the raw Class is an unfortunate but harmless way to get the generics
  // out of the way here.
  Class<Pair<K, V>> pairClass = (Class) Pair.class;
  return new AvroKeyValueTableType<K, V>((AvroType<K>) key, (AvroType<V>) value, pairClass);
}
private static final Schema NULL_SCHEMA = Schema.create(Type.NULL);
/**
 * Returns a schema equivalent to {@code base} that also admits null values. A non-union
 * schema is wrapped in a {@code [base, null]} union; a union that already contains a null
 * branch is returned unchanged; any other union is rebuilt with a null branch appended.
 */
static Schema allowNulls(Schema base) {
  if (NULL_SCHEMA.equals(base)) {
    return base;
  }
  if (base.getType() != Type.UNION) {
    return Schema.createUnion(ImmutableList.of(base, NULL_SCHEMA));
  }
  // Union case: scan the branches for an existing null.
  List<Schema> branches = Lists.newArrayList();
  boolean alreadyNullable = false;
  for (Schema branch : base.getTypes()) {
    if (branch.getType() == Schema.Type.NULL) {
      alreadyNullable = true;
    }
    branches.add(branch);
  }
  if (alreadyNullable) {
    return base;
  }
  branches.add(Schema.create(Schema.Type.NULL));
  return Schema.createUnion(branches);
}
/**
 * A {@code GenericData.Record} whose {@link #hashCode()} delegates to
 * {@link #reflectAwareHashCode} so that hashing is consistent under the record's
 * {@code AvroMode}, rather than using the default {@code GenericData.Record} hashing.
 */
private static class ReflectGenericRecord extends GenericData.Record {
  // Made final: the mode is fixed at construction and never reassigned.
  private final AvroMode mode;

  public ReflectGenericRecord(Schema schema, AvroMode mode) {
    super(schema);
    this.mode = mode;
  }

  @Override
  public int hashCode() {
    return reflectAwareHashCode(this, getSchema(), mode);
  }
}
/*
 * TODO: Remove this once we no longer have to support 1.5.4.
 */
// Computes a schema-driven hash code for an Avro datum, resolving fields, union branches
// and enum ordinals through the supplied AvroMode's GenericData instance.
private static int reflectAwareHashCode(Object o, Schema s, AvroMode mode) {
  if (o == null) {
    return 0; // incomplete datum
  }
  switch (s.getType()) {
    case RECORD: {
      int result = 1;
      for (Schema.Field field : s.getFields()) {
        // Fields marked IGNORE do not participate in ordering or hashing.
        if (field.order() != Schema.Field.Order.IGNORE) {
          result = hashCodeAdd(result, mode.getData().getField(o, field.name(), field.pos()), field.schema(), mode);
        }
      }
      return result;
    }
    case ARRAY: {
      int result = 1;
      Schema elementType = s.getElementType();
      for (Object element : (Collection<?>) o) {
        result = hashCodeAdd(result, element, elementType, mode);
      }
      return result;
    }
    case UNION:
      // Hash against the concrete branch schema that this datum resolves to.
      return reflectAwareHashCode(o, s.getTypes().get(mode.getData().resolveUnion(s, o)), mode);
    case ENUM:
      return s.getEnumOrdinal(o.toString());
    case NULL:
      return 0;
    case STRING:
      // Normalize to Utf8 so String and Utf8 representations hash identically.
      return (o instanceof Utf8 ? o : new Utf8(o.toString())).hashCode();
    default:
      return o.hashCode();
  }
}
/** Folds the schema-aware hash code of {@code o} into an accumulated hash code. */
private static int hashCodeAdd(int hashCode, Object o, Schema s, AvroMode mode) {
  int elementHash = reflectAwareHashCode(o, s, mode);
  return hashCode * 31 + elementHash;
}
// Static utility class; not instantiable.
private Avros() {
}
}
| 2,777 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroKeyValueTableType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.hadoop.io.AvroKeyValue;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.TupleDeepCopier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
/**
* A {@code PTableType} that is compatible with Avro key/value files that are created or read using the
* {@code org.apache.avro.mapreduce.AvroJob} class.
*/
class AvroKeyValueTableType<K, V> extends BaseAvroTableType<K, V> implements PTableType<K, V> {

  /**
   * Maps a Crunch {@code Pair} to a {@code GenericRecord} with the key/value schema produced
   * by {@link AvroKeyValue#getSchema}, applying the key and value types' output map functions.
   */
  private static class PairToAvroKeyValueRecord extends MapFn<Pair, GenericRecord> {
    private final MapFn keyMapFn;
    private final MapFn valueMapFn;
    // Schemas are carried as JSON strings because Avro Schema objects are not Serializable.
    private final String keySchemaJson;
    private final String valueSchemaJson;
    private String keyValueSchemaJson;
    // Parsed lazily in map(); transient so it is rebuilt after this MapFn is deserialized.
    private transient Schema keyValueSchema;

    public PairToAvroKeyValueRecord(AvroType keyType, AvroType valueType) {
      this.keyMapFn = keyType.getOutputMapFn();
      this.keySchemaJson = keyType.getSchema().toString();
      this.valueMapFn = valueType.getOutputMapFn();
      this.valueSchemaJson = valueType.getSchema().toString();
    }

    @Override
    public void configure(Configuration conf) {
      // Forward lifecycle calls to the wrapped map functions.
      keyMapFn.configure(conf);
      valueMapFn.configure(conf);
    }

    @Override
    public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
      keyMapFn.setContext(context);
      valueMapFn.setContext(context);
    }

    @Override
    public void initialize() {
      keyMapFn.initialize();
      valueMapFn.initialize();
      // Build the combined key/value schema from the individual key and value schemas.
      Schema.Parser parser = new Schema.Parser();
      keyValueSchemaJson = AvroKeyValue.getSchema(parser.parse(keySchemaJson), parser.parse(valueSchemaJson)).toString();
    }

    @Override
    public GenericRecord map(Pair input) {
      if (keyValueSchema == null) {
        keyValueSchema = new Schema.Parser().parse(keyValueSchemaJson);
      }
      GenericRecord keyValueRecord = new GenericData.Record(keyValueSchema);
      keyValueRecord.put(AvroKeyValue.KEY_FIELD, keyMapFn.map(input.first()));
      keyValueRecord.put(AvroKeyValue.VALUE_FIELD, valueMapFn.map(input.second()));
      return keyValueRecord;
    }
  }

  /**
   * Maps a key/value {@code GenericRecord} back to a Crunch {@code Pair}, applying the
   * key and value types' input map functions to the record's key and value fields.
   */
  private static class AvroKeyValueRecordToPair extends MapFn<GenericRecord, Pair> {
    private final MapFn firstMapFn;
    private final MapFn secondMapFn;

    public AvroKeyValueRecordToPair(MapFn firstMapFn, MapFn secondMapFn) {
      this.firstMapFn = firstMapFn;
      this.secondMapFn = secondMapFn;
    }

    @Override
    public void configure(Configuration conf) {
      firstMapFn.configure(conf);
      secondMapFn.configure(conf);
    }

    @Override
    public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
      firstMapFn.setContext(context);
      secondMapFn.setContext(context);
    }

    @Override
    public void initialize() {
      firstMapFn.initialize();
      secondMapFn.initialize();
    }

    @Override
    public Pair map(GenericRecord input) {
      return Pair.of(
          firstMapFn.map(input.get(AvroKeyValue.KEY_FIELD)),
          secondMapFn.map(input.get(AvroKeyValue.VALUE_FIELD)));
    }
  }

  private final AvroType<K> keyType;
  private final AvroType<V> valueType;

  public AvroKeyValueTableType(AvroType<K> keyType, AvroType<V> valueType, Class<Pair<K, V>> pairClass) {
    super(pairClass, AvroKeyValue.getSchema(keyType.getSchema(), valueType.getSchema()),
        new AvroKeyValueRecordToPair(keyType.getInputMapFn(), valueType.getInputMapFn()),
        new PairToAvroKeyValueRecord(keyType, valueType),
        new TupleDeepCopier(Pair.class, keyType, valueType),
        null, keyType, valueType);
    this.keyType = keyType;
    this.valueType = valueType;
  }

  @Override
  public PType<K> getKeyType() {
    return keyType;
  }

  @Override
  public PType<V> getValueType() {
    return valueType;
  }

  @Override
  public PGroupedTableType<K, V> getGroupedTableType() {
    return new AvroGroupedTableType<K, V>(this);
  }

  @Override
  public Pair<K, V> getDetachedValue(Pair<K, V> value) {
    // Delegate to PTables so the detached pair no longer shares state with reused Avro objects.
    return PTables.getDetachedValue(this, value);
  }
}
| 2,778 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/AvroPairConverter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import java.util.Iterator;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.crunch.Pair;
import org.apache.crunch.types.Converter;
class AvroPairConverter<K, V> implements Converter<AvroKey<K>, AvroValue<V>, Pair<K, V>, Pair<K, Iterable<V>>> {

  // Single mutable wrappers, created lazily and reused across outputKey/outputValue calls
  // to avoid per-record allocation; transient because the wrappers need not be serialized.
  private transient AvroKey<K> keyWrapper = null;
  private transient AvroValue<V> valueWrapper = null;

  @Override
  public Pair<K, V> convertInput(AvroKey<K> key, AvroValue<V> value) {
    // Unwrap the Avro key/value datums into a Crunch pair.
    return Pair.of(key.datum(), value.datum());
  }

  // Converts a grouped input (one key, many wrapped values) into a pair whose second
  // element lazily unwraps each AvroValue as it is iterated.
  public Pair<K, Iterable<V>> convertIterableInput(AvroKey<K> key, Iterable<AvroValue<V>> iter) {
    Iterable<V> it = new AvroWrappedIterable<V>(iter);
    return Pair.of(key.datum(), it);
  }

  @Override
  public AvroKey<K> outputKey(Pair<K, V> value) {
    // NOTE: the returned wrapper is reused; callers must consume it before the next call.
    getKeyWrapper().datum(value.first());
    return keyWrapper;
  }

  @Override
  public AvroValue<V> outputValue(Pair<K, V> value) {
    // NOTE: the returned wrapper is reused; callers must consume it before the next call.
    getValueWrapper().datum(value.second());
    return valueWrapper;
  }

  @Override
  public Class<AvroKey<K>> getKeyClass() {
    return (Class<AvroKey<K>>) getKeyWrapper().getClass();
  }

  @Override
  public Class<AvroValue<V>> getValueClass() {
    return (Class<AvroValue<V>>) getValueWrapper().getClass();
  }

  @Override
  public boolean applyPTypeTransforms() {
    return true;
  }

  // Lazily creates the shared key wrapper.
  private AvroKey<K> getKeyWrapper() {
    if (keyWrapper == null) {
      keyWrapper = new AvroKey<K>();
    }
    return keyWrapper;
  }

  // Lazily creates the shared value wrapper.
  private AvroValue<V> getValueWrapper() {
    if (valueWrapper == null) {
      valueWrapper = new AvroValue<V>();
    }
    return valueWrapper;
  }

  /**
   * An {@code Iterable<V>} view over an iterable of {@code AvroValue<V>} that unwraps
   * each element's datum on the fly.
   */
  private static class AvroWrappedIterable<V> implements Iterable<V> {
    private final Iterable<AvroValue<V>> iters;

    public AvroWrappedIterable(Iterable<AvroValue<V>> iters) {
      this.iters = iters;
    }

    @Override
    public Iterator<V> iterator() {
      return new Iterator<V>() {
        private final Iterator<AvroValue<V>> it = iters.iterator();

        @Override
        public boolean hasNext() {
          return it.hasNext();
        }

        @Override
        public V next() {
          // Unwrap the AvroValue to its underlying datum.
          return it.next().datum();
        }

        @Override
        public void remove() {
          it.remove();
        }
      };
    }
  }
}
| 2,779 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Business object serialization using Apache Avro.
*/
package org.apache.crunch.types.avro;
| 2,780 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/types/avro/BaseAvroTableType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.avro;
import org.apache.avro.Schema;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.types.DeepCopier;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
/**
* Base type for dealing with PTables with Avro keys and values.
*/
abstract class BaseAvroTableType<K, V> extends AvroType<Pair<K, V>> implements PTableType<K, V> {

  /**
   * Forwards all configuration to the {@link AvroType} constructor; concrete subclasses
   * supply the {@code PTableType}-specific behavior (key/value types, grouping, detaching).
   *
   * @param typeClass the Pair class representing table entries
   * @param schema the Avro schema for a full table entry
   * @param inputMapFn maps the Avro representation to a Pair
   * @param outputMapFn maps a Pair to the Avro representation
   * @param deepCopier deep-copy strategy for table entries
   * @param recordType the Avro record flavor (reflect/specific/generic)
   * @param ptypes the component PTypes (key and value)
   */
  protected BaseAvroTableType(Class<Pair<K, V>> typeClass, Schema schema, MapFn inputMapFn, MapFn outputMapFn,
      DeepCopier<Pair<K, V>> deepCopier, AvroRecordType recordType, PType... ptypes) {
    super(typeClass, schema, inputMapFn, outputMapFn, deepCopier, recordType, ptypes);
  }
}
| 2,781 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/CrunchRenameCopyListing.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the
* Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.apache.crunch.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.tools.CopyListing;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.SimpleCopyListing;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Stack;
/**
* A custom {@link CopyListing} implementation capable of dynamically renaming
* the target paths according to a {@link #DISTCP_PATH_RENAMES configured set of values}.
* <p>
* Once https://issues.apache.org/jira/browse/HADOOP-16147 is available, this
* class can be significantly simplified.
* </p>
*/
public class CrunchRenameCopyListing extends SimpleCopyListing {
  /**
   * Comma-separated list of original-file:renamed-file path rename pairs.
   */
  public static final String DISTCP_PATH_RENAMES = "crunch.distcp.path.renames";

  private static final Logger LOG = LoggerFactory.getLogger(CrunchRenameCopyListing.class);

  // Maps original file names to their renamed targets; populated from configuration.
  private final Map<String, String> pathRenames;

  private long totalPaths = 0;
  private long totalBytesToCopy = 0;

  /**
   * Constructor, to initialize configuration.
   *
   * @param configuration The input configuration, with which the source/target FileSystems may be accessed.
   * @param credentials - Credentials object on which the FS delegation tokens are cached. If null
   * delegation token caching is skipped
   */
  public CrunchRenameCopyListing(Configuration configuration, Credentials credentials) {
    super(configuration, credentials);
    pathRenames = new HashMap<>();
    String[] pathRenameConf = configuration.getStrings(DISTCP_PATH_RENAMES);
    if (pathRenameConf == null) {
      throw new IllegalArgumentException("Missing required configuration: " + DISTCP_PATH_RENAMES);
    }
    for (String pathRename : pathRenameConf) {
      String[] pathRenameParts = pathRename.split(":");
      if (pathRenameParts.length != 2) {
        throw new IllegalArgumentException("Invalid path rename format: " + pathRename);
      }
      if (pathRenames.put(pathRenameParts[0], pathRenameParts[1]) != null) {
        throw new IllegalArgumentException("Invalid duplicate path rename: " + pathRenameParts[0]);
      }
    }
    LOG.info("Loaded {} path rename entries", pathRenames.size());
    // Clear out the rename configuration property, as it is no longer needed
    configuration.unset(DISTCP_PATH_RENAMES);
  }

  /**
   * Builds the copy listing by walking each source path, writing one sequence-file entry per
   * file or directory encountered. Mirrors {@code SimpleCopyListing} except that entry keys
   * pass through {@link #getFileListingKey} to apply the configured renames.
   */
  @Override
  public void doBuildListing(SequenceFile.Writer fileListWriter, DistCpOptions options) throws IOException {
    try {
      for (Path path : options.getSourcePaths()) {
        FileSystem sourceFS = path.getFileSystem(getConf());
        final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
        final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
        final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
        path = makeQualified(path);
        FileStatus rootStatus = sourceFS.getFileStatus(path);
        Path sourcePathRoot = computeSourceRootPath(rootStatus, options);
        FileStatus[] sourceFiles = sourceFS.listStatus(path);
        boolean explore = (sourceFiles != null && sourceFiles.length > 0);
        if (!explore || rootStatus.isDirectory()) {
          CopyListingFileStatus rootCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus, preserveAcls,
              preserveXAttrs, preserveRawXAttrs);
          writeToFileListingRoot(fileListWriter, rootCopyListingStatus, sourcePathRoot, options);
        }
        if (explore) {
          for (FileStatus sourceStatus : sourceFiles) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Recording source-path: {} for copy.", sourceStatus.getPath());
            }
            // ACLs/XAttrs are only carried for directories, matching SimpleCopyListing behavior.
            CopyListingFileStatus sourceCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus,
                preserveAcls && sourceStatus.isDirectory(), preserveXAttrs && sourceStatus.isDirectory(),
                preserveRawXAttrs && sourceStatus.isDirectory());
            writeToFileListing(fileListWriter, sourceCopyListingStatus, sourcePathRoot, options);
            if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Traversing non-empty source dir: {}", sourceStatus.getPath());
              }
              traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot, options);
            }
          }
        }
      }
      fileListWriter.close();
      fileListWriter = null;
    } finally {
      // Best-effort close if the normal close above did not complete.
      if (fileListWriter != null) {
        try {
          fileListWriter.close();
        } catch (IOException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Exception in closing {}", fileListWriter, e);
          }
        }
      }
    }
  }

  // Determines the root against which relative target paths are computed, following
  // DistCp's rules for solitary files, sync/overwrite modes and non-existent targets.
  private Path computeSourceRootPath(FileStatus sourceStatus, DistCpOptions options) throws IOException {
    Path target = options.getTargetPath();
    FileSystem targetFS = target.getFileSystem(getConf());
    final boolean targetPathExists = options.getTargetPathExists();
    boolean solitaryFile = options.getSourcePaths().size() == 1 && !sourceStatus.isDirectory();
    if (solitaryFile) {
      if (targetFS.isFile(target) || !targetPathExists) {
        return sourceStatus.getPath();
      } else {
        return sourceStatus.getPath().getParent();
      }
    } else {
      boolean specialHandling =
          (options.getSourcePaths().size() == 1 && !targetPathExists) || options.shouldSyncFolder() || options.shouldOverwrite();
      return specialHandling && sourceStatus.isDirectory() ? sourceStatus.getPath() : sourceStatus.getPath().getParent();
    }
  }

  // Fully qualifies a path against its FileSystem's URI and working directory.
  private Path makeQualified(Path path) throws IOException {
    final FileSystem fs = path.getFileSystem(getConf());
    return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  }

  private static boolean isDirectoryAndNotEmpty(FileSystem fileSystem, FileStatus fileStatus) throws IOException {
    return fileStatus.isDirectory() && getChildren(fileSystem, fileStatus).length > 0;
  }

  private static FileStatus[] getChildren(FileSystem fileSystem, FileStatus parent) throws IOException {
    return fileSystem.listStatus(parent.getPath());
  }

  // Iteratively (via an explicit stack) records every descendant of sourceStatus.
  private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter, FileStatus sourceStatus, Path sourcePathRoot,
      DistCpOptions options) throws IOException {
    FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf());
    final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
    final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
    final boolean preserveRawXattrs = options.shouldPreserveRawXattrs();
    Stack<FileStatus> pathStack = new Stack<>();
    pathStack.push(sourceStatus);
    while (!pathStack.isEmpty()) {
      for (FileStatus child : getChildren(sourceFS, pathStack.pop())) {
        if (LOG.isDebugEnabled()) {
          // Fixed: previously logged the outer sourceStatus path instead of the child being recorded.
          LOG.debug("Recording source-path: {} for copy.", child.getPath());
        }
        CopyListingFileStatus childCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, child,
            preserveAcls && child.isDirectory(), preserveXAttrs && child.isDirectory(), preserveRawXattrs && child.isDirectory());
        writeToFileListing(fileListWriter, childCopyListingStatus, sourcePathRoot, options);
        if (isDirectoryAndNotEmpty(sourceFS, child)) {
          if (LOG.isDebugEnabled()) {
            // Fixed: previously logged the outer sourceStatus path instead of the child directory.
            LOG.debug("Traversing non-empty source dir: {}", child.getPath());
          }
          pathStack.push(child);
        }
      }
    }
  }

  // Writes the root entry unless it would duplicate the target root in sync/overwrite mode.
  private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, CopyListingFileStatus fileStatus, Path sourcePathRoot,
      DistCpOptions options) throws IOException {
    boolean syncOrOverwrite = options.shouldSyncFolder() || options.shouldOverwrite();
    if (fileStatus.getPath().equals(sourcePathRoot) && fileStatus.isDirectory() && syncOrOverwrite) {
      // Skip the root-paths when syncOrOverwrite
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skip {}", fileStatus.getPath());
      }
      return;
    }
    writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, options);
  }

  // Appends one entry to the listing and updates the path/byte counters.
  private void writeToFileListing(SequenceFile.Writer fileListWriter, CopyListingFileStatus fileStatus, Path sourcePathRoot,
      DistCpOptions options) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("REL PATH: {}, FULL PATH: {}",
          DistCpUtils.getRelativePath(sourcePathRoot, fileStatus.getPath()), fileStatus.getPath());
    }
    if (!shouldCopy(fileStatus.getPath())) {
      return;
    }
    fileListWriter.append(getFileListingKey(sourcePathRoot, fileStatus),
        getFileListingValue(fileStatus));
    fileListWriter.sync();
    if (!fileStatus.isDirectory()) {
      totalBytesToCopy += fileStatus.getLen();
    }
    totalPaths++;
  }

  /**
   * Returns the key for an entry in the copy listing sequence file, applying any
   * configured dynamic rename for the file's name.
   * @param sourcePathRoot the root source path for determining the relative target path
   * @param fileStatus the copy listing file status
   * @return the key for the sequence file entry
   */
  protected Text getFileListingKey(Path sourcePathRoot, CopyListingFileStatus fileStatus) {
    Path fileStatusPath = fileStatus.getPath();
    String pathName = fileStatusPath.getName();
    String renamedPathName = pathRenames.get(pathName);
    if (renamedPathName != null && !pathName.equals(renamedPathName)) {
      LOG.info("Applying dynamic rename of {} to {}", pathName, renamedPathName);
      fileStatusPath = new Path(fileStatusPath.getParent(), renamedPathName);
    }
    return new Text(DistCpUtils.getRelativePath(sourcePathRoot, fileStatusPath));
  }

  /**
   * Returns the value for an entry in the copy listing sequence file
   * @param fileStatus the copy listing file status
   * @return the value for the sequence file entry
   */
  protected CopyListingFileStatus getFileListingValue(CopyListingFileStatus fileStatus) {
    return fileStatus;
  }

  @Override
  protected long getBytesToCopy() {
    return totalBytesToCopy;
  }

  @Override
  protected long getNumberOfPaths() {
    return totalPaths;
  }
}
| 2,782 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/PartitionUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import com.google.common.base.Preconditions;
import org.apache.crunch.PCollection;
import org.apache.hadoop.conf.Configuration;
/**
* Helper functions and settings for determining the number of reducers to use in a pipeline
* job created by the Crunch planner.
*/
public class PartitionUtils {
  /** Configuration key for the number of input bytes each reduce task should handle. */
  public static final String BYTES_PER_REDUCE_TASK = "crunch.bytes.per.reduce.task";
  /** Default of one gigabyte of input per reduce task. */
  public static final long DEFAULT_BYTES_PER_REDUCE_TASK = 1000L * 1000L * 1000L;

  /**
   * Set an upper limit on the number of reducers the Crunch planner will set for an MR
   * job when it tries to determine how many reducers to use based on the input size.
   */
  public static final String MAX_REDUCERS = "crunch.max.reducers";
  public static final int DEFAULT_MAX_REDUCERS = 500;

  // Added: utility class must not be instantiable (previously had an implicit public constructor).
  private PartitionUtils() {
  }

  /**
   * Returns the recommended number of reduce partitions for the given collection, using
   * the configuration from the collection's own pipeline.
   */
  public static <T> int getRecommendedPartitions(PCollection<T> pcollection) {
    Configuration conf = pcollection.getPipeline().getConfiguration();
    return getRecommendedPartitions(pcollection, conf);
  }

  /**
   * Returns the recommended number of reduce partitions for the given collection:
   * one partition per {@link #BYTES_PER_REDUCE_TASK} bytes of input (plus one), capped
   * at {@link #MAX_REDUCERS} when that limit is positive.
   *
   * @throws IllegalArgumentException if the configured bytes-per-task is not positive
   */
  public static <T> int getRecommendedPartitions(PCollection<T> pcollection, Configuration conf) {
    long bytesPerTask = conf.getLong(BYTES_PER_REDUCE_TASK, DEFAULT_BYTES_PER_REDUCE_TASK);
    Preconditions.checkArgument(bytesPerTask > 0);
    int recommended = 1 + (int) (pcollection.getSize() / bytesPerTask);
    int maxRecommended = conf.getInt(MAX_REDUCERS, DEFAULT_MAX_REDUCERS);
    if (maxRecommended > 0 && recommended > maxRecommended) {
      return maxRecommended;
    } else {
      return recommended;
    }
  }
}
| 2,783 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/DoFnIterator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import com.google.common.collect.Lists;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
/**
* An {@code Iterator<T>} that combines a delegate {@code Iterator<S>} and a {@code DoFn<S, T>}, generating
* data by passing the contents of the iterator through the function. Note that the input {@code DoFn} should
* have both its {@code setContext} and {@code initialize} functions called <b>before</b> it is passed to
* the constructor.
*
* @param <S> The type of the delegate iterator
* @param <T> The returned type
*/
public class DoFnIterator<S, T> implements Iterator<T> {

  private final Iterator<S> iter;
  private final DoFn<S, T> fn;
  // Buffers values the DoFn emits; drained by next().
  private CacheEmitter<T> cache;
  // True once fn.cleanup has been invoked; ensures cleanup happens exactly once.
  private boolean cleanup;

  public DoFnIterator(Iterator<S> iter, DoFn<S, T> fn) {
    this.iter = iter;
    this.fn = fn;
    this.cache = new CacheEmitter<T>();
    this.cleanup = false;
  }

  @Override
  public boolean hasNext() {
    // Feed inputs through the DoFn until it emits at least one value or the input is exhausted;
    // a single process() call may emit zero or many values.
    while (cache.isEmpty() && iter.hasNext()) {
      fn.process(iter.next(), cache);
    }
    // Input exhausted: give the DoFn one chance to emit trailing values via cleanup().
    if (cache.isEmpty() && !cleanup) {
      fn.cleanup(cache);
      cleanup = true;
    }
    return !cache.isEmpty();
  }

  @Override
  public T next() {
    // NOTE(review): returns null if called when the cache is empty (i.e. without a
    // preceding hasNext() that returned true) — callers are expected to follow the
    // standard hasNext()/next() protocol.
    return cache.poll();
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /**
   * An {@code Emitter} that queues emitted values in a FIFO buffer for the iterator to drain.
   */
  private static class CacheEmitter<T> implements Emitter<T> {
    private final LinkedList<T> cache;

    private CacheEmitter() {
      this.cache = Lists.newLinkedList();
    }

    public synchronized boolean isEmpty() {
      return cache.isEmpty();
    }

    // Removes and returns the oldest emitted value, or null when empty.
    public synchronized T poll() {
      return cache.poll();
    }

    @Override
    public synchronized void emit(T emitted) {
      cache.add(emitted);
    }

    @Override
    public void flush() {
      // No-op
    }
  }
}
| 2,784 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/ClassloaderFallbackObjectInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;
/**
* A custom {@link ObjectInputStream} that falls back to the thread context classloader
* if the class can't be found with the usual classloader that {@link
* ObjectInputStream} uses. This is needed when running in the Scala REPL.
* See https://issues.scala-lang.org/browse/SI-2403.
*/
public class ClassloaderFallbackObjectInputStream extends ObjectInputStream {

  public ClassloaderFallbackObjectInputStream(InputStream in) throws IOException {
    super(in);
  }

  @Override
  protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException,
      ClassNotFoundException {
    try {
      // First try the default ObjectInputStream resolution.
      return super.resolveClass(desc);
    } catch (ClassNotFoundException e) {
      // Fall back to the thread context classloader (needed e.g. in the Scala REPL);
      // initialize=false so static initializers do not run during resolution.
      ClassLoader cl = Thread.currentThread().getContextClassLoader();
      return Class.forName(desc.getName(), false, cl);
    }
  }
}
| 2,785 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/Tuples.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import java.util.Iterator;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import com.google.common.collect.Lists;
import com.google.common.collect.UnmodifiableIterator;
/**
* Utilities for working with subclasses of the {@code Tuple} interface.
*
*/
public class Tuples {

  /**
   * Base class for iterators that zip several underlying iterators together,
   * producing one tuple per step. Iteration stops as soon as any one of the
   * component iterators is exhausted.
   */
  private static abstract class TuplifyIterator<T> extends UnmodifiableIterator<T> {
    protected List<Iterator<?>> iterators;

    public TuplifyIterator(Iterator<?>... components) {
      this.iterators = Lists.newArrayList(components);
    }

    @Override
    public boolean hasNext() {
      // A next tuple exists only if every component can supply an element.
      for (Iterator<?> component : iterators) {
        if (!component.hasNext()) {
          return false;
        }
      }
      return true;
    }

    /** Advances the component iterator at {@code index} and returns its next element. */
    protected Object next(int index) {
      return iterators.get(index).next();
    }
  }

  /** Zips two iterables into an iterable of {@link Pair}s. */
  public static class PairIterable<S, T> implements Iterable<Pair<S, T>> {
    private final Iterable<S> firsts;
    private final Iterable<T> seconds;

    public PairIterable(Iterable<S> first, Iterable<T> second) {
      this.firsts = first;
      this.seconds = second;
    }

    @Override
    public Iterator<Pair<S, T>> iterator() {
      return new TuplifyIterator<Pair<S, T>>(firsts.iterator(), seconds.iterator()) {
        @Override
        public Pair<S, T> next() {
          return Pair.of((S) next(0), (T) next(1));
        }
      };
    }
  }

  /** Zips three iterables into an iterable of {@link Tuple3}s. */
  public static class TripIterable<A, B, C> implements Iterable<Tuple3<A, B, C>> {
    private final Iterable<A> firsts;
    private final Iterable<B> seconds;
    private final Iterable<C> thirds;

    public TripIterable(Iterable<A> first, Iterable<B> second, Iterable<C> third) {
      this.firsts = first;
      this.seconds = second;
      this.thirds = third;
    }

    @Override
    public Iterator<Tuple3<A, B, C>> iterator() {
      return new TuplifyIterator<Tuple3<A, B, C>>(firsts.iterator(), seconds.iterator(),
          thirds.iterator()) {
        @Override
        public Tuple3<A, B, C> next() {
          return new Tuple3<A, B, C>((A) next(0), (B) next(1), (C) next(2));
        }
      };
    }
  }

  /** Zips four iterables into an iterable of {@link Tuple4}s. */
  public static class QuadIterable<A, B, C, D> implements Iterable<Tuple4<A, B, C, D>> {
    private final Iterable<A> firsts;
    private final Iterable<B> seconds;
    private final Iterable<C> thirds;
    private final Iterable<D> fourths;

    public QuadIterable(Iterable<A> first, Iterable<B> second, Iterable<C> third,
        Iterable<D> fourth) {
      this.firsts = first;
      this.seconds = second;
      this.thirds = third;
      this.fourths = fourth;
    }

    @Override
    public Iterator<Tuple4<A, B, C, D>> iterator() {
      return new TuplifyIterator<Tuple4<A, B, C, D>>(firsts.iterator(), seconds.iterator(),
          thirds.iterator(), fourths.iterator()) {
        @Override
        public Tuple4<A, B, C, D> next() {
          return new Tuple4<A, B, C, D>((A) next(0), (B) next(1), (C) next(2), (D) next(3));
        }
      };
    }
  }

  /** Zips an arbitrary number of iterables into an iterable of {@link TupleN}s. */
  public static class TupleNIterable implements Iterable<TupleN> {
    private final Iterator<?>[] componentIters;

    public TupleNIterable(Iterable<?>... iterables) {
      this.componentIters = new Iterator[iterables.length];
      for (int i = 0; i < componentIters.length; i++) {
        componentIters[i] = iterables[i].iterator();
      }
    }

    @Override
    public Iterator<TupleN> iterator() {
      return new TuplifyIterator<TupleN>(componentIters) {
        @Override
        public TupleN next() {
          Object[] values = new Object[componentIters.length];
          for (int i = 0; i < values.length; i++) {
            values[i] = next(i);
          }
          return new TupleN(values);
        }
      };
    }
  }
}
| 2,786 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/CrunchTool.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import java.io.Serializable;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineExecution;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.At;
import org.apache.crunch.io.From;
import org.apache.crunch.io.To;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
/**
* An extension of the {@code Tool} interface that creates a {@code Pipeline}
* instance and provides methods for working with the Pipeline from inside of
* the Tool's run method.
*
*/
public abstract class CrunchTool extends Configured implements Tool, Serializable {
  // Convenience handles to the I/O factory classes so that subclasses can
  // reference e.g. from/to/at helpers from inside run().
  protected static final From from = new From();
  protected static final To to = new To();
  protected static final At at = new At();
  // Pipeline object itself isn't necessarily serializable.
  private transient Pipeline pipeline;
  /** Creates a CrunchTool backed by a new {@link MRPipeline}. */
  public CrunchTool() {
    this(false);
  }
  /**
   * Creates a CrunchTool.
   *
   * @param inMemory if true, use the shared in-memory {@link MemPipeline}
   *     instance; otherwise create a new {@link MRPipeline} for this class
   */
  public CrunchTool(boolean inMemory) {
    this.pipeline = inMemory ? MemPipeline.getInstance() : new MRPipeline(getClass());
  }
  /**
   * Sets the configuration on both this {@code Configured} instance and the
   * underlying pipeline (when both are non-null).
   */
  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf != null && pipeline != null) {
      pipeline.setConfiguration(conf);
    }
  }
  /** Returns the pipeline's configuration rather than the one held by {@code Configured}. */
  @Override
  public Configuration getConf() {
    return pipeline.getConfiguration();
  }
  /** Enables debug mode on the underlying pipeline. */
  public void enableDebug() {
    pipeline.enableDebug();
  }
  /** Reads a {@code Source} into a {@code PCollection} via the pipeline. */
  public <T> PCollection<T> read(Source<T> source) {
    return pipeline.read(source);
  }
  /** Reads a {@code TableSource} into a {@code PTable} via the pipeline. */
  public <K, V> PTable<K, V> read(TableSource<K, V> tableSource) {
    return pipeline.read(tableSource);
  }
  /** Reads a text file into a {@code PCollection} of its lines. */
  public PCollection<String> readTextFile(String pathName) {
    return pipeline.readTextFile(pathName);
  }
  /** Writes the given collection to the given target via the pipeline. */
  public void write(PCollection<?> pcollection, Target target) {
    pipeline.write(pcollection, target);
  }
  /** Writes the given collection to a text file at the given path. */
  public void writeTextFile(PCollection<?> pcollection, String pathName) {
    pipeline.writeTextFile(pcollection, pathName);
  }
  /** Materializes the contents of the given collection into client memory. */
  public <T> Iterable<T> materialize(PCollection<T> pcollection) {
    return pipeline.materialize(pcollection);
  }
  /** Runs the pipeline synchronously and returns the result. */
  public PipelineResult run() {
    return pipeline.run();
  }
  /** Starts the pipeline asynchronously and returns a handle to the execution. */
  public PipelineExecution runAsync() {
    return pipeline.runAsync();
  }
  /** Runs any remaining work and cleans the pipeline up. */
  public PipelineResult done() {
    return pipeline.done();
  }
  /** Exposes the underlying pipeline to subclasses. */
  protected Pipeline getPipeline() {
    return pipeline;
  }
}
| 2,787 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/UnionReadableData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.IOException;
import java.util.List;
import java.util.Set;
public class UnionReadableData<T> implements ReadableData<T> {

  private final List<ReadableData<T>> data;

  public UnionReadableData(List<ReadableData<T>> data) {
    this.data = data;
  }

  /** Returns the union of the source targets across every component. */
  @Override
  public Set<SourceTarget<?>> getSourceTargets() {
    Set<SourceTarget<?>> combined = Sets.newHashSet();
    for (ReadableData<T> component : data) {
      combined.addAll(component.getSourceTargets());
    }
    return combined;
  }

  @Override
  public void configure(Configuration conf) {
    // Each component configures itself against the same Configuration.
    for (ReadableData<T> component : data) {
      component.configure(conf);
    }
  }

  /** Reads every component and presents their contents as one concatenated iterable. */
  @Override
  public Iterable<T> read(final TaskInputOutputContext<?, ?, ?, ?> context) throws IOException {
    List<Iterable<T>> components = Lists.newArrayList();
    for (ReadableData<T> component : data) {
      components.add(component.read(context));
    }
    return Iterables.concat(components);
  }
}
| 2,788 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/HashUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
/**
* Utility methods for working with hash codes.
*/
public class HashUtil {

  /**
   * Applies a supplemental hashing function to an integer, increasing variability in lower-order
   * bits. This method is intended to avoid collisions in functions which rely on variance in the
   * lower bits of a hash code (e.g. hash partitioning).
   */
  // The shift/xor sequence below matches Guava's com.google.common.collect.Hashing class.
  // That code was written by Doug Lea with assistance from members of JCP
  // JSR-166 Expert Group and released to the public domain, as explained at
  // http://creativecommons.org/licenses/publicdomain
  //
  // As of 2010/06/11, it is identical to the (package private) hash
  // method in OpenJDK 7's java.util.HashMap class.
  public static int smearHash(int hashCode) {
    int smeared = hashCode ^ (hashCode >>> 20) ^ (hashCode >>> 12);
    return smeared ^ (smeared >>> 7) ^ (smeared >>> 4);
  }
}
| 2,789 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/DistCache.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import java.io.File;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Enumeration;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* Provides functions for working with Hadoop's distributed cache. These
* include:
* <ul>
* <li>
* Functions for working with a job-specific distributed cache of objects, like
* the serialized runtime nodes in a MapReduce.</li>
* <li>
* Functions for adding library jars to the distributed cache, which will be
* added to the classpath of MapReduce tasks.</li>
* </ul>
*/
public class DistCache {
  /**
   * Configuration key for setting the replication factor for files distributed using the Crunch
   * DistCache helper class. This can be used to scale read access for files used by the Crunch
   * framework.
   */
  public static final String DIST_CACHE_REPLICATION = "crunch.distcache.replication";
  // Configuration key holding the paths of jars to export to the distributed
  // cache.
  private static final String TMPJARS_KEY = "tmpjars";
  /**
   * Serializes {@code value} to {@code path} and registers that file in the distributed cache
   * so tasks can later retrieve the object via {@link #read(Configuration, Path)}.
   *
   * @param conf the configuration the cache file is registered with
   * @param path the path the serialized object is written to
   * @param value the object to serialize; must be {@link java.io.Serializable}
   * @throws IOException if the object cannot be written to the filesystem
   */
  public static void write(Configuration conf, Path path, Object value) throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    short replication = (short) conf.getInt(DIST_CACHE_REPLICATION, fs.getDefaultReplication(path));
    ObjectOutputStream oos = new ObjectOutputStream(fs.create(path, replication));
    try {
      oos.writeObject(value);
    } finally {
      // Close in a finally block so the stream (and underlying HDFS file handle)
      // is not leaked if serialization fails part-way through.
      oos.close();
    }
    DistributedCache.addCacheFile(path.toUri(), conf);
  }
  /**
   * Deserializes and returns the object stored at the task-local cached copy of
   * {@code requestedFile}.
   *
   * @throws IOException if the file has no cached location or cannot be read
   */
  public static Object read(Configuration conf, Path requestedFile) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Path cachedPath = null;
    try {
      cachedPath = getPathToCacheFile(requestedFile, conf);
    } catch (CrunchRuntimeException cre) {
      throw new IOException("Can not determine cached location for " + requestedFile.toString(), cre);
    }
    if (cachedPath == null || !localFs.exists(cachedPath)) {
      throw new IOException("Expected file with path: " + requestedFile.toString() + " to be cached");
    }
    ObjectInputStream ois = null;
    try {
      ois = new ObjectInputStream(localFs.open(cachedPath));
      return ois.readObject();
    } catch (ClassNotFoundException e) {
      throw new CrunchRuntimeException(e);
    } finally {
      if (ois != null) {
        ois.close();
      }
    }
  }
  /** Registers the file at {@code path} in the distributed cache for jobs using {@code conf}. */
  public static void addCacheFile(Path path, Configuration conf) {
    DistributedCache.addCacheFile(path.toUri(), conf);
  }
  /**
   * Returns the task-local path of a cached file whose name matches {@code path}'s file name,
   * or {@code null} if no such file has been localized. Matching is done by file-name suffix.
   */
  public static Path getPathToCacheFile(Path path, Configuration conf) {
    try {
      Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
      // getLocalCacheFiles returns null (rather than an empty array) when no
      // files have been localized; guard against an NPE in that case.
      if (localFiles != null) {
        for (Path localPath : localFiles) {
          if (localPath.toString().endsWith(path.getName())) {
            return localPath.makeQualified(FileSystem.getLocal(conf));
          }
        }
      }
    } catch (IOException e) {
      throw new CrunchRuntimeException(e);
    }
    return null;
  }
  /**
   * Adds the specified jar to the distributed cache of jobs using the provided
   * configuration. The jar will be placed on the classpath of tasks run by the
   * job.
   *
   * @param conf
   *          The configuration used to add the jar to the distributed cache.
   * @param jarFile
   *          The jar file to add to the distributed cache.
   * @throws IOException
   *           If the jar file does not exist or there is a problem accessing
   *           the file.
   */
  public static void addJarToDistributedCache(Configuration conf, File jarFile) throws IOException {
    if (!jarFile.exists()) {
      throw new IOException("Jar file: " + jarFile.getCanonicalPath() + " does not exist.");
    }
    if (!jarFile.getName().endsWith(".jar")) {
      throw new IllegalArgumentException("File: " + jarFile.getCanonicalPath() + " is not a .jar " + "file.");
    }
    // Get a qualified path for the jar.
    FileSystem fileSystem = FileSystem.getLocal(conf);
    Path jarPath = new Path(jarFile.getCanonicalPath());
    String qualifiedPath = jarPath.makeQualified(fileSystem).toString();
    // Add the jar to the configuration variable, appending to any jars already present.
    String jarConfiguration = conf.get(TMPJARS_KEY, "");
    if (!jarConfiguration.isEmpty()) {
      jarConfiguration += ",";
    }
    jarConfiguration += qualifiedPath;
    conf.set(TMPJARS_KEY, jarConfiguration);
  }
  /**
   * Adds the jar at the specified path to the distributed cache of jobs using
   * the provided configuration. The jar will be placed on the classpath of
   * tasks run by the job.
   *
   * @param conf
   *          The configuration used to add the jar to the distributed cache.
   * @param jarFile
   *          The path to the jar file to add to the distributed cache.
   * @throws IOException
   *           If the jar file does not exist or there is a problem accessing
   *           the file.
   */
  public static void addJarToDistributedCache(Configuration conf, String jarFile) throws IOException {
    addJarToDistributedCache(conf, new File(jarFile));
  }
  /**
   * Finds the path to a jar that contains the class provided, if any. There is
   * no guarantee that the jar returned will be the first on the classpath to
   * contain the file. This method is basically lifted out of Hadoop's
   * {@link org.apache.hadoop.mapred.JobConf} class.
   *
   * @param jarClass
   *          The class the jar file should contain.
   * @return The path to a jar file that contains the class, or
   *         <code>null</code> if no such jar exists.
   * @throws IOException
   *           If there is a problem searching for the jar file.
   */
  public static String findContainingJar(Class<?> jarClass) throws IOException {
    ClassLoader loader = jarClass.getClassLoader();
    String classFile = jarClass.getName().replaceAll("\\.", "/") + ".class";
    for (Enumeration<URL> itr = loader.getResources(classFile); itr.hasMoreElements();) {
      URL url = itr.nextElement();
      if ("jar".equals(url.getProtocol())) {
        String toReturn = url.getPath();
        if (toReturn.startsWith("file:")) {
          toReturn = toReturn.substring("file:".length());
        }
        // URLDecoder is a misnamed class, since it actually decodes
        // x-www-form-urlencoded MIME type rather than actual
        // URL encoding (which the file path has). Therefore it would
        // decode +s to ' 's which is incorrect (spaces are actually
        // either unencoded or encoded as "%20"). Replace +s first, so
        // that they are kept sacred during the decoding process.
        toReturn = toReturn.replaceAll("\\+", "%2B");
        toReturn = URLDecoder.decode(toReturn, "UTF-8");
        return toReturn.replaceAll("!.*$", "");
      }
    }
    return null;
  }
  /**
   * Adds all jars under the specified directory to the distributed cache of
   * jobs using the provided configuration. The jars will be placed on the
   * classpath of tasks run by the job. This method does not descend into
   * subdirectories when adding jars.
   *
   * @param conf
   *          The configuration used to add jars to the distributed cache.
   * @param jarDirectory
   *          A directory containing jar files to add to the distributed cache.
   * @throws IOException
   *           If the directory does not exist or there is a problem accessing
   *           the directory.
   */
  public static void addJarDirToDistributedCache(Configuration conf, File jarDirectory) throws IOException {
    if (!jarDirectory.exists() || !jarDirectory.isDirectory()) {
      throw new IOException("Jar directory: " + jarDirectory.getCanonicalPath() + " does not "
          + "exist or is not a directory.");
    }
    File[] entries = jarDirectory.listFiles();
    // listFiles can return null on an I/O error even after the isDirectory check.
    if (entries == null) {
      throw new IOException("Unable to list contents of jar directory: "
          + jarDirectory.getCanonicalPath());
    }
    for (File file : entries) {
      if (!file.isDirectory() && file.getName().endsWith(".jar")) {
        addJarToDistributedCache(conf, file);
      }
    }
  }
  /**
   * Adds all jars under the directory at the specified path to the distributed
   * cache of jobs using the provided configuration. The jars will be placed on
   * the classpath of the tasks run by the job. This method does not descend
   * into subdirectories when adding jars.
   *
   * @param conf
   *          The configuration used to add jars to the distributed cache.
   * @param jarDirectory
   *          The path to a directory containing jar files to add to the
   *          distributed cache.
   * @throws IOException
   *           If the directory does not exist or there is a problem accessing
   *           the directory.
   */
  public static void addJarDirToDistributedCache(Configuration conf, String jarDirectory) throws IOException {
    addJarDirToDistributedCache(conf, new File(jarDirectory));
  }
}
| 2,790 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/DelegatingReadableData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import org.apache.crunch.DoFn;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.IOException;
import java.util.Iterator;
import java.util.Set;
/**
* Implements the {@code ReadableData<T>} interface by delegating to an {@code ReadableData<S>} instance
* and passing its contents through a {@code DoFn<S, T>}.
*/
public class DelegatingReadableData<S, T> implements ReadableData<T> {

  private final ReadableData<S> source;
  private final DoFn<S, T> transform;

  public DelegatingReadableData(ReadableData<S> delegate, DoFn<S, T> fn) {
    this.source = delegate;
    this.transform = fn;
  }

  @Override
  public Set<SourceTarget<?>> getSourceTargets() {
    return source.getSourceTargets();
  }

  @Override
  public void configure(Configuration conf) {
    // Both the wrapped data and the transforming function get a chance to configure themselves.
    source.configure(conf);
    transform.configure(conf);
  }

  /**
   * Reads the wrapped data and lazily passes each record through the
   * {@code DoFn} as the returned iterable is consumed.
   */
  @Override
  public Iterable<T> read(TaskInputOutputContext<?, ?, ?, ?> context) throws IOException {
    transform.setContext(context);
    transform.initialize();
    final Iterable<S> rawData = source.read(context);
    return new Iterable<T>() {
      @Override
      public Iterator<T> iterator() {
        return new DoFnIterator<S, T>(rawData.iterator(), transform);
      }
    };
  }
}
| 2,791 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/SerializableSupplier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.util;
import com.google.common.base.Supplier;
import java.io.Serializable;
/**
 * An extension of Guava's {@link Supplier} interface that indicates that an instance
 * will also implement {@link Serializable}, which makes this object suitable for use
 * with Crunch's DoFns when we need to construct an instance of a non-serializable
 * type for use in processing.
 *
 * @param <T> the type of object supplied by {@link #get()}
 */
public interface SerializableSupplier<T> extends Supplier<T>, Serializable {
}
| 2,792 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/util/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An assorted set of utilities.
*/
package org.apache.crunch.util;
| 2,793 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/ReadableSource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Source;
import org.apache.hadoop.conf.Configuration;
/**
* An extension of the {@code Source} interface that indicates that a
* {@code Source} instance may be read as a series of records by the client
* code. This is used to determine whether a {@code PCollection} instance can be
* materialized.
*/
public interface ReadableSource<T> extends Source<T> {
  /**
   * Returns an {@code Iterable} that contains the contents of this source.
   *
   * @param conf The current {@code Configuration} instance
   * @return the contents of this {@code Source} as an {@code Iterable} instance
   * @throws IOException if the underlying data cannot be read
   */
  Iterable<T> read(Configuration conf) throws IOException;
  /**
   * @return a {@code ReadableData} instance containing the data referenced by this
   * {@code ReadableSource}.
   */
  ReadableData<T> asReadable();
}
| 2,794 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/FormatBundle.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A combination of an {@link InputFormat} or {@link OutputFormat} and any extra
* configuration information that format class needs to run.
*
* <p>The {@code FormatBundle} allow us to let different formats act as
* if they are the only format that exists in a particular MapReduce job, even
* when we have multiple types of inputs and outputs within a single job.
*/
public class FormatBundle<K> implements Serializable, Writable, Configurable {
private final Logger LOG = LoggerFactory.getLogger(FormatBundle.class);
/**
* A comma-separated list of properties whose value will be redacted.
* MR config to redact job conf properties: https://issues.apache.org/jira/browse/MAPREDUCE-6741
*/
private static final String MR_JOB_REDACTED_PROPERTIES = "mapreduce.job.redacted-properties";
private static final String REDACTION_REPLACEMENT_VAL = "*********(redacted)";
private final String FILESYSTEM_BLACKLIST_PATTERNS_KEY = "crunch.fs.props.blacklist.patterns";
private final String[] FILESYSTEM_BLACKLIST_PATTERNS_DEFAULT =
new String[] {
"^fs\\.defaultFS$",
"^fs\\.default\\.name$"};
private final String FILESYSTEM_WHITELIST_PATTERNS_KEY = "crunch.fs.props.whitelist.patterns";
private final String[] FILESYSTEM_WHITELIST_PATTERNS_DEFAULT =
new String[] {
"^fs\\..*",
"^dfs\\..*"};
private Class<K> formatClass;
private Map<String, String> extraConf;
private Configuration conf;
private FileSystem fileSystem;
public static <T> FormatBundle<T> fromSerialized(String serialized, Configuration conf) {
ByteArrayInputStream bais = new ByteArrayInputStream(Base64.decodeBase64(serialized));
try {
FormatBundle<T> bundle = new FormatBundle<T>();
bundle.setConf(conf);
bundle.readFields(new DataInputStream(bais));
return bundle;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static <T extends InputFormat<?, ?>> FormatBundle<T> forInput(Class<T> inputFormatClass) {
return new FormatBundle<T>(inputFormatClass);
}
public static <T extends OutputFormat<?, ?>> FormatBundle<T> forOutput(Class<T> outputFormatClass) {
return new FormatBundle<T>(outputFormatClass);
}
public FormatBundle() {
// For Writable support
}
@VisibleForTesting
FormatBundle(Class<K> formatClass) {
this.formatClass = formatClass;
this.extraConf = Maps.newHashMap();
}
public FormatBundle<K> set(String key, String value) {
this.extraConf.put(key, value);
return this;
}
public FormatBundle<K> setFileSystem(FileSystem fileSystem) {
this.fileSystem = fileSystem;
return this;
}
public FileSystem getFileSystem() {
return fileSystem;
}
public Class<K> getFormatClass() {
return formatClass;
}
public Configuration configure(Configuration conf) {
// first configure fileystem properties
Map<String, String> appliedFsProperties = configureFileSystem(conf);
// then apply extraConf properties
for (Map.Entry<String, String> e : extraConf.entrySet()) {
String key = e.getKey();
String value = e.getValue();
conf.set(key, value);
if (appliedFsProperties.get(key) != null) {
LOG.info("{}={} from extraConf overrode {}={} from filesystem conf",
new Object[] {key, value, key, appliedFsProperties.get(key)});
}
}
return conf;
}
  /**
   * Copies whitelisted (and not blacklisted) properties from this bundle's
   * {@code FileSystem} configuration into the supplied {@code Configuration}.
   *
   * @param conf the configuration to update in place
   * @return the property names/values that were actually applied
   */
  private Map<String,String> configureFileSystem(Configuration conf) {
    // No associated filesystem means there is nothing to copy.
    if (fileSystem == null) {
      return Collections.emptyMap();
    }
    // Patterns are read from the job configuration, falling back to the class defaults.
    Collection<Pattern> blacklistPatterns =
        compilePatterns(
            conf.getStrings(FILESYSTEM_BLACKLIST_PATTERNS_KEY,
                FILESYSTEM_BLACKLIST_PATTERNS_DEFAULT));
    Collection<Pattern> whitelistPatterns =
        compilePatterns(
            conf.getStrings(FILESYSTEM_WHITELIST_PATTERNS_KEY,
                FILESYSTEM_WHITELIST_PATTERNS_DEFAULT));
    Configuration fileSystemConf = fileSystem.getConf();
    Map<String, String> appliedProperties = new HashMap<>();
    // Property names whose values must not appear verbatim in log output.
    Collection<String> redactedProperties = conf.getTrimmedStringCollection(MR_JOB_REDACTED_PROPERTIES);
    for (Entry<String, String> e : fileSystemConf) {
      String key = e.getKey();
      // NOTE(review): value is re-read via get(key) rather than e.getValue() —
      // presumably so Configuration variable expansion is applied; confirm.
      String value = fileSystemConf.get(key);
      String originalValue = conf.get(key);
      // Skip properties that would not change the target configuration.
      if (value.equals(originalValue)) {
        continue;
      }
      // The blacklist takes precedence over the whitelist.
      Pattern matchingBlacklistPattern = matchingPattern(key, blacklistPatterns);
      if (matchingBlacklistPattern != null) {
        LOG.info("{}={} matches blacklist pattern '{}', omitted",
            new Object[] {key, value, matchingBlacklistPattern});
        continue;
      }
      Pattern matchingWhitelistPattern = matchingPattern(key, whitelistPatterns);
      if (matchingWhitelistPattern == null) {
        LOG.info("{}={} matches no whitelist pattern from {}, omitted",
            new Object[] {key, value, whitelistPatterns});
        continue;
      }
      // dfs.nameservices is special-cased: its values are merged rather than
      // replaced, so nameservices already present in conf are preserved.
      if (key.equals(DFSConfigKeys.DFS_NAMESERVICES)) {
        String[] originalArrayValue = conf.getStrings(key);
        if (originalValue != null) {
          String[] newValue = value != null ? value.split(",") : new String[0];
          String[] merged = mergeValues(originalArrayValue, newValue);
          LOG.info("Merged '{}' into '{}' with result '{}'",
              new Object[] {newValue, DFSConfigKeys.DFS_NAMESERVICES, merged});
          conf.setStrings(key, merged);
          appliedProperties.put(key, StringUtils.arrayToString(merged));
          continue;
        }
      }
      String message = "Applied {}={} from FS '{}'";
      if (originalValue != null) {
        message += ", overriding '{}'";
      }
      // Redacted properties are logged with a placeholder instead of the value.
      if (redactedProperties.contains(key)) {
        LOG.info(message,
            new Object[]{key, REDACTION_REPLACEMENT_VAL, fileSystem.getUri(), REDACTION_REPLACEMENT_VAL});
      } else {
        LOG.info(message,
            new Object[]{key, value, fileSystem.getUri(), originalValue});
      }
      conf.set(key, value);
      appliedProperties.put(key, value);
    }
    return appliedProperties;
  }
private static Pattern matchingPattern(String s, Collection<Pattern> patterns) {
for (Pattern pattern : patterns) {
if (pattern.matcher(s).find()) {
return pattern;
}
}
return null;
}
private static Collection<Pattern> compilePatterns(String[] patterns) {
Collection<Pattern> compiledPatterns = new ArrayList<>(patterns.length);
for (String pattern : patterns) {
compiledPatterns.add(Pattern.compile(pattern));
}
return compiledPatterns;
}
private static String[] mergeValues(String[] value1, String[] value2) {
Set<String> values = Sets.newHashSet();
values.addAll(Arrays.asList(value1));
values.addAll(Arrays.asList(value2));
return values.toArray(new String[0]);
}
public String serialize() {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
DataOutputStream dos = new DataOutputStream(baos);
write(dos);
return Base64.encodeBase64String(baos.toByteArray());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
  /** Returns the simple name of the wrapped format class. */
  public String getName() {
    return formatClass.getSimpleName();
  }
@Override
public int hashCode() {
return new HashCodeBuilder().append(formatClass)
.append(fileSystem)
.append(extraConf).toHashCode();
}
@Override
public boolean equals(Object other) {
if (other == null || !(other instanceof FormatBundle)) {
return false;
}
FormatBundle<K> oib = (FormatBundle<K>) other;
return Objects.equals(formatClass, oib.formatClass)
&& Objects.equals(fileSystem, oib.fileSystem)
&& Objects.equals(extraConf, oib.extraConf);
}
  /**
   * Deserializes this bundle: format class name, extra configuration entries,
   * then an optional filesystem section (presence flag, URI, configuration)
   * that may be absent from streams written by older Crunch versions.
   */
  @Override
  public void readFields(DataInput in) throws IOException {
    this.formatClass = readClass(in);
    int ecSize = in.readInt();
    this.extraConf = Maps.newHashMap();
    for (int i = 0; i < ecSize; i++) {
      String key = Text.readString(in);
      String value = Text.readString(in);
      extraConf.put(key, value);
    }
    boolean hasFilesystem;
    try {
      hasFilesystem = in.readBoolean();
    } catch (EOFException e) {
      // This can be a normal occurrence when Crunch is treated as a cluster-provided
      // dependency and the version is upgraded. Some jobs will have been submitted with
      // code that does not contain the filesystem field. If those jobs run later with
      // this code that does contain the field, EOFException will occur trying to read
      // the non-existent field.
      LOG.debug("EOFException caught attempting to read filesystem field. This condition "
          + "may temporarily occur with jobs that are submitted before but run after a "
          + "cluster-provided Crunch version upgrade.", e);
      hasFilesystem = false;
    }
    if (hasFilesystem) {
      String fileSystemUri = Text.readString(in);
      // false: start from an empty Configuration so only the serialized entries are loaded.
      Configuration filesystemConf = new Configuration(false);
      filesystemConf.readFields(in);
      this.fileSystem = FileSystem.get(URI.create(fileSystemUri), filesystemConf);
    }
  }
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, formatClass.getName());
out.writeInt(extraConf.size());
for (Map.Entry<String, String> e : extraConf.entrySet()) {
Text.writeString(out, e.getKey());
Text.writeString(out, e.getValue());
}
out.writeBoolean(fileSystem != null);
if (fileSystem != null) {
Text.writeString(out, fileSystem.getUri().toString());
fileSystem.getConf().write(out);
}
}
  /**
   * Reads a class name from the stream and resolves it through this bundle's
   * configuration (which must have been set via {@link #setConf(Configuration)}).
   */
  private Class readClass(DataInput in) throws IOException {
    String className = Text.readString(in);
    try {
      return conf.getClassByName(className);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException("readObject can't find class", e);
    }
  }
  /** Returns the configuration used for class resolution during deserialization. */
  @Override
  public Configuration getConf() {
    return conf;
  }
  /** Sets the configuration used for class resolution during deserialization. */
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }
}
| 2,795 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/At.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.avro.generic.GenericData;
import org.apache.avro.specific.SpecificRecord;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.TableSourceTarget;
import org.apache.crunch.io.avro.AvroFileSourceTarget;
import org.apache.crunch.io.seq.SeqFileSourceTarget;
import org.apache.crunch.io.seq.SeqFileTableSourceTarget;
import org.apache.crunch.io.text.TextFileSourceTarget;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
/**
* <p>Static factory methods for creating common {@link SourceTarget} types, which may be treated as both a {@code Source}
* and a {@code Target}.</p>
*
 * <p>The {@code At} methods are analogous to the {@link From} and {@link To} factory methods, but are used for
* storing intermediate outputs that need to be passed from one run of a MapReduce pipeline to another run. The
 * {@code SourceTarget} object acts as both a {@code Source} and a {@code Target}, which enables it to provide this
* functionality.
*
* <pre>
* {@code
*
* Pipeline pipeline = new MRPipeline(this.getClass());
* // Create our intermediate storage location
* SourceTarget<String> intermediate = At.textFile("/temptext");
* ...
* // Write out the output of the first phase of a pipeline.
* pipeline.write(phase1, intermediate);
*
* // Explicitly call run to kick off the pipeline.
* pipeline.run();
*
* // And then kick off a second phase by consuming the output
* // from the first phase.
* PCollection<String> phase2Input = pipeline.read(intermediate);
* ...
* }
* </pre>
*
* <p>The {@code SourceTarget} abstraction is useful when we care about reading the intermediate
* outputs of a pipeline as well as the final results.</p>
*/
public class At {

  /**
   * Returns a {@code SourceTarget<T>} for Avro records of the given
   * {@code SpecificRecord} subclass stored at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param avroClass the {@code SpecificRecord} subclass of the records
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T extends SpecificRecord> SourceTarget<T> avroFile(String pathName, Class<T> avroClass) {
    Path path = new Path(pathName);
    return avroFile(path, avroClass);
  }

  /**
   * Returns a {@code SourceTarget<T>} for Avro records of the given
   * {@code SpecificRecord} subclass stored at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @param avroClass the {@code SpecificRecord} subclass of the records
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T extends SpecificRecord> SourceTarget<T> avroFile(Path path, Class<T> avroClass) {
    return avroFile(path, Avros.specifics(avroClass));
  }

  /**
   * Returns a {@code SourceTarget<GenericData.Record>} whose schema is read
   * from the Avro data at the named path; for a directory, the schema of a
   * file inside it is used.
   *
   * @param pathName name of the path to the data on the filesystem
   * @return a new {@code SourceTarget<GenericData.Record>} instance
   */
  public static SourceTarget<GenericData.Record> avroFile(String pathName) {
    return avroFile(new Path(pathName));
  }

  /**
   * Returns a {@code SourceTarget<GenericData.Record>} whose schema is read
   * from the Avro data at the given path; for a directory, the schema of a
   * file inside it is used.
   *
   * @param path the path to the data on the filesystem
   * @return a new {@code SourceTarget<GenericData.Record>} instance
   */
  public static SourceTarget<GenericData.Record> avroFile(Path path) {
    return avroFile(path, new Configuration());
  }

  /**
   * Returns a {@code SourceTarget<GenericData.Record>} whose schema is read
   * from the Avro data at the given path, resolving the filesystem through
   * the supplied {@code Configuration}; for a directory, the schema of a file
   * inside it is used.
   *
   * @param path the path to the data on the filesystem
   * @param conf the configuration information
   * @return a new {@code SourceTarget<GenericData.Record>} instance
   */
  public static SourceTarget<GenericData.Record> avroFile(Path path, Configuration conf) {
    return avroFile(path, Avros.generics(From.getSchemaFromPath(path, conf)));
  }

  /**
   * Returns a {@code SourceTarget<T>} for the Avro file(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param ptype the {@code PType} describing the Avro records
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> avroFile(String pathName, PType<T> ptype) {
    return avroFile(new Path(pathName), ptype);
  }

  /**
   * Returns a {@code SourceTarget<T>} for the Avro file(s) at the given
   * {@code Path}. The supplied {@code PType} must be an Avro type.
   *
   * @param path the {@code Path} to the data
   * @param ptype the {@code PType} describing the Avro records
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> avroFile(Path path, PType<T> ptype) {
    AvroType<T> avroType = (AvroType<T>) ptype;
    return new AvroFileSourceTarget<T>(path, avroType);
  }

  /**
   * Returns a {@code SourceTarget<T>} over the value field of each key-value
   * pair in the SequenceFile(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param valueClass the {@code Writable} type of the SequenceFile values
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T extends Writable> SourceTarget<T> sequenceFile(String pathName, Class<T> valueClass) {
    return sequenceFile(new Path(pathName), valueClass);
  }

  /**
   * Returns a {@code SourceTarget<T>} over the value field of each key-value
   * pair in the SequenceFile(s) at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @param valueClass the {@code Writable} type of the SequenceFile values
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T extends Writable> SourceTarget<T> sequenceFile(Path path, Class<T> valueClass) {
    return sequenceFile(path, Writables.writables(valueClass));
  }

  /**
   * Returns a {@code SourceTarget<T>} over the value field of each key-value
   * pair in the SequenceFile(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param ptype the {@code PType} of the SequenceFile values
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> sequenceFile(String pathName, PType<T> ptype) {
    return sequenceFile(new Path(pathName), ptype);
  }

  /**
   * Returns a {@code SourceTarget<T>} over the value field of each key-value
   * pair in the SequenceFile(s) at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @param ptype the {@code PType} of the SequenceFile values
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> sequenceFile(Path path, PType<T> ptype) {
    return new SeqFileSourceTarget<T>(path, ptype);
  }

  /**
   * Returns a {@code TableSourceTarget<K, V>} over the key-value pairs in the
   * SequenceFile(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param keyClass the {@code Writable} type of the SequenceFile keys
   * @param valueClass the {@code Writable} type of the SequenceFile values
   * @return a new {@code TableSourceTarget<K, V>} instance
   */
  public static <K extends Writable, V extends Writable> TableSourceTarget<K, V> sequenceFile(
      String pathName, Class<K> keyClass, Class<V> valueClass) {
    return sequenceFile(new Path(pathName), keyClass, valueClass);
  }

  /**
   * Returns a {@code TableSourceTarget<K, V>} over the key-value pairs in the
   * SequenceFile(s) at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @param keyClass the {@code Writable} type of the SequenceFile keys
   * @param valueClass the {@code Writable} type of the SequenceFile values
   * @return a new {@code TableSourceTarget<K, V>} instance
   */
  public static <K extends Writable, V extends Writable> TableSourceTarget<K, V> sequenceFile(
      Path path, Class<K> keyClass, Class<V> valueClass) {
    return sequenceFile(path, Writables.writables(keyClass), Writables.writables(valueClass));
  }

  /**
   * Returns a {@code TableSourceTarget<K, V>} over the key-value pairs in the
   * SequenceFile(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param keyType the {@code PType} of the SequenceFile keys
   * @param valueType the {@code PType} of the SequenceFile values
   * @return a new {@code TableSourceTarget<K, V>} instance
   */
  public static <K, V> TableSourceTarget<K, V> sequenceFile(String pathName, PType<K> keyType, PType<V> valueType) {
    return sequenceFile(new Path(pathName), keyType, valueType);
  }

  /**
   * Returns a {@code TableSourceTarget<K, V>} over the key-value pairs in the
   * SequenceFile(s) at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @param keyType the {@code PType} of the SequenceFile keys
   * @param valueType the {@code PType} of the SequenceFile values
   * @return a new {@code TableSourceTarget<K, V>} instance
   */
  public static <K, V> TableSourceTarget<K, V> sequenceFile(Path path, PType<K> keyType, PType<V> valueType) {
    // The key's type family builds the table type pairing keys with values.
    return new SeqFileTableSourceTarget<K, V>(path, keyType.getFamily().tableOf(keyType, valueType));
  }

  /**
   * Returns a {@code SourceTarget<String>} for the text file(s) at the named path.
   *
   * @param pathName name of the path to the data on the filesystem
   * @return a new {@code SourceTarget<String>} instance
   */
  public static SourceTarget<String> textFile(String pathName) {
    return textFile(new Path(pathName));
  }

  /**
   * Returns a {@code SourceTarget<String>} for the text file(s) at the given {@code Path}.
   *
   * @param path the {@code Path} to the data
   * @return a new {@code SourceTarget<String>} instance
   */
  public static SourceTarget<String> textFile(Path path) {
    return textFile(path, Writables.strings());
  }

  /**
   * Returns a {@code SourceTarget<T>} for the text file(s) at the named path,
   * converting each line via the given {@code PType}.
   *
   * @param pathName name of the path to the data on the filesystem
   * @param ptype the {@code PType<T>} used to process the input text
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> textFile(String pathName, PType<T> ptype) {
    return textFile(new Path(pathName), ptype);
  }

  /**
   * Returns a {@code SourceTarget<T>} for the text file(s) at the given
   * {@code Path}, converting each line via the given {@code PType}.
   *
   * @param path the {@code Path} to the data
   * @param ptype the {@code PType<T>} used to process the input text
   * @return a new {@code SourceTarget<T>} instance
   */
  public static <T> SourceTarget<T> textFile(Path path, PType<T> ptype) {
    return new TextFileSourceTarget<T>(path, ptype);
  }
}
| 2,796 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/CompositePathIterable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.collect.UnmodifiableIterator;
public class CompositePathIterable<T> implements Iterable<T> {

  // Statuses of the non-hidden files to iterate; guaranteed non-empty by create().
  private final FileStatus[] stati;
  private final FileSystem fs;
  private final FileReaderFactory<T> readerFactory;

  // Skips hidden/metadata entries such as "_SUCCESS" and ".crc" files.
  private static final PathFilter FILTER = new PathFilter() {
    @Override
    public boolean accept(Path path) {
      String name = path.getName();
      return !name.startsWith("_") && !name.startsWith(".");
    }
  };

  /**
   * Creates an {@code Iterable} over the records of all non-hidden files under
   * the given path, reading each file with the supplied reader factory.
   *
   * @param fs the filesystem containing the data
   * @param path the path to list files under
   * @param readerFactory produces a record iterator for each file
   * @return an iterable over all records, or an empty list if no files match
   * @throws IOException if the path does not exist or cannot be listed
   */
  public static <S> Iterable<S> create(FileSystem fs, Path path, FileReaderFactory<S> readerFactory) throws IOException {

    if (!fs.exists(path)) {
      throw new IOException("No files found to materialize at: " + path);
    }

    FileStatus[] stati = null;
    try {
      stati = fs.listStatus(path, FILTER);
    } catch (FileNotFoundException e) {
      // Path disappeared between the exists() check and the listing.
      stati = null;
    }
    if (stati == null) {
      throw new IOException("No files found to materialize at: " + path);
    }

    if (stati.length == 0) {
      return Collections.emptyList();
    } else {
      return new CompositePathIterable<S>(stati, fs, readerFactory);
    }

  }

  private CompositePathIterable(FileStatus[] stati, FileSystem fs, FileReaderFactory<T> readerFactory) {
    this.stati = stati;
    this.fs = fs;
    this.readerFactory = readerFactory;
  }

  @Override
  public Iterator<T> iterator() {

    return new UnmodifiableIterator<T>() {
      // Index of the next file to open; the first file's reader is opened eagerly.
      private int index = 0;
      private Iterator<T> iter = readerFactory.read(fs, stati[index++].getPath());

      @Override
      public boolean hasNext() {
        if (!iter.hasNext()) {
          // Current file exhausted: advance through remaining files until one
          // yields records or none are left.
          while (index < stati.length) {
            iter = readerFactory.read(fs, stati[index++].getPath());
            if (iter.hasNext()) {
              return true;
            }
          }
          return false;
        }
        return true;
      }

      @Override
      public T next() {
        return iter.next();
      }
    };
  }
}
| 2,797 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/SequentialFileNamingScheme.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import java.util.Map;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* Default {@link FileNamingScheme} that uses an incrementing sequence number in
* order to generate unique file names.
*/
public class SequentialFileNamingScheme implements FileNamingScheme {

  private static final SequentialFileNamingScheme INSTANCE = new SequentialFileNamingScheme();

  /** Returns the shared singleton instance of this naming scheme. */
  public static SequentialFileNamingScheme getInstance() {
    return INSTANCE;
  }

  // Next sequence number to hand out, keyed by output directory.
  private final Map<Path, Integer> cache;

  private SequentialFileNamingScheme() {
    this.cache = Maps.newHashMap();
  }

  @Override
  public String getMapOutputName(Configuration configuration, Path outputDirectory) throws IOException {
    return getSequentialFileName(configuration, outputDirectory, "m");
  }

  @Override
  public String getReduceOutputName(Configuration configuration, Path outputDirectory, int partitionId)
      throws IOException {
    // The partition id plays no part in the name; only the sequence number does.
    return getSequentialFileName(configuration, outputDirectory, "r");
  }

  /** Builds a "part-<m|r>-NNNNN" file name using the next sequence number for the directory. */
  private String getSequentialFileName(Configuration configuration, Path outputDirectory, String jobTypeName)
      throws IOException {
    int sequence = getSequenceNumber(configuration, outputDirectory);
    return String.format("part-%s-%05d", jobTypeName, sequence);
  }

  /**
   * Returns the next sequence number for the directory and advances the
   * counter. On first use for a directory, counting starts at the number of
   * files already present there.
   */
  private synchronized int getSequenceNumber(Configuration conf, Path outputDirectory) throws IOException {
    Integer next = cache.get(outputDirectory);
    if (next == null) {
      FileSystem fileSystem = outputDirectory.getFileSystem(conf);
      next = fileSystem.listStatus(outputDirectory).length;
    }
    cache.put(outputDirectory, next + 1);
    return next;
  }
}
| 2,798 |
0 | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/MapReduceTarget.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.crunch.Target;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
/**
 * A {@link Target} that can configure a MapReduce {@link Job} to write to it.
 */
public interface MapReduceTarget extends Target {
  /**
   * Configures the given job so output of the given type is written under the
   * given output path.
   *
   * @param job the MapReduce job to configure
   * @param ptype the type of the records being written
   * @param outputPath the path the output should be written to
   * @param name the name identifying this output within the job
   */
  void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name);
}
| 2,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.