_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q162300
ServerCore.getNewClientId
train
/**
 * Generates a unique, non-negative client id not already present in clientsData.
 * Fix: Math.abs(Long.MIN_VALUE) returns Long.MIN_VALUE (still negative);
 * masking off the sign bit is non-negative for every possible input.
 */
private long getNewClientId() {
  while (true) {
    long clientId = clientIdsGenerator.nextLong() & Long.MAX_VALUE;
    if (!clientsData.containsKey(clientId)) {
      return clientId;
    }
  }
}
java
{ "resource": "" }
q162301
ChecksumStore.putIfAbsentChecksum
train
/**
 * Records the checksum for a block if none is stored yet.
 *
 * @return the previously stored checksum, or null if this call stored one
 * @throws IOException if a different checksum was already recorded for blk
 */
public Long putIfAbsentChecksum(Block blk, Long newChecksum) throws IOException {
  final Long previous = putIfAbsent(blk, newChecksum);
  if (previous != null && !previous.equals(newChecksum)) {
    throw new IOException("Block " + blk.toString() + " has different checksums "
        + previous + "(old) and " + newChecksum + "(new)");
  }
  return previous;
}
java
{ "resource": "" }
q162302
OverrideRecordReader.fillJoinCollector
train
// Fills the join collector for iterkey: drains every reader whose head key equals iterkey,
// remembers the drained reader with the highest id, lets that one accept() into the join
// collector while the others skip(), then re-queues all drained readers that still have input.
protected void fillJoinCollector(K iterkey) throws IOException { final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue(); if (!q.isEmpty()) { int highpos = -1; ArrayList<ComposableRecordReader<K,?>> list = new ArrayList<ComposableRecordReader<K,?>>(kids.length); q.peek().key(iterkey); final WritableComparator cmp = getComparator(); while (0 == cmp.compare(q.peek().key(), iterkey)) { ComposableRecordReader<K,?> t = q.poll(); if (-1 == highpos || list.get(highpos).id() < t.id()) { highpos = list.size(); } list.add(t); if (q.isEmpty()) break; } ComposableRecordReader<K,?> t = list.remove(highpos); t.accept(jc, iterkey); for (ComposableRecordReader<K,?> rr : list) { rr.skip(iterkey); } list.add(t); for (ComposableRecordReader<K,?> rr : list) { if (rr.hasNext()) { q.add(rr); } } } }
java
{ "resource": "" }
q162303
BlockWithChecksumFileReader.getGenerationStampFromSeperateChecksumFile
train
// Scans a directory listing for this block's metafile (blk_<id>_<genstamp>.meta) and returns
// its generation stamp; if none matches, logs a warning and falls back to the grandfather stamp.
// NOTE(review): Long.parseLong can throw NumberFormatException on a malformed genstamp segment
// that still splits into three '_' parts — confirm callers tolerate that.
static long getGenerationStampFromSeperateChecksumFile(String[] listdir, String blockName) { for (int j = 0; j < listdir.length; j++) { String path = listdir[j]; if (!path.startsWith(blockName)) { continue; } String[] vals = StringUtils.split(path, '_'); if (vals.length != 3) { // blk, blkid, genstamp.meta continue; } String[] str = StringUtils.split(vals[2], '.'); if (str.length != 2) { continue; } return Long.parseLong(str[0]); } DataNode.LOG.warn("Block " + blockName + " does not have a metafile!"); return Block.GRANDFATHER_GENERATION_STAMP; }
java
{ "resource": "" }
q162304
BlockWithChecksumFileReader.parseGenerationStampInMetaFile
train
// Extracts the generation stamp embedded in a metafile name — the substring between the
// block file's name plus separator and the metadata extension — wrapping parse failures
// in an IOException that preserves the NumberFormatException as its cause.
static long parseGenerationStampInMetaFile(File blockFile, File metaFile ) throws IOException { String metaname = metaFile.getName(); String gs = metaname.substring(blockFile.getName().length() + 1, metaname.length() - FSDataset.METADATA_EXTENSION.length()); try { return Long.parseLong(gs); } catch(NumberFormatException nfe) { throw (IOException)new IOException("blockFile=" + blockFile + ", metaFile=" + metaFile).initCause(nfe); } }
java
{ "resource": "" }
q162305
BlockWithChecksumFileReader.metaFileExists
train
/**
 * Returns true iff the checksum metadata file for block b exists in the dataset.
 * Fix: modifier order normalized to the canonical {@code public static}
 * (JLS-recommended ordering) from {@code static public}.
 */
public static boolean metaFileExists(FSDatasetInterface dataset, int namespaceId,
    Block b) throws IOException {
  return getMetaFile(dataset, namespaceId, b).exists();
}
java
{ "resource": "" }
q162306
BufferedByteInputStream.wrapInputStream
train
public static DataInputStream wrapInputStream(InputStream is, int bufferSize, int readBufferSize) { // wrapping BufferedByteInputStream in BufferedInputStream decreases // pressure on BBIS internal locks, and we read from the BBIS in // bigger chunks return new DataInputStream(new BufferedInputStream( new BufferedByteInputStream(is, bufferSize, readBufferSize))); }
java
{ "resource": "" }
q162307
BufferedByteInputStream.close
train
public void close() throws IOException { // multiple close should return with no errors // readThread will close underlying buffer readThread.close(); try { readThread.join(); } catch (InterruptedException e) { throw new IOException(e); } }
java
{ "resource": "" }
q162308
BufferedByteInputStream.checkOutput
train
/**
 * Validates the result of a read: a non-negative byte count passes through;
 * for EOF (-1) the method reports a close or an async reader error first.
 * Fix: the reader thread's error is now chained as the cause instead of
 * being flattened to its message, preserving the original stack trace.
 *
 * @param readBytes byte count (or -1) returned by the underlying read
 * @return readBytes unchanged
 * @throws IOException if the stream was closed or the reader thread failed
 */
private int checkOutput(int readBytes) throws IOException {
  if (readBytes > -1) {
    return readBytes;
  }
  if (closed) {
    throw new IOException("The stream has been closed");
  }
  if (readThread.error != null) {
    throw new IOException(readThread.error.getMessage(), readThread.error);
  }
  return readBytes;
}
java
{ "resource": "" }
q162309
ClusterStatus.initTrackersToTasksMap
train
/**
 * Builds the tracker-name -> task-status map for the given jobs and
 * accumulates the total map/reduce task counts.
 * Fix: the identical accumulation loop was duplicated verbatim for REDUCE
 * and MAP task types; it is extracted into a private helper (REDUCE is
 * still processed before MAP, as in the original).
 */
private void initTrackersToTasksMap(Collection<JobInProgress> jobsInProgress) {
  for (TaskTrackerStatus tracker : taskTrackersDetails) {
    taskTrackerExtendedTasks.put(tracker.getTrackerName(), new ArrayList<TaskStatus>());
  }
  for (JobInProgress job : jobsInProgress) {
    total_map_tasks += job.getTasks(TaskType.MAP).length;
    total_reduce_tasks += job.getTasks(TaskType.REDUCE).length;
    addTaskStatuses(job.getTasks(TaskType.REDUCE));
    addTaskStatuses(job.getTasks(TaskType.MAP));
  }
}

/** Records every status of the given tasks under its tracker's entry, creating entries on demand. */
private void addTaskStatuses(TaskInProgress[] tasks) {
  for (TaskInProgress task : tasks) {
    for (TaskStatus status : task.getTaskStatuses()) {
      Collection<TaskStatus> trackerTasks =
          taskTrackerExtendedTasks.get(status.getTaskTracker());
      if (trackerTasks == null) {
        trackerTasks = new ArrayList<TaskStatus>();
        taskTrackerExtendedTasks.put(status.getTaskTracker(), trackerTasks);
      }
      trackerTasks.add(status);
    }
  }
}
java
{ "resource": "" }
q162310
MapOutputFile.getOutputFile
train
// Resolves the local read path of the map attempt's final output file ("file.out")
// under the job's intermediate output directory.
public Path getOutputFile(TaskAttemptID mapTaskId) throws IOException { return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( jobId.toString(), mapTaskId.toString()) + "/file.out", conf); }
java
{ "resource": "" }
q162311
MapOutputFile.getSpillFileForWrite
train
// Allocates a local write path, sized for `size` bytes, for spill file <spillNumber>
// ("spillN.out") of the given map attempt.
public Path getSpillFileForWrite(TaskAttemptID mapTaskId, int spillNumber, long size) throws IOException { return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( jobId.toString(), mapTaskId.toString()) + "/spill" + spillNumber + ".out", size, conf); }
java
{ "resource": "" }
q162312
MapOutputFile.getSpillIndexFile
train
// Resolves the local read path of the index file ("spillN.out.index") for spill <spillNumber>
// of the given map attempt.
public Path getSpillIndexFile(TaskAttemptID mapTaskId, int spillNumber) throws IOException { return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( jobId.toString(), mapTaskId.toString()) + "/spill" + spillNumber + ".out.index", conf); }
java
{ "resource": "" }
q162313
MapOutputFile.getInputFile
train
// Resolves the local read path of the reduce-side input file ("map_<mapId>.out")
// copied from map number mapId.
public Path getInputFile(int mapId, TaskAttemptID reduceTaskId) throws IOException { // TODO *oom* should use a format here return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( jobId.toString(), reduceTaskId.toString()) + "/map_" + mapId + ".out", conf); }
java
{ "resource": "" }
q162314
MapOutputFile.getInputFileForWrite
train
// Allocates a local write path, sized for `size` bytes, for the reduce-side copy
// of map <mapId>'s output ("map_<id>.out").
public Path getInputFileForWrite(TaskID mapId, TaskAttemptID reduceTaskId, long size) throws IOException { // TODO *oom* should use a format here return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( jobId.toString(), reduceTaskId.toString()) + "/map_" + mapId.getId() + ".out", size, conf); }
java
{ "resource": "" }
q162315
MapOutputFile.removeAll
train
/**
 * Removes all intermediate map output for the given task attempt, using the
 * async disk service when available (move-then-delete on each volume), else
 * deleting synchronously via the configuration's local-file helper.
 * Fix: corrected "ouput" -> "output" in both log messages.
 */
public void removeAll(TaskAttemptID taskId) throws IOException {
  String toBeDeleted = TaskTracker.getIntermediateOutputDir(jobId.toString(), taskId.toString());
  if (asyncDiskService != null) {
    asyncDiskService.moveAndDeleteFromEachVolume(toBeDeleted);
    LOG.info("Move and then delete map output " + toBeDeleted + " for task " + taskId);
    return;
  }
  LOG.info("Delete map output " + toBeDeleted + " for task " + taskId);
  conf.deleteLocalFiles(toBeDeleted);
}
java
{ "resource": "" }
q162316
LocatedBlocks.setLastBlockSize
train
// Grows the recorded size of the last located block — only while the file is under
// construction and only when blockSize exceeds the current value — and keeps the
// cached total file length consistent with the delta.
public synchronized void setLastBlockSize(long blockId, long blockSize) { assert blocks.size() > 0; LocatedBlock last = blocks.get(blocks.size() - 1); if (underConstruction && blockSize > last.getBlockSize()) { assert blockId == last.getBlock().getBlockId(); this.setFileLength(this.getFileLength() + blockSize - last.getBlockSize()); last.setBlockSize(blockSize); if (LOG.isDebugEnabled()) { LOG.debug("DFSClient setting last block " + last + " to length " + blockSize + " filesize is now " + getFileLength()); } } }
java
{ "resource": "" }
q162317
BlockReaderLocalWithChecksum.readAll
train
// Memory-maps [startOffset, startOffset + length) of the block data file read-only
// and returns the mapped buffer. NOTE(review): the MappedByteBuffer is never explicitly
// unmapped (relies on GC of the buffer) — confirm this is acceptable for the caller's lifetime.
public ByteBuffer readAll() throws IOException { MappedByteBuffer bb = dataFileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, length); return bb; }
java
{ "resource": "" }
q162318
TFactoryBasedThreadPoolServer.createNewServer
train
// Builds a thread-pool Thrift server over the given server socket: framed transport,
// strict binary protocol, daemon worker threads, and a zero stop timeout.
public static TFactoryBasedThreadPoolServer createNewServer( TProcessor processor, ServerSocket serverSocket, int socketTimeOut) throws IOException { TServerSocket socket = new TServerSocket(serverSocket, socketTimeOut); TFactoryBasedThreadPoolServer.Args args = new TFactoryBasedThreadPoolServer.Args(socket); args.stopTimeoutVal = 0; args.processor(processor); args.transportFactory(new TFramedTransport.Factory()); args.protocolFactory(new TBinaryProtocol.Factory(true, true)); return new TFactoryBasedThreadPoolServer( args, new TFactoryBasedThreadPoolServer.DaemonThreadFactory()); }
java
{ "resource": "" }
q162319
IsolationRunner.fillInMissingMapOutputs
train
// Creates empty SequenceFiles (typed by the job's map-output key/value classes) for any
// map output file missing on local disk, so an isolated reduce re-run can proceed.
private static void fillInMissingMapOutputs(FileSystem fs, TaskAttemptID taskId, int numMaps, JobConf conf) throws IOException { Class<? extends WritableComparable> keyClass = conf.getMapOutputKeyClass().asSubclass(WritableComparable.class); Class<? extends Writable> valueClass = conf.getMapOutputValueClass().asSubclass(Writable.class); MapOutputFile namer = new MapOutputFile(taskId.getJobID()); namer.setConf(conf); for(int i=0; i<numMaps; i++) { Path f = namer.getInputFile(i, taskId); if (!fs.exists(f)) { LOG.info("Create missing input: " + f); SequenceFile.Writer out = SequenceFile.createWriter(fs, conf, f, keyClass, valueClass); out.close(); } } }
java
{ "resource": "" }
q162320
IsolationRunner.main
train
// Standalone entry point: re-runs a single map or reduce task described by <path>/job.xml
// in-process. Sets up the task-local working directory, a job-specific classloader, then
// either replays the serialized input split (map) or fills in missing map outputs and runs
// the reduce, all against a FakeUmbilical instead of a live TaskTracker.
public static void main(String[] args ) throws ClassNotFoundException, IOException, InterruptedException { if (args.length != 1) { System.out.println("Usage: IsolationRunner <path>/job.xml"); System.exit(1); } File jobFilename = new File(args[0]); if (!jobFilename.exists() || !jobFilename.isFile()) { System.out.println(jobFilename + " is not a valid job file."); System.exit(1); } JobConf conf = new JobConf(new Path(jobFilename.toString())); TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id")); boolean isMap = conf.getBoolean("mapred.task.is.map", true); int partition = conf.getInt("mapred.task.partition", 0); // setup the local and user working directories FileSystem local = FileSystem.getLocal(conf); LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); File workDirName = new File(lDirAlloc.getLocalPathToRead( TaskTracker.getLocalTaskDir( taskId.getJobID().toString(), taskId.toString()) + Path.SEPARATOR + "work", conf). toString()); local.setWorkingDirectory(new Path(workDirName.toString())); FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory()); // set up a classloader with the right classpath ClassLoader classLoader = makeClassLoader(conf, workDirName); Thread.currentThread().setContextClassLoader(classLoader); conf.setClassLoader(classLoader); Task task; if (isMap) { Path localSplit = new Path(new Path(jobFilename.toString()).getParent(), "split.dta"); DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit); String splitClass = Text.readString(splitFile); BytesWritable split = new BytesWritable(); split.readFields(splitFile); splitFile.close(); task = new MapTask(jobFilename.toString(), taskId, partition, splitClass, split, 1, conf.getUser()); } else { int numMaps = conf.getNumMapTasks(); fillInMissingMapOutputs(local, taskId, numMaps, conf); task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps, 1, conf.getUser()); } task.setConf(conf); task.run(conf, new FakeUmbilical()); }
java
{ "resource": "" }
q162321
DFSFolder.upload
train
public void upload(IProgressMonitor monitor, final File file) throws IOException { if (file.isDirectory()) { Path filePath = new Path(this.path, file.getName()); getDFS().mkdirs(filePath); DFSFolder newFolder = new DFSFolder(this, filePath); monitor.worked(1); for (File child : file.listFiles()) { if (monitor.isCanceled()) return; newFolder.upload(monitor, child); } } else if (file.isFile()) { Path filePath = new Path(this.path, file.getName()); DFSFile newFile = new DFSFile(this, filePath, file, monitor); } else { // XXX don't know what the file is? } }
java
{ "resource": "" }
q162322
DFSFolder.mkdir
train
// Creates <folderName> under this folder on DFS; failures are only printed to stderr.
// The view is refreshed afterwards even if mkdirs failed.
public void mkdir(String folderName) { try { getDFS().mkdirs(new Path(this.path, folderName)); } catch (IOException ioe) { ioe.printStackTrace(); } doRefresh(); }
java
{ "resource": "" }
q162323
PendingReplication.add
train
/**
 * Registers filename as pending replication if not already tracked.
 *
 * @return true if a new entry was created, false if one already existed
 */
boolean add(Path filename) {
  synchronized (pendingReplications) {
    if (pendingReplications.get(filename) != null) {
      return false;
    }
    pendingReplications.put(filename, new PendingInfo(filename));
    return true;
  }
}
java
{ "resource": "" }
q162324
TopologyCache.getNode
train
// Resolves (and caches) the topology node for a hostname; on a cache miss it also
// registers the node's ancestor at the default host level in nodesAtMaxLevel.
// Per the inline comment, concurrent resolution of the same host is tolerated because
// resolve() returns a canonical node, making the put idempotent.
public Node getNode(String name) { Node n = hostnameToNodeMap.get(name); // it's ok if multiple threads try to resolve the same host at the same time // the assumption is that resolve() will return a canonical node object and // the put operation is therefore idempotent if (n == null) { n = resolveAndGetNode(name); hostnameToNodeMap.put(name, n); // Make an entry for the node at the max level in the cache nodesAtMaxLevel.add( getParentNode(n, NetworkTopology.DEFAULT_HOST_LEVEL - 1)); } return n; }
java
{ "resource": "" }
q162325
SequenceFileOutputFormat.getReaders
train
public static SequenceFile.Reader[] getReaders(Configuration conf, Path dir) throws IOException { FileSystem fs = dir.getFileSystem(conf); Path[] names = FileUtil.stat2Paths(fs.listStatus(dir)); // sort names, so that hash partitioning works Arrays.sort(names); SequenceFile.Reader[] parts = new SequenceFile.Reader[names.length]; for (int i = 0; i < names.length; i++) { parts[i] = new SequenceFile.Reader(fs, names[i], conf); } return parts; }
java
{ "resource": "" }
q162326
TrackerStats.isFaulty
train
// A tracker is considered faulty if it is dead, or if its usage report exceeds
// either the failed-connections or the failed-tasks threshold.
public boolean isFaulty(String trackerName) { synchronized (this) { NodeUsageReport usageReport = usageReports.get(trackerName); return isDeadTracker(trackerName) || (usageReport != null && (usageReport.getNumFailedConnections() > maxFailedConnections || usageReport.getNumFailed() > maxFailures)); } }
java
{ "resource": "" }
q162327
TrackerStats.getNumFaultyTrackers
train
/** Counts how many known trackers currently satisfy isFaulty(). */
public int getNumFaultyTrackers() {
  synchronized (this) {
    int faulty = 0;
    for (String tracker : usageReports.keySet()) {
      if (isFaulty(tracker)) {
        faulty++;
      }
    }
    return faulty;
  }
}
java
{ "resource": "" }
q162328
TrackerStats.recordTask
train
// Increments the total-task counter for the tracker, creating its report on first use.
public void recordTask(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport.setNumTotalTasks(usageReport.getNumTotalTasks() + 1); } }
java
{ "resource": "" }
q162329
TrackerStats.recordSucceededTask
train
// Increments the succeeded-task counter for the tracker.
public void recordSucceededTask(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport.setNumSucceeded(usageReport.getNumSucceeded() + 1); } }
java
{ "resource": "" }
q162330
TrackerStats.recordKilledTask
train
// Increments the killed-task counter for the tracker.
public void recordKilledTask(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport.setNumKilled(usageReport.getNumKilled() + 1); } }
java
{ "resource": "" }
q162331
TrackerStats.recordFailedTask
train
// Increments the failed-task counter for the tracker.
public void recordFailedTask(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport.setNumFailed(usageReport.getNumFailed() + 1); } }
java
{ "resource": "" }
q162332
TrackerStats.recordSlowTask
train
// Increments the slow-task counter for the tracker.
public void recordSlowTask(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport.setNumSlow(usageReport.getNumSlow() + 1); } }
java
{ "resource": "" }
q162333
TrackerStats.recordConnectionError
train
// Increments the failed-connection counter for the tracker.
public void recordConnectionError(String trackerName) { synchronized (this) { NodeUsageReport usageReport = getReportUnprotected(trackerName); usageReport .setNumFailedConnections(usageReport.getNumFailedConnections() + 1); } }
java
{ "resource": "" }
q162334
TrackerStats.getReportUnprotected
train
/**
 * Returns the usage report for the tracker, lazily creating a zeroed one.
 * Caller must hold the enclosing lock (hence "Unprotected").
 */
private NodeUsageReport getReportUnprotected(String trackerName) {
  NodeUsageReport report = usageReports.get(trackerName);
  if (report != null) {
    return report;
  }
  report = new NodeUsageReport(trackerName, 0, 0, 0, 0, 0, 0, 0);
  usageReports.put(trackerName, report);
  return report;
}
java
{ "resource": "" }
q162335
MetricsTimeVaryingRate.inc
train
/**
 * Accumulates numOps operations taking the given total time, and updates the
 * per-operation min/max with the average time per operation.
 * Fix: guard the time/numOps division — numOps == 0 previously threw
 * ArithmeticException while holding the lock.
 */
public void inc(final int numOps, final long time) {
  lock.lock();
  try {
    currentData.numOperations += numOps;
    currentData.time += time;
    if (numOps > 0) {
      long timePerOps = time / numOps;
      minMax.update(timePerOps);
    }
  } finally {
    lock.unlock();
  }
}
java
{ "resource": "" }
q162336
TaskAttemptID.downgrade
train
/**
 * Converts a new-API TaskAttemptID to the old API type, reusing the instance
 * when it already is one.
 */
public static TaskAttemptID downgrade(org.apache.hadoop.mapreduce.TaskAttemptID old) {
  if (old instanceof TaskAttemptID) {
    return (TaskAttemptID) old;
  }
  return new TaskAttemptID(TaskID.downgrade(old.getTaskID()), old.getId());
}
java
{ "resource": "" }
q162337
BlockInlineChecksumReader.getFileLengthFromBlockSize
train
/**
 * Computes the on-disk file length for a block of blockSize bytes with inline
 * checksums: data + one checksum per (possibly partial) chunk + header.
 */
public static long getFileLengthFromBlockSize(long blockSize, int bytesPerChecksum,
    int checksumSize) {
  // Ceiling division: partial final chunks still carry a full checksum.
  final long numChunks = (blockSize + bytesPerChecksum - 1) / bytesPerChecksum;
  return blockSize + numChunks * checksumSize + BlockInlineChecksumReader.getHeaderSize();
}
java
{ "resource": "" }
q162338
BlockInlineChecksumReader.getPosFromBlockOffset
train
// Maps a chunk-aligned block offset to its byte position in the inline-checksum file.
// Only full-chunk reads are supported, so the position equals the file length a block
// of exactly offsetInBlock bytes would occupy.
public static long getPosFromBlockOffset(long offsetInBlock, int bytesPerChecksum, int checksumSize) { // We only support to read full chunks, so offsetInBlock must be the boundary // of the chunks. assert offsetInBlock % bytesPerChecksum == 0; // The position in the file will be the same as the file size for the block // size. return getFileLengthFromBlockSize(offsetInBlock, bytesPerChecksum, checksumSize); }
java
{ "resource": "" }
q162339
FSDatasetAsyncDiskService.execute
train
// Runs the task on the per-volume executor registered for root; fails fast with a
// RuntimeException if the service has shut down or the root is unknown.
synchronized void execute(File root, Runnable task) { if (executors == null) { throw new RuntimeException("AsyncDiskService is already shutdown"); } ThreadPoolExecutor executor = executors.get(root); if (executor == null) { throw new RuntimeException("Cannot find root " + root + " for execution of task " + task); } else { executor.execute(task); } }
java
{ "resource": "" }
q162340
FSDatasetAsyncDiskService.shutdown
train
synchronized void shutdown() { if (executors == null) { LOG.warn("AsyncDiskService has already shut down."); } else { LOG.info("Shutting down all async disk service threads..."); for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) { e.getValue().shutdown(); } // clear the executor map so that calling execute again will fail. executors = null; LOG.info("All async disk service threads have been shut down."); } }
java
{ "resource": "" }
q162341
FSDatasetAsyncDiskService.deleteAsync
train
// Schedules asynchronous deletion of a block's data and meta files on the
// owning volume's executor.
void deleteAsync(FSDataset.FSVolume volume, File blockFile, File metaFile, String blockName, int namespaceId) { DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile + " for deletion"); ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(volume, blockFile, metaFile, blockName, namespaceId); execute(volume.getCurrentDir(), deletionTask); }
java
{ "resource": "" }
q162342
FSDatasetAsyncDiskService.deleteAsyncFile
train
/** Schedules asynchronous deletion of a single file on the owning volume's executor. */
void deleteAsyncFile(FSDataset.FSVolume volume, File file){
  DataNode.LOG.info("Scheduling file " + file.toString() + " for deletion");
  execute(volume.getCurrentDir(), new FileDeleteTask(volume, file));
}
java
{ "resource": "" }
q162343
NativeS3FileSystem.createParent
train
/**
 * Ensures the parent "directory" of path exists in S3 by storing an empty
 * folder-suffix marker object; no-op at the root.
 */
private void createParent(Path path) throws IOException {
  Path parent = path.getParent();
  if (parent == null) {
    return; // the root has no parent marker to create
  }
  String key = pathToKey(makeAbsolute(parent));
  if (key.length() > 0) {
    store.storeEmptyFile(key + FOLDER_SUFFIX);
  }
}
java
{ "resource": "" }
q162344
GenericOptionsParser.buildGeneralOptions
train
// Declares the generic Hadoop command-line options (-fs, -jt, -conf, -D, -libjars,
// -files, -archives) on opts and returns it.
@SuppressWarnings("static-access") private static Options buildGeneralOptions(Options opts) { Option fs = OptionBuilder.withArgName("local|namenode:port") .hasArg() .withDescription("specify a namenode") .create("fs"); Option jt = OptionBuilder.withArgName("local|jobtracker:port") .hasArg() .withDescription("specify a job tracker") .create("jt"); Option oconf = OptionBuilder.withArgName("configuration file") .hasArg() .withDescription("specify an application configuration file") .create("conf"); Option property = OptionBuilder.withArgName("property=value") .hasArg() .withDescription("use value for given property") .create('D'); Option libjars = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated jar files to include in the classpath.") .create("libjars"); Option files = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated files to be copied to the " + "map reduce cluster") .create("files"); Option archives = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated archives to be unarchived" + " on the compute machines.") .create("archives"); opts.addOption(fs); opts.addOption(jt); opts.addOption(oconf); opts.addOption(property); opts.addOption(libjars); opts.addOption(files); opts.addOption(archives); return opts; }
java
{ "resource": "" }
q162345
GenericOptionsParser.processGeneralOptions
train
// Applies parsed generic options to the configuration: default FS, job tracker, extra
// config resources, libjars (also pushed onto the client classpath/context classloader),
// files, archives, and -D key=value properties. IOExceptions from file validation are
// printed to stderr rather than rethrown.
private void processGeneralOptions(Configuration conf, CommandLine line) { if (line.hasOption("fs")) { FileSystem.setDefaultUri(conf, line.getOptionValue("fs")); } if (line.hasOption("jt")) { conf.set("mapred.job.tracker", line.getOptionValue("jt")); } if (line.hasOption("conf")) { String[] values = line.getOptionValues("conf"); for(String value : values) { conf.addResource(new Path(value)); } } try { if (line.hasOption("libjars")) { conf.set("tmpjars", validateFiles(line.getOptionValue("libjars"), conf)); //setting libjars in client classpath URL[] libjars = getLibJars(conf); if(libjars!=null && libjars.length>0) { conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader())); Thread.currentThread().setContextClassLoader( new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader())); } } if (line.hasOption("files")) { conf.set("tmpfiles", validateFiles(line.getOptionValue("files"), conf)); } if (line.hasOption("archives")) { conf.set("tmparchives", validateFiles(line.getOptionValue("archives"), conf)); } } catch (IOException ioe) { System.err.println(StringUtils.stringifyException(ioe)); } if (line.hasOption('D')) { String[] property = line.getOptionValues('D'); for(String prop : property) { String[] keyval = prop.split("=", 2); if (keyval.length == 2) { conf.set(keyval[0], keyval[1]); } } } conf.setBoolean("mapred.used.genericoptionsparser", true); }
java
{ "resource": "" }
q162346
GenericOptionsParser.getLibJars
train
/**
 * Returns the jars registered under "tmpjars" as local-file URLs, or null
 * when none are configured.
 */
public static URL[] getLibJars(Configuration conf) throws IOException {
  String jars = conf.get("tmpjars");
  if (jars == null) {
    return null;
  }
  String[] entries = jars.split(",");
  URL[] urls = new URL[entries.length];
  for (int i = 0; i < entries.length; i++) {
    urls[i] = FileSystem.getLocal(conf).pathToFile(new Path(entries[i])).toURI().toURL();
  }
  return urls;
}
java
{ "resource": "" }
q162347
GenericOptionsParser.parseGeneralOptions
train
// Parses the generic options out of args and applies them to conf, returning the
// remaining (non-generic) arguments. On a parse error, prints usage help and
// returns args unchanged.
private String[] parseGeneralOptions(Options opts, Configuration conf, String[] args) { opts = buildGeneralOptions(opts); CommandLineParser parser = new GnuParser(); try { commandLine = parser.parse(opts, args, true); processGeneralOptions(conf, commandLine); return commandLine.getArgs(); } catch(ParseException e) { LOG.warn("options parsing failed: "+e.getMessage()); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("general options are: ", opts); } return args; }
java
{ "resource": "" }
q162348
GenericOptionsParser.printGenericCommandUsage
train
// Prints the usage text for the generic command-line options to the given stream.
public static void printGenericCommandUsage(PrintStream out) { out.println("Generic options supported are"); out.println("-conf <configuration file> specify an application configuration file"); out.println("-D <property=value> use value for given property"); out.println("-fs <local|namenode:port> specify a namenode"); out.println("-jt <local|jobtracker:port> specify a job tracker"); out.println("-files <comma separated list of files> " + "specify comma separated files to be copied to the map reduce cluster"); out.println("-libjars <comma separated list of jars> " + "specify comma separated jar files to include in the classpath."); out.println("-archives <comma separated list of archives> " + "specify comma separated archives to be unarchived" + " on the compute machines.\n"); out.println("The general command line syntax is"); out.println("bin/hadoop command [genericOptions] [commandOptions]\n"); }
java
{ "resource": "" }
q162349
FaultManager.addNode
train
/** Registers a node with a fresh, zeroed fault-stats entry per resource type. */
public void addNode(String name, Set<ResourceType> resourceTypes) {
  final List<FaultStatsForType> stats =
      new ArrayList<FaultStatsForType>(resourceTypes.size());
  for (ResourceType type : resourceTypes) {
    stats.add(new FaultStatsForType(type));
  }
  nodeToFaultStats.put(name, stats);
}
java
{ "resource": "" }
q162350
FaultManager.nodeFeedback
train
// Folds a session's usage report into the node's per-resource fault stats (counting
// sessions with too many failed connections / failures for each matching resource type)
// and triggers blacklisting when anything changed. Feedback for nodes that have been
// deleted is logged and ignored.
public void nodeFeedback(String nodeName, List<ResourceType> resourceTypes, NodeUsageReport usageReport) { List<FaultStatsForType> faultStats = nodeToFaultStats.get(nodeName); if (faultStats == null) { LOG.info("Received node feedback for deleted node " + nodeName); return; } boolean statsModified = false; synchronized (faultStats) { if (tooManyFailedConnectionsInSession(usageReport)) { for (FaultStatsForType stat : faultStats) { if (resourceTypes.contains(stat.type)) { stat.numSessionsWithFailedConnections++; statsModified = true; } } } if (tooManyFailuresInSession(usageReport)) { for (FaultStatsForType stat : faultStats) { if (resourceTypes.contains(stat.type)) { stat.numSessionsWithTooManyFailures++; statsModified = true; } } } } if (statsModified) { blacklistIfNeeded(nodeName, faultStats); } }
java
{ "resource": "" }
q162351
FaultManager.isBlacklisted
train
/** Returns whether the node is currently blacklisted for the given resource type. */
public boolean isBlacklisted(String nodeName, ResourceType type) {
  List<ResourceType> types = blacklistedNodes.get(nodeName);
  if (types == null) {
    return false;
  }
  synchronized (types) {
    return types.contains(type);
  }
}
java
{ "resource": "" }
q162352
FaultManager.getBlacklistedNodes
train
/**
 * Returns a snapshot list of the currently blacklisted node names.
 * Fix: the manual element-by-element copy loop is replaced with the
 * ArrayList copy constructor, which performs the same snapshot copy.
 */
public List<String> getBlacklistedNodes() {
  return new ArrayList<String>(blacklistedNodes.keySet());
}
java
{ "resource": "" }
q162353
FaultManager.blacklistIfNeeded
train
// Blacklists the node for each resource type whose fault stats cross the failure or
// connection-failure thresholds, skipping types that are already blacklisted.
private void blacklistIfNeeded( String nodeName, List<FaultStatsForType> faultStats) { for (FaultStatsForType stat : faultStats) { if (isBlacklisted(nodeName, stat.type)) { continue; } if (tooManyFailuresOnNode(stat) || tooManyConnectionFailuresOnNode(stat)) { nm.blacklistNode(nodeName, stat.type); blacklist(nodeName, stat.type); } } }
java
{ "resource": "" }
q162354
FaultManager.blacklist
train
// Adds the resource type to the node's blacklist entry, creating the entry on first use.
// NOTE(review): the get/put pair on blacklistedNodes is not synchronized here (only the
// inner list is) — confirm the map is a concurrent implementation or externally locked.
private void blacklist(String nodeName, ResourceType type) { List<ResourceType> blacklistedResourceTypes = blacklistedNodes.get(nodeName); if (blacklistedResourceTypes == null) { blacklistedResourceTypes = new ArrayList<ResourceType>(); blacklistedNodes.put(nodeName, blacklistedResourceTypes); } synchronized (blacklistedResourceTypes) { if (!blacklistedResourceTypes.contains(type)) { blacklistedResourceTypes.add(type); } } }
java
{ "resource": "" }
q162355
FSPermissionChecker.checkPermission
train
// Central HDFS permission check over an inode path: finds the deepest non-null ancestor,
// verifies traverse access along the path, then checks the requested ancestor, parent,
// target, and subtree accesses in order, and finally ownership if requested. Any null
// access argument skips that particular check.
void checkPermission(String path, INode[] inodes, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess) throws AccessControlException { if (LOG.isDebugEnabled()) { LOG.debug("ACCESS CHECK: " + this + ", doCheckOwner=" + doCheckOwner + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess + ", access=" + access + ", subAccess=" + subAccess); } int ancestorIndex = inodes.length - 2; for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--); checkTraverse(inodes, ancestorIndex); if (ancestorAccess != null && inodes.length > 1) { check(inodes, ancestorIndex, ancestorAccess); } if (parentAccess != null && inodes.length > 1) { check(inodes, inodes.length - 2, parentAccess); } if (access != null) { check(inodes[inodes.length - 1], access); } if (subAccess != null) { checkSubAccess(inodes[inodes.length - 1], subAccess); } if (doCheckOwner) { checkOwner(inodes[inodes.length - 1]); } }
java
{ "resource": "" }
q162356
WebUtils.convertResourceTypesToStrings
train
/** Converts each resource type to its string form, preserving iteration order. */
public static Collection<String> convertResourceTypesToStrings(
    Collection<ResourceType> resourceTypes) {
  final List<String> names = new ArrayList<String>(resourceTypes.size());
  for (ResourceType type : resourceTypes) {
    names.add(type.toString());
  }
  return names;
}
java
{ "resource": "" }
q162357
WebUtils.validateAttributeNames
train
/**
 * Validates that every request attribute is one of the allowed parameter
 * names; returns an error message for the first illegal one, or null if all
 * are valid.
 * Fix: the error message had garbled quoting ("'users, " / "poolGroups, ")
 * and a missing space before "parameters allowed." — normalized so each
 * allowed name is consistently quoted.
 */
public static String validateAttributeNames(
    Enumeration<String> attributeNames) {
  while (attributeNames.hasMoreElements()) {
    String attribute = attributeNames.nextElement();
    if (!attribute.equals("users") &&
        !attribute.equals("poolGroups") &&
        !attribute.equals("poolInfos") &&
        !attribute.equals("toKillSessionId") &&
        !attribute.equals("killSessionsToken")) {
      return "Illegal parameter " + attribute + ", only 'users', " +
          "'poolGroups', 'poolInfos', 'toKillSessionId' and 'killSessionsToken'" +
          " parameters allowed.";
    }
  }
  return null;
}
java
{ "resource": "" }
q162358
WebUtils.isValidKillSessionsToken
train
/** Returns true iff the token is non-empty and matches one of VALID_TOKENS. */
public static boolean isValidKillSessionsToken(String token) {
  if (token == null || token.isEmpty()) {
    return false;
  }
  for (String candidate : VALID_TOKENS) {
    if (token.equals(candidate)) {
      return true;
    }
  }
  return false;
}
java
{ "resource": "" }
q162359
WebUtils.getJspParameterFilters
train
// Parses the comma-separated user/poolGroup/poolInfo filter strings (where the literal
// "null" means absent) into the corresponding filter sets, and accumulates an HTML
// summary of the active filters. PoolInfo entries are expected as "group.name" pairs;
// malformed entries are silently skipped.
public static JspParameterFilters getJspParameterFilters( String userFilter, String poolGroupFilter, String poolInfoFilter) { JspParameterFilters filters = new JspParameterFilters(); if (userFilter != null && !userFilter.equals("null")) { filters.getUserFilterSet().addAll(Arrays.asList(userFilter.split(","))); filters.getHtmlOutput().append(("<b>users:</b> " + userFilter + "<br>")); } if (poolGroupFilter != null && !poolGroupFilter.equals("null")) { filters.getPoolGroupFilterSet().addAll( Arrays.asList(poolGroupFilter.split(","))); filters.getHtmlOutput().append("<b>poolGroups:</b> " + poolGroupFilter + "<br>"); } if (poolInfoFilter != null && !poolInfoFilter.equals("null")) { filters.getHtmlOutput().append( "<b>poolInfos:</b> " + poolInfoFilter + "<br>"); for (String poolInfoString : poolInfoFilter.split(",")) { String[] poolInfoStrings = poolInfoString.split("[.]"); if (poolInfoStrings.length == 2) { filters.getPoolInfoFilterSet().add(new PoolInfo(poolInfoStrings[0], poolInfoStrings[1])); } } } return filters; }
java
{ "resource": "" }
q162360
ZlibCompressor.setInputFromSavedData
train
// Moves as much of the saved user-buffer data as fits into the direct uncompressed
// buffer, advancing the user-buffer offset/remaining-length and recording the new
// direct-buffer fill level.
synchronized void setInputFromSavedData() { int len = Math.min(userBufLen, uncompressedDirectBuf.remaining()); ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff, len); userBufLen -= len; userBufOff += len; uncompressedDirectBufLen = uncompressedDirectBuf.position(); }
java
{ "resource": "" }
q162361
AtomicFileOutputStream.abort
train
/**
 * Aborts the atomic write: closes the underlying stream and deletes the
 * temporary file so it is never renamed over the destination. Failures are
 * logged but not propagated — abort is best-effort cleanup.
 */
public void abort() {
  try {
    super.close();
  } catch (IOException ioe) {
    LOG.warn("Unable to abort file " + tmpFile, ioe);
  }
  if (!tmpFile.delete()) {
    LOG.warn("Unable to delete tmp file during abort " + tmpFile);
  }
}
java
{ "resource": "" }
q162362
SimulatorJobTracker.startTracker
train
/**
 * Creates and wires up a SimulatorJobTracker driven by the given engine,
 * starting its clock at startTime.
 *
 * NOTE(review): construction failures are logged and swallowed, so this
 * can return null; callers must handle that.
 *
 * @param conf job tracker configuration
 * @param startTime initial simulated time
 * @param engine simulation engine driving the tracker
 * @return the tracker, or null if construction failed
 */
public static SimulatorJobTracker startTracker(JobConf conf, long startTime,
    SimulatorEngine engine) throws IOException {
  SimulatorJobTracker result = null;
  try {
    SimulatorClock simClock = new SimulatorClock(startTime);
    result = new SimulatorJobTracker(conf, simClock, engine);
    result.taskScheduler.setTaskTrackerManager(result);
  } catch (IOException e) {
    LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e));
  } catch (InterruptedException e) {
    LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e));
  }
  // Only start the end-of-job notifier when the tracker actually exists.
  if (result != null) {
    JobEndNotifier.startNotifier();
  }
  return result;
}
java
{ "resource": "" }
q162363
SimulatorJobTracker.startTracker
train
/**
 * Convenience overload: starts a tracker with a freshly created
 * SimulatorEngine. See the three-argument overload for details.
 */
public static SimulatorJobTracker startTracker(JobConf conf, long startTime)
    throws IOException, InterruptedException {
  return startTracker(conf, startTime, new SimulatorEngine());
}
java
{ "resource": "" }
q162364
SimulatorJobTracker.getClock
train
/**
 * Returns the tracker's simulated clock. The assertion (active only with
 * -ea) checks the engine and tracker clocks have not drifted apart.
 */
static Clock getClock() {
  assert(engine.getCurrentTime() == clock.getTime()):
      " Engine time = " + engine.getCurrentTime() +
      " JobTracker time = " + clock.getTime();
  return clock;
}
java
{ "resource": "" }
q162365
SimulatorJobTracker.cleanupJob
train
// Queues the finished job for cleanup and evicts the oldest queued jobs
// once the queue exceeds the in-memory cap.
// NOTE(review): the evicted job id is polled but the retireJob call is
// commented out, so eviction currently only drops the id from the queue —
// confirm whether retirement was intentionally disabled.
private void cleanupJob(JobInProgress job) {
  cleanupQueue.add(job.getJobID());
  while(cleanupQueue.size()> JOBS_IN_MUMAK_MEMORY) {
    JobID removedJob = cleanupQueue.poll();
    // retireJob(removedJob, "");
  }
}
java
{ "resource": "" }
q162366
SimulatorJobTracker.validateAndSetClock
train
/**
 * Advances the tracker's simulated clock to newSimulationTime after
 * validating that time never moves backwards and that the new time agrees
 * with the engine's notion of "now".
 *
 * @param newSimulationTime target simulated time
 * @throws IllegalArgumentException if newSimulationTime precedes the
 *         current clock value
 */
private void validateAndSetClock(long newSimulationTime) {
  // We do not use the getClock routine here as
  // the Engine and JobTracker clocks are different at
  // this point.
  long currentSimulationTime = clock.getTime();
  if (newSimulationTime < currentSimulationTime) {
    // time has gone backwards
    throw new IllegalArgumentException("Time has gone backwards! " +
        "newSimulationTime: " + newSimulationTime +
        " while currentTime: " + currentSimulationTime);
  }
  // the simulation time should also match that in the engine
  assert(newSimulationTime == engine.getCurrentTime()) :
      " newTime =" + newSimulationTime +
      " engineTime = " + engine.getCurrentTime();
  // set the current simulation time
  clock.setTime(newSimulationTime);
}
java
{ "resource": "" }
q162367
SimulatorJobTracker.getMapCompletionTasks
train
/**
 * Computes AllMapsCompletedTaskAction responses for reduce attempts on the
 * given tracker that are stuck in SHUFFLE waiting for map output, provided
 * all of the job's maps have finished.
 *
 * Attempts about to be killed (tasksToKill) are excluded so a single
 * heartbeat never carries both a KillTask and an AllMapsCompleted for the
 * same attempt. Reports for jobs that no longer exist are treated as
 * zombie reduces and their jobs are queued for cleanup on this tracker.
 *
 * @param status latest status report from the task tracker
 * @param tasksToKill kill actions already scheduled for this heartbeat
 *        (each entry assumed to be a KillTaskAction), may be null
 * @return map-completion actions to send back to the tracker
 */
private List<TaskTrackerAction> getMapCompletionTasks(
    TaskTrackerStatus status, List<TaskTrackerAction> tasksToKill) {
  boolean loggingEnabled = LOG.isDebugEnabled();
  // Build up the list of tasks about to be killed
  Set<TaskAttemptID> killedTasks = new HashSet<TaskAttemptID>();
  if (tasksToKill != null) {
    for (TaskTrackerAction taskToKill : tasksToKill) {
      killedTasks.add(((KillTaskAction)taskToKill).getTaskID());
    }
  }
  String trackerName = status.getTrackerName();
  List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
  // loop through the list of task statuses
  for (TaskStatus report : status.getTaskReports()) {
    TaskAttemptID taskAttemptId = report.getTaskID();
    SimulatorJobInProgress job = getSimulatorJob(taskAttemptId.getJobID());
    if(job ==null) {
      // This job has completed before.
      // and this is a zombie reduce-task
      Set<JobID> jobsToCleanup = trackerToJobsToCleanup.get(trackerName);
      if (jobsToCleanup == null) {
        jobsToCleanup = new HashSet<JobID>();
        trackerToJobsToCleanup.put(trackerName, jobsToCleanup);
      }
      jobsToCleanup.add(taskAttemptId.getJobID());
      continue;
    }
    JobStatus jobStatus = job.getStatus();
    TaskInProgress tip = taskidToTIPMap.get(taskAttemptId);
    // if the job is running, attempt is running
    // no KillTask is being sent for this attempt
    // task is a reduce and attempt is in shuffle phase
    // this precludes sending both KillTask and AllMapsCompletion
    // for same reduce-attempt
    if (jobStatus.getRunState()== JobStatus.RUNNING
        && tip.isRunningTask(taskAttemptId)
        && !killedTasks.contains(taskAttemptId)
        && !report.getIsMap()
        && report.getPhase() == TaskStatus.Phase.SHUFFLE) {
      if (loggingEnabled) {
        LOG.debug("Need map-completion information for REDUCEattempt "
            + taskAttemptId + " in tracker " + trackerName);
        LOG.debug("getMapCompletion: job=" + job.getJobID() +
            " pendingMaps=" + job.pendingMaps());
      }
      // Check whether the number of finishedMaps equals the
      // number of maps
      boolean canSendMapCompletion = false;
      canSendMapCompletion = (job.finishedMaps()==job.desiredMaps());
      if (canSendMapCompletion) {
        if (loggingEnabled) {
          LOG.debug("Adding MapCompletion for taskAttempt " + taskAttemptId
              + " in tracker " + trackerName);
          LOG.debug("FinishedMaps for job:" + job.getJobID() + " is = " +
              job.finishedMaps() + "/" + job.desiredMaps());
          LOG.debug("AllMapsCompleted for task " + taskAttemptId +
              " time=" + getClock().getTime());
        }
        actions.add(new AllMapsCompletedTaskAction(taskAttemptId));
      }
    }
  }
  return actions;
}
java
{ "resource": "" }
q162368
BookKeeperJournalInputStream.nextEntryStream
train
/**
 * Opens an InputStream over the next BookKeeper ledger entry, advancing
 * the stream state's entry cursor.
 *
 * @return stream over the next entry, or null when the cursor has moved
 *         past the last entry known to exist (caller must re-probe the
 *         ledger tail or move to the next ledger)
 * @throws IOException on unrecoverable BookKeeper errors or interruption
 *         (the interrupt flag is restored before wrapping)
 */
private InputStream nextEntryStream() throws IOException {
  long nextLedgerEntryId = currentStreamState.getNextLedgerEntryId();
  if (nextLedgerEntryId > maxLedgerEntryIdSeen) {
    // Refresh our view of the ledger tail before concluding we're at end.
    updateMaxLedgerEntryIdSeen();
    if (nextLedgerEntryId > maxLedgerEntryIdSeen) {
      // Return null if we've reached the end of the ledger: we can not
      // read beyond the end of the ledger and it is up to the caller to
      // either find the new "tail" of the ledger (if the ledger is in-
      // progress) or open the next ledger (if the ledger is finalized)
      if (LOG.isDebugEnabled()) {
        LOG.debug("Requesting to ledger entryId " + nextLedgerEntryId +
            ", but "+ " maxLedgerEntryIdSeen is " + maxLedgerEntryIdSeen +
            ", ledger length is " + ledger.getLength());
      }
      return null;
    }
  }
  try {
    // Read exactly one entry: [nextLedgerEntryId, nextLedgerEntryId].
    Enumeration<LedgerEntry> entries =
        ledger.readEntries(nextLedgerEntryId, nextLedgerEntryId);
    currentStreamState.incrementNextLedgerEntryId();
    if (entries.hasMoreElements()) {
      LedgerEntry entry = entries.nextElement();
      if (entries.hasMoreElements()) {
        throw new IllegalStateException("More than one entry retrieved!");
      }
      currentStreamState.setOffsetInEntry(0);
      return entry.getEntryInputStream();
    }
  } catch (BKException e) {
    throw new IOException("Unrecoverable BookKeeper error reading entry " +
        nextLedgerEntryId, e);
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted reading BookKeeper entry " +
        nextLedgerEntryId, e);
  }
  return null;
}
java
{ "resource": "" }
q162369
BookKeeperJournalInputStream.position
train
/**
 * Positions the journal input stream at the given byte offset within the
 * ledger. Three cases: offset 0 rewinds to the first entry; an arbitrary
 * offset (not matching the saved reader position) is reached by rewinding
 * and brute-force skipping; an offset matching the saved stream state is
 * restored by resuming from the saved entry and skipping only the delta.
 *
 * @param position absolute byte offset in the ledger to seek to
 * @throws IllegalArgumentException if a brute-force seek beyond
 *         Integer.MAX_VALUE is requested, or the saved state is behind
 *         the requested position
 */
public void position(long position) throws IOException {
  if (position == 0) {
    // Rewind to the start of the ledger.
    currentStreamState.setNextLedgerEntryId(firstLedgerEntryId);
    currentStreamState.setOffsetInEntry(0);
    entryStream = null;
  } else if (savedStreamState == null ||
      position != savedStreamState.getReaderPosition()) {
    // Seek to an arbitrary position through "brute force"
    if (position > Integer.MAX_VALUE) {
      throw new IllegalArgumentException("Asked to position to " +
          position + ", but can only \"brute-force\" skip up" +
          Integer.MAX_VALUE);
    }
    position(0);
    skip(position, (int) position);
  } else { // savedStream != null && position == savedStream.getReaderPosition()
    int bytesToSkip = 0;
    if (savedStreamState.getOffsetInLedger() > position) {
      // Since reading from the input stream is buffered, we usually will
      // read further into the ledger than the reader has actually
      // read into. In this case we will need to find out exactly *what*
      // position within the ledger entry matches with the reader's last
      // known good position.
      long entryStartPosition = savedStreamState.getOffsetInLedger() -
          savedStreamState.getOffsetInEntry();
      bytesToSkip = (int) (position - entryStartPosition);
    } else if (savedStreamState.getOffsetInLedger() < position) {
      throw new IllegalArgumentException("Saved offset in ledger (" +
          savedStreamState.getOffsetInLedger() + ") < position(" +
          position + ")");
    }
    // Re-open the entry the reader was last inside of: the saved "next"
    // id points one past it, except at the very first entry.
    long nextLedgerEntryId =
        savedStreamState.getNextLedgerEntryId() == firstLedgerEntryId ?
            firstLedgerEntryId :
            (savedStreamState.getNextLedgerEntryId() - 1);
    currentStreamState.setNextLedgerEntryId(nextLedgerEntryId);
    if (bytesToSkip > 0) {
      entryStream = null;
      skip(position, bytesToSkip);
    } else {
      if (currentStreamState.getNextLedgerEntryId() > 0) {
        currentStreamState.setNextLedgerEntryId(
            currentStreamState.getNextLedgerEntryId() - 1);
      }
      entryStream = nextEntryStream();
    }
  }
  currentStreamState.setOffsetInLedger(position);
}
java
{ "resource": "" }
q162370
RaidShell.verifyParity
train
private void verifyParity(String[] args, int startIndex) { boolean restoreReplication = false; int repl = -1; Path root = null; for (int i = startIndex; i < args.length; i++) { String arg = args[i]; if (arg.equals("-restore")) { restoreReplication = true; } else if (arg.equals("-repl")){ i++; if (i >= args.length) { throw new IllegalArgumentException("Missing repl after -r option"); } repl = Integer.parseInt(args[i]); } else { root = new Path(arg); } } if (root == null) { throw new IllegalArgumentException("Too few arguments"); } if (repl == -1) { throw new IllegalArgumentException("Need to specify -r option"); } if (repl < 1 || repl > 3) { throw new IllegalArgumentException("repl could only in the range [1, 3]"); } Codec matched = null; String rootPath = root.toUri().getPath(); if (!rootPath.endsWith(Path.SEPARATOR)) { rootPath += Path.SEPARATOR; } for (Codec code : Codec.getCodecs()) { if (rootPath.startsWith(code.getParityPrefix())) { matched = code; break; } } if (matched == null) { throw new IllegalArgumentException( "root needs to starts with parity dirs"); } try { FileSystem fs = root.getFileSystem(conf); // Make sure default uri is the same as root conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString()); ParityVerifier pv = new ParityVerifier(conf, restoreReplication, repl, matched); pv.verifyParities(root, System.out); } catch (IOException ex) { System.err.println("findMissingParityFiles: " + ex); } }
java
{ "resource": "" }
q162371
RaidShell.recover
train
/**
 * Recovers corrupt files through the raid filesystem. Arguments come in
 * (path, corruptOffset) pairs starting at startindex; each source path is
 * copied via DistributedRaidFileSystem (which reconstructs missing blocks
 * from parity) into a fresh /tmp/recovered.&lt;timestamp&gt; file.
 *
 * NOTE(review): corruptOffset is parsed and logged but not otherwise used
 * by the copy — the whole file is recovered.
 *
 * @param cmd command name (unused)
 * @param argv argument array containing path/offset pairs
 * @param startindex index of the first path argument
 * @return paths of the recovered temporary files, one per input pair
 */
public Path[] recover(String cmd, String argv[], int startindex)
    throws IOException {
  Path[] paths = new Path[(argv.length - startindex) / 2];
  int j = 0;
  for (int i = startindex; i < argv.length; i = i + 2) {
    String path = argv[i];
    long corruptOffset = Long.parseLong(argv[i+1]);
    LOG.info("RaidShell recoverFile for " + path + " corruptOffset " +
        corruptOffset);
    Path recovered = new Path("/tmp/recovered." + System.currentTimeMillis());
    FileSystem fs = recovered.getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Route reads through the raid filesystem so corrupt blocks are
    // transparently reconstructed from parity during the copy.
    Configuration raidConf = new Configuration(conf);
    raidConf.set("fs.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    raidConf.set("fs.raid.underlyingfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    // Disable the FS cache so the raid-specific config takes effect.
    raidConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    java.net.URI dfsUri = dfs.getUri();
    FileSystem raidFs = FileSystem.get(dfsUri, raidConf);
    FileUtil.copy(raidFs, new Path(path), fs, recovered, false, conf);
    paths[j] = recovered;
    LOG.info("Raidshell created recovery file " + paths[j]);
    j++;
  }
  return paths;
}
java
{ "resource": "" }
q162372
RaidShell.isFileCorrupt
train
/**
 * Convenience overload: checks file corruption using this shell's own
 * configuration and missing-block counters, without counting read-only
 * data. Delegates to the full isFileCorrupt variant.
 */
protected boolean isFileCorrupt(final DistributedFileSystem dfs,
    final FileStatus fileStat)
    throws IOException {
  return isFileCorrupt(dfs, fileStat, false, conf,
      this.numNonRaidedMissingBlks,
      this.numStrpMissingBlksMap);
}
java
{ "resource": "" }
q162373
FastProtocolRegister.tryGetMethod
train
public static Method tryGetMethod(String id) { if (id.length() != NAME_LEN) { // we use it to fast discard the request without doing map lookup return null; } return idToMethod.get(id); }
java
{ "resource": "" }
q162374
Job.setCombinerClass
train
/**
 * Sets the combiner class for the job.
 *
 * @param cls combiner implementation (a Reducer subclass)
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setCombinerClass(Class<? extends Reducer> cls
    ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setClass(COMBINE_CLASS_ATTR, cls, Reducer.class);
}
java
{ "resource": "" }
q162375
Job.setMapOutputKeyClass
train
/**
 * Sets the key class for the map output records.
 *
 * @param theClass map output key class
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setMapOutputKeyClass(Class<?> theClass
    ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setMapOutputKeyClass(theClass);
}
java
{ "resource": "" }
q162376
Job.setMapOutputValueClass
train
/**
 * Sets the value class for the map output records.
 *
 * @param theClass map output value class
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setMapOutputValueClass(Class<?> theClass
    ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setMapOutputValueClass(theClass);
}
java
{ "resource": "" }
q162377
Job.setOutputKeyClass
train
/**
 * Sets the key class for the job output records.
 *
 * @param theClass job output key class
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setOutputKeyClass(Class<?> theClass
    ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setOutputKeyClass(theClass);
}
java
{ "resource": "" }
q162378
Job.setOutputValueClass
train
/**
 * Sets the value class for the job output records.
 *
 * @param theClass job output value class
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setOutputValueClass(Class<?> theClass
    ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setOutputValueClass(theClass);
}
java
{ "resource": "" }
q162379
Job.setJobName
train
/**
 * Sets the user-visible name of the job.
 *
 * @param name job name
 * @throws IllegalStateException if the job is no longer in DEFINE state
 */
public void setJobName(String name) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setJobName(name);
}
java
{ "resource": "" }
q162380
Job.killTask
train
/**
 * Kills the given task attempt (not marked as failed).
 *
 * @param taskId attempt to kill; downgraded to the old-API id type for
 *        the underlying RunningJob call
 * @throws IllegalStateException if the job is not RUNNING
 */
public void killTask(TaskAttemptID taskId) throws IOException {
  ensureState(JobState.RUNNING);
  info.killTask(org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId),
                false);
}
java
{ "resource": "" }
q162381
Job.getCounters
train
/**
 * Fetches the job's counters, wrapping the old-API counters object in the
 * new-API type.
 *
 * @return the counters, or null if none are available yet
 * @throws IllegalStateException if the job is not RUNNING
 */
public Counters getCounters() throws IOException {
  ensureState(JobState.RUNNING);
  org.apache.hadoop.mapred.Counters ctrs = info.getCounters();
  // Preserve a null result rather than wrapping it.
  return (ctrs == null) ? null : new Counters(ctrs);
}
java
{ "resource": "" }
q162382
Job.setUseNewAPI
train
/**
 * Decides, per phase, whether the job uses the new mapreduce API or the
 * old mapred API, defaulting each flag from whether the corresponding
 * old-API class is configured, then verifies that no conflicting settings
 * from the other API are present.
 *
 * When the job has zero reduces the partitioner is irrelevant and the
 * output format is owned by the map phase, so the checks shift
 * accordingly; the reduce-side flags are only resolved when reduces exist.
 *
 * @throws IOException via ensureNotSet when a setting from the opposite
 *         API is detected
 */
private void setUseNewAPI() throws IOException {
  int numReduces = conf.getNumReduceTasks();
  String oldMapperClass = "mapred.mapper.class";
  String oldReduceClass = "mapred.reducer.class";
  // Default: use the new mapper API unless an old-API mapper is set.
  conf.setBooleanIfUnset("mapred.mapper.new-api",
                         conf.get(oldMapperClass) == null);
  if (conf.getUseNewMapper()) {
    String mode = "new map API";
    ensureNotSet("mapred.input.format.class", mode);
    ensureNotSet(oldMapperClass, mode);
    if (numReduces != 0) {
      ensureNotSet("mapred.partitioner.class", mode);
    } else {
      // Map-only job: the map phase owns the output format.
      ensureNotSet("mapred.output.format.class", mode);
    }
  } else {
    String mode = "map compatability";
    ensureNotSet(JobContext.INPUT_FORMAT_CLASS_ATTR, mode);
    ensureNotSet(JobContext.MAP_CLASS_ATTR, mode);
    if (numReduces != 0) {
      ensureNotSet(JobContext.PARTITIONER_CLASS_ATTR, mode);
    } else {
      ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode);
    }
  }
  if (numReduces != 0) {
    conf.setBooleanIfUnset("mapred.reducer.new-api",
                           conf.get(oldReduceClass) == null);
    if (conf.getUseNewReducer()) {
      String mode = "new reduce API";
      ensureNotSet("mapred.output.format.class", mode);
      ensureNotSet(oldReduceClass, mode);
    } else {
      String mode = "reduce compatability";
      ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode);
      ensureNotSet(JobContext.REDUCE_CLASS_ATTR, mode);
    }
  }
}
java
{ "resource": "" }
q162383
Job.submit
train
/**
 * Submits the job to the cluster and returns immediately. Resolves the
 * old/new API choice first, then transitions the job from DEFINE to
 * RUNNING.
 *
 * @throws IllegalStateException if the job is not in DEFINE state
 */
public void submit() throws IOException, InterruptedException,
                            ClassNotFoundException {
  ensureState(JobState.DEFINE);
  setUseNewAPI();
  info = jobClient.submitJobInternal(conf);
  state = JobState.RUNNING;
}
java
{ "resource": "" }
q162384
Job.waitForCompletion
train
/**
 * Submits the job if needed and blocks until it completes.
 *
 * @param verbose if true, progress is printed while waiting; otherwise
 *        the wait is silent
 * @return true if the job succeeded
 */
public boolean waitForCompletion(boolean verbose
    ) throws IOException, InterruptedException, ClassNotFoundException {
  // Allow callers to skip an explicit submit().
  if (state == JobState.DEFINE) {
    submit();
  }
  if (verbose) {
    jobClient.monitorAndPrintJob(conf, info);
  } else {
    info.waitForCompletion();
  }
  return isSuccessful();
}
java
{ "resource": "" }
q162385
StreamJob.setUserJobConfProps
train
// Copies user-supplied job conf properties into jobConf_, in two passes:
// when doEarlyProps is true only the "early" keys (fs.default.name and
// stream.shipped.hadoopstreaming) are applied, otherwise everything else.
// NOTE(review): iterating keySet then calling get() does a second lookup
// per key; entrySet iteration would avoid it, but the map is raw-typed so
// this is left as-is.
protected void setUserJobConfProps(boolean doEarlyProps) {
  Iterator it = userJobConfProps_.keySet().iterator();
  while (it.hasNext()) {
    String key = (String) it.next();
    String val = (String)userJobConfProps_.get(key);
    boolean earlyName = key.equals("fs.default.name");
    earlyName |= key.equals("stream.shipped.hadoopstreaming");
    // Apply the property only in the pass it belongs to.
    if (doEarlyProps == earlyName) {
      msg("xxxJobConf: set(" + key + ", " + val + ") early=" + doEarlyProps);
      jobConf_.set(key, val);
    }
  }
}
java
{ "resource": "" }
q162386
JournalNodeHttpServer.getJournalStats
train
/**
 * Collects per-journal statistics (committed/segment/written txids, lag,
 * writer epoch) keyed by journal id. A journal whose stats cannot be read
 * is logged and may be left with a partially filled entry.
 *
 * @param journals journals to inspect
 * @return map from journal id to stat-name/value pairs
 */
public static Map<String, Map<String, String>> getJournalStats(
    Collection<Journal> journals) {
  Map<String, Map<String, String>> stats
    = new HashMap<String, Map<String, String>>();
  for (Journal j : journals) {
    try {
      Map<String, String> stat = new HashMap<String, String>();
      stats.put(j.getJournalId(), stat);
      stat.put("Txid committed", Long.toString(j.getCommittedTxnId()));
      stat.put("Txid segment", Long.toString(j.getCurrentSegmentTxId()));
      stat.put("Txid written", Long.toString(j.getHighestWrittenTxId()));
      stat.put("Current lag", Long.toString(j.getCurrentLagTxns()));
      stat.put("Writer epoch", Long.toString(j.getLastWriterEpoch()));
    } catch (IOException e) {
      // Best-effort stats page: log and continue with other journals.
      LOG.error("Error when collectng stats", e);
    }
  }
  return stats;
}
java
{ "resource": "" }
q162387
JournalNodeHttpServer.sendResponse
train
/**
 * Writes the given text to the servlet response and closes the writer.
 *
 * @param output text to send to the client
 * @param response servlet response providing the writer
 * @throws IOException if obtaining the writer fails
 */
static void sendResponse(String output, HttpServletResponse response)
    throws IOException {
  // try-with-resources matches the original null-guarded finally close:
  // the writer is closed iff it was successfully obtained.
  try (PrintWriter out = response.getWriter()) {
    out.write(output);
  }
}
java
{ "resource": "" }
q162388
DFSAdmin.report
train
/**
 * Prints a cluster summary to stdout: safe-mode/upgrade status, capacity
 * and usage, block health counters, and a per-datanode report for live
 * and dead nodes. Does nothing if no DistributedFileSystem is configured.
 */
public void report() throws IOException {
  DistributedFileSystem dfs = getDFS();
  if (dfs != null) {
    DiskStatus ds = dfs.getDiskStatus();
    long capacity = ds.getCapacity();
    long used = ds.getDfsUsed();
    long remaining = ds.getRemaining();
    // "Present" capacity excludes non-DFS usage on the datanodes.
    long presentCapacity = used + remaining;
    boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
    UpgradeStatusReport status =
                    dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);

    if (mode) {
      System.out.println("Safe mode is ON");
    }
    if (status != null) {
      System.out.println(status.getStatusText(false));
    }
    System.out.println("Configured Capacity: " + capacity
                       + " (" + StringUtils.byteDesc(capacity) + ")");
    System.out.println("Present Capacity: " + presentCapacity
        + " (" + StringUtils.byteDesc(presentCapacity) + ")");
    System.out.println("DFS Remaining: " + remaining
        + " (" + StringUtils.byteDesc(remaining) + ")");
    System.out.println("DFS Used: " + used
                       + " (" + StringUtils.byteDesc(used) + ")");
    System.out.println("DFS Used%: "
        + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
        + "%");

    /* These counts are not always upto date. They are updated after
     * iteration of an internal list. Should be updated in a few seconds to
     * minutes. Use "-metaSave" to list of all such blocks and accurate
     * counts.
     */
    System.out.println("Under replicated blocks: " +
                       dfs.getUnderReplicatedBlocksCount());
    System.out.println("Blocks with corrupt replicas: " +
                       dfs.getCorruptBlocksCount());
    System.out.println("Missing blocks: " +
                       dfs.getMissingBlocksCount());

    System.out.println();

    System.out.println("-------------------------------------------------");

    DatanodeInfo[] live = dfs.getClient().datanodeReport(
                                                   DatanodeReportType.LIVE);
    DatanodeInfo[] dead = dfs.getClient().datanodeReport(
                                                   DatanodeReportType.DEAD);
    System.out.println("Datanodes available: " + live.length +
                       " (" + (live.length + dead.length) + " total, " +
                       dead.length + " dead)\n");

    for (DatanodeInfo dn : live) {
      System.out.println(dn.getDatanodeReport());
      System.out.println();
    }
    for (DatanodeInfo dn : dead) {
      System.out.println(dn.getDatanodeReport());
      System.out.println();
    }
  }
}
java
{ "resource": "" }
q162389
DFSAdmin.upgradeProgress
train
/**
 * Implements the "-upgradeProgress" admin command: maps the single
 * argument ("status" | "details" | "force", case-insensitive) to an
 * UpgradeAction and prints the resulting status text.
 *
 * @param argv full command-line arguments
 * @param idx index of the action argument (must be the last argument)
 * @return 0 on success, -1 on usage error or when the filesystem is not
 *         a DistributedFileSystem
 */
public int upgradeProgress(String[] argv, int idx) throws IOException {
  DistributedFileSystem dfs = getDFS();
  if (dfs == null) {
    System.out.println("FileSystem is " + getFS().getUri());
    return -1;
  }
  if (idx != argv.length - 1) {
    printUsage("-upgradeProgress");
    return -1;
  }

  UpgradeAction action;
  if ("status".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.GET_STATUS;
  } else if ("details".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.DETAILED_STATUS;
  } else if ("force".equalsIgnoreCase(argv[idx])) {
    action = UpgradeAction.FORCE_PROCEED;
  } else {
    printUsage("-upgradeProgress");
    return -1;
  }

  UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
  String statusText = (status == null ?
      "There are no upgrades in progress." :
      status.getStatusText(action == UpgradeAction.DETAILED_STATUS));
  System.out.println(statusText);
  return 0;
}
java
{ "resource": "" }
q162390
DFSAdmin.getClientDatanodeProtocol
train
/**
 * Creates an RPC proxy to a datanode's client protocol.
 *
 * @param dnAddr "host:port" of the datanode IPC endpoint; if null, the
 *        port is taken from the configured datanode IPC address and the
 *        host defaults to "localhost"
 * @return the proxy, or null if no ':' separator is found in the address
 */
private ClientDatanodeProtocol getClientDatanodeProtocol(String dnAddr)
    throws IOException {
  String hostname = null;
  int port;
  int index;

  Configuration conf = getConf();
  if (dnAddr == null) {
    // Defaulting the configured address for the port
    dnAddr = conf.get(FSConstants.DFS_DATANODE_IPC_ADDRESS_KEY);
    hostname = "localhost";
  }

  index = dnAddr.indexOf(':');
  if (index < 0) {
    return null;
  }
  port = Integer.parseInt(dnAddr.substring(index+1));
  if (hostname == null) {
    hostname = dnAddr.substring(0, index);
  }

  InetSocketAddress addr = NetUtils.createSocketAddr(hostname + ":" + port);
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, conf);
}
java
{ "resource": "" }
q162391
DFSAdmin.getBlockInfo
train
/**
 * Implements the "block info" admin command: looks up a block by id and
 * prints the block, owning file name, and replica locations.
 *
 * @param argv command-line arguments
 * @param i index of the block-id argument
 * @return 0 on success, -1 if the block is not found
 */
private int getBlockInfo(String[] argv, int i) throws IOException {
  // parseLong avoids the needless Long boxing/unboxing of Long.valueOf.
  long blockId = Long.parseLong(argv[i++]);
  LocatedBlockWithFileName locatedBlock =
      getDFS().getClient().getBlockInfo(blockId);

  if (null == locatedBlock) {
    System.err.println("Could not find the block with id : " + blockId);
    return -1;
  }

  StringBuilder sb = new StringBuilder();
  sb.append("block: ")
    .append(locatedBlock.getBlock()).append("\n")
    .append("filename: ")
    .append(locatedBlock.getFileName()).append("\n")
    .append("locations: ");

  DatanodeInfo[] locs = locatedBlock.getLocations();
  for (int k=0; k<locs.length; k++) {
    if (k > 0) {
      sb.append(" , ");
    }
    sb.append(locs[k].getHostName());
  }

  System.out.println(sb.toString());
  return 0;
}
java
{ "resource": "" }
q162392
HftpFileSystem.openConnection
train
/**
 * Opens an HTTP connection to the namenode for the given path and query.
 * The URI constructor is used so path/query components are escaped
 * correctly.
 *
 * @param path request path
 * @param query query string (unescaped)
 * @return the opened (not yet connected) HttpURLConnection
 * @throws IOException wrapping any URISyntaxException
 */
protected HttpURLConnection openConnection(String path, String query)
    throws IOException {
  try {
    final URL url = new URI("http", null, nnAddr.getAddress().getHostAddress(),
        nnAddr.getPort(), path, query, null).toURL();
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return (HttpURLConnection)url.openConnection();
  } catch (URISyntaxException e) {
    // initCause idiom predates the IOException(Throwable) constructor;
    // kept for source compatibility with the file's vintage.
    throw (IOException)new IOException().initCause(e);
  }
}
java
{ "resource": "" }
q162393
HarIndex.getHarIndex
train
/**
 * Loads the index of a Hadoop archive.
 *
 * @param fs filesystem holding the archive
 * @param initializer either the ".har" directory itself or any path
 *        directly inside it (in which case the parent is used)
 * @return the parsed HarIndex, with harDirectory set to the archive dir
 */
public static HarIndex getHarIndex(FileSystem fs, Path initializer)
    throws IOException {
  // Accept a file inside the archive by stepping up to the .har dir.
  if (!initializer.getName().endsWith(HAR)) {
    initializer = initializer.getParent();
  }
  InputStream in = null;
  try {
    Path indexFile = new Path(initializer, INDEX);
    FileStatus indexStat = fs.getFileStatus(indexFile);
    in = fs.open(indexFile);
    HarIndex harIndex = new HarIndex(in, indexStat.getLen());
    harIndex.harDirectory = initializer;
    return harIndex;
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
java
{ "resource": "" }
q162394
HarIndex.parseLine
train
/**
 * Parses one line of a HAR index file and, for file entries, appends an
 * IndexEntry to {@code entries}.
 *
 * Expected layout (space-separated, URL-encoded fields):
 * name type partName startOffset length propertiesBlob — where the blob's
 * first space-separated field is the modification time. Directory lines
 * and malformed lines are ignored.
 *
 * @param line raw index line
 * @throws UnsupportedEncodingException if UTF-8 decoding is unavailable
 */
void parseLine(String line) throws UnsupportedEncodingException {
  String[] splits = line.split(" ");

  // Simplified from the redundant ("dir".equals(...) ? true : false).
  boolean isDir = "dir".equals(splits[1]);
  if (!isDir && splits.length >= 6) {
    String name = URLDecoder.decode(splits[0], "UTF-8");
    String partName = URLDecoder.decode(splits[2], "UTF-8");
    long startIndex = Long.parseLong(splits[3]);
    long length = Long.parseLong(splits[4]);
    // The sixth field packs further metadata; mtime is its first token.
    String[] newsplits = URLDecoder.decode(splits[5],"UTF-8").split(" ");
    if (newsplits != null && newsplits.length >= 5) {
      long mtime = Long.parseLong(newsplits[0]);
      IndexEntry entry = new IndexEntry(
        name, startIndex, length, mtime, partName);
      entries.add(entry);
    }
  }
}
java
{ "resource": "" }
q162395
HarIndex.findEntry
train
/**
 * Finds the index entry whose part file matches partName and whose byte
 * range [startOffset, startOffset + length) contains partFileOffset.
 *
 * @param partName part file name to match
 * @param partFileOffset offset within the part file
 * @return the matching entry, or null if none covers the offset
 */
public IndexEntry findEntry(String partName, long partFileOffset) {
  for (IndexEntry candidate : entries) {
    // Name must match and the offset must fall inside the entry's span.
    if (partName.equals(candidate.partFileName)
        && partFileOffset >= candidate.startOffset
        && partFileOffset < candidate.startOffset + candidate.length) {
      return candidate;
    }
  }
  return null;
}
java
{ "resource": "" }
q162396
HarIndex.findEntryByFileName
train
/**
 * Finds the index entry for the given archived file name.
 *
 * @param fileName file name to look up
 * @return the first matching entry, or null if absent (linear scan)
 */
public IndexEntry findEntryByFileName(String fileName) {
  for (IndexEntry e: entries) {
    if (fileName.equals(e.fileName)) {
      return e;
    }
  }
  return null;
}
java
{ "resource": "" }
q162397
BookKeeperJournalManager.prepareBookKeeperEnv
train
/**
 * Ensures the ZooKeeper path under which bookies register themselves
 * exists, creating it asynchronously and waiting (up to the ZK session
 * timeout) for the creation to complete. An already-existing path counts
 * as success.
 *
 * @param availablePath ZK path for available bookies
 * @param zooKeeper live ZooKeeper handle
 * @throws IOException on timeout or interruption (interrupt flag is
 *         restored before wrapping)
 */
@VisibleForTesting
public static void prepareBookKeeperEnv(final String availablePath,
    ZooKeeper zooKeeper) throws IOException {
  final CountDownLatch availablePathLatch = new CountDownLatch(1);
  StringCallback cb = new StringCallback() {
    @Override
    public void processResult(int rc, String path, Object ctx, String name) {
      // NODEEXISTS is fine: someone else already created the path.
      if (Code.OK.intValue() == rc ||
          Code.NODEEXISTS.intValue() == rc) {
        availablePathLatch.countDown();
        LOG.info("Successfully created bookie available path:" +
            availablePath);
      } else {
        Code code = Code.get(rc);
        LOG.error("Failed to create available bookie path (" +
            availablePath + ")", KeeperException.create(code, path));
      }
    }
  };
  ZkUtils.createFullPathOptimistic(zooKeeper, availablePath, new byte[0],
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, cb, null);
  try {
    int timeoutMs = zooKeeper.getSessionTimeout();
    if (!availablePathLatch.await(timeoutMs, TimeUnit.MILLISECONDS)) {
      throw new IOException("Couldn't create the bookie available path : " +
          availablePath + ", timed out after " + timeoutMs + " ms.");
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted when creating the bookie available " +
        "path: " + availablePath, e);
  }
}
java
{ "resource": "" }
q162398
BookKeeperJournalManager.createZkMetadataIfNotExists
train
/**
 * Initializes this journal's ZooKeeper metadata (parent znode plus a
 * serialized format/namespace record) if no journal data exists yet.
 *
 * @param si storage info written into the format record
 * @throws IOException on unrecoverable ZooKeeper errors or interruption
 */
private void createZkMetadataIfNotExists(StorageInfo si) throws IOException {
  try {
    // Skip initialization entirely if journal data is already present.
    if (!hasSomeJournalData()) {
      try {
        // First create the parent path
        zk.create(zkParentPath, new byte[] { '0' }, Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);

        // Write format/namespace information to ZooKeeper
        FormatInfoWritable writable = localFormatInfoWritable.get();
        writable.set(PROTO_VERSION, si);
        byte[] data = WritableUtil.writableToByteArray(writable);
        zk.create(formatInfoPath, data, Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);
      } catch (KeeperException e) {
        keeperException("Unrecoverable ZooKeeper error initializing " +
            zkParentPath, e);
      } catch (InterruptedException e) {
        interruptedException("Interrupted initializing " + zkParentPath +
            " in ZooKeeper", e);
      }
    }
  } catch (IOException e) {
    LOG.error("Unable to initialize metadata", e);
    throw e;
  }
}
java
{ "resource": "" }
q162399
BookKeeperJournalManager.zkPathExists
train
/**
 * Checks whether a ZooKeeper path exists (no watch set).
 *
 * @param path ZK path to probe
 * @return true iff the znode exists
 * @throws IOException on ZooKeeper errors or interruption (both helper
 *         methods rethrow, so the trailing return is unreachable)
 */
private boolean zkPathExists(String path) throws IOException {
  try {
    return zk.exists(path, false) != null;
  } catch (KeeperException e) {
    keeperException("Unrecoverable ZooKeeper error checking if " + path +
        " exists", e);
  } catch (InterruptedException e) {
    interruptedException("Interrupted checking if ZooKeeper path " + path +
        " exists", e);
  }
  return false; // Should never be reached
}
java
{ "resource": "" }