_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q162200
LookasideCacheFileSystem.mapCachePath
train
/**
 * Translates an absolute HDFS path into the corresponding path inside the
 * local lookaside cache directory.
 */
Path mapCachePath(Path hdfsPath) {
  assert hdfsPath.isAbsolute();
  return new Path(cacheDir + Path.SEPARATOR + hdfsPath);
}
java
{ "resource": "" }
q162201
LookasideCacheFileSystem.evictCache
train
/**
 * Removes the locally cached copy of an HDFS file. A failed delete is only
 * logged at debug level; it is never treated as an error.
 */
public void evictCache(Path hdfsPath, Path localPath, long size) throws IOException {
  boolean deleted = cacheFs.delete(localPath, false);
  if (!deleted && LOG.isDebugEnabled()) {
    LOG.debug("Evict for path: " + hdfsPath + " local path " + localPath + " unsuccessful.");
  }
}
java
{ "resource": "" }
q162202
LookasideCacheFileSystem.create
train
/**
 * Creates a file whose writes go through a CacheOutputStream, so the data
 * populates the lookaside cache as it is written.
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  return new FSDataOutputStream(
      new CacheOutputStream(conf, this, f, permission, overwrite, bufferSize,
          replication, blockSize, progress));
}
java
{ "resource": "" }
q162203
Command.runAll
train
/**
 * Runs this command over each path argument (or once with no argument when
 * args is null). Each argument is glob-expanded; every match is handed to
 * run(Path). A FileNotFoundException from run() is swallowed when the
 * argument was a glob pattern (a match may have vanished), but propagated
 * when the argument named that exact path.
 *
 * @return 0 on success, -1 if any argument failed to resolve or threw an
 *         IOException (remaining arguments are still processed)
 */
public int runAll() {
  int exitCode = 0;
  if (args == null) {
    // no argument
    return run();
  }
  for (String src : args) {
    try {
      Path srcPath = new Path(src);
      FileSystem fs = srcPath.getFileSystem(getConf());
      FileStatus[] statuses = fs.globStatus(srcPath);
      if (statuses == null) {
        System.err.println("Can not find listing for " + src);
        exitCode = -1;
      } else {
        for (FileStatus s : statuses) {
          try {
            run(s.getPath());
          } catch (FileNotFoundException ex) {
            // We are testing if the srcPath was given as a specific path
            // or as a mask. If it was a mask we skip the exception.
            // otherwise we have to propagate it
            if (s.getPath().equals(srcPath.makeQualified(fs))) {
              throw ex;
            }
          }
        }
      }
    } catch (IOException e) {
      exitCode = -1;
      handleIOException(e);
    }
  }
  return exitCode;
}
java
{ "resource": "" }
q162204
FreightStreamer.printToStdout
train
/** Streams the given input to System.out, always closing the stream. */
private void printToStdout(InputStream in) throws IOException {
  try {
    IOUtils.copyBytes(in, System.out, getConf(), false);
  } finally {
    in.close();
  }
}
java
{ "resource": "" }
q162205
FreightStreamer.copyToLocal
train
/*
 * Recursively copies src (on srcFS) to the local file dst, writing through a
 * temp file so a partial copy never appears under the final name. When
 * copyCrc is true and srcFS is a ChecksumFileSystem, the matching checksum
 * file is copied alongside the data file.
 */
private void copyToLocal(final FileSystem srcFS, final Path src, final File dst, final boolean copyCrc) throws IOException {
  /* Keep the structure similar to ChecksumFileSystem.copyToLocal().
   * Ideal these two should just invoke FileUtil.copy() and not repeat
   * recursion here. Of course, copy() should support two more options :
   * copyCrc and useTmpFile (may be useTmpFile need not be an option).
   */
  if (!srcFS.getFileStatus(src).isDir()) {
    if (dst.exists()) {
      // match the error message in FileUtil.checkDest():
      throw new IOException("Target " + dst + " already exists");
    }
    // use absolute name so that tmp file is always created under dest dir
    File tmp = FileUtil.createLocalTempFile(dst.getAbsoluteFile(), COPYTOLOCAL_PREFIX, true);
    if (!FileUtil.copy(srcFS, src, tmp, false, srcFS.getConf())) {
      throw new IOException("Failed to copy " + src + " to " + dst);
    }
    if (!tmp.renameTo(dst)) {
      throw new IOException("Failed to rename tmp file " + tmp + " to local destination \"" + dst + "\".");
    }
    if (copyCrc) {
      if (!(srcFS instanceof ChecksumFileSystem)) {
        throw new IOException("Source file system does not have crc files");
      }
      ChecksumFileSystem csfs = (ChecksumFileSystem) srcFS;
      // locate the local .crc destination for dst, then copy the checksum
      // file from the raw (non-checksumming) view of the source fs
      File dstcs = FileSystem.getLocal(srcFS.getConf())
          .pathToFile(csfs.getChecksumFile(new Path(dst.getCanonicalPath())));
      copyToLocal(csfs.getRawFileSystem(), csfs.getChecksumFile(src), dstcs, false);
    }
  } else {
    // once FileUtil.copy() supports tmp file, we don't need to mkdirs().
    dst.mkdirs();
    for (FileStatus path : srcFS.listStatus(src)) {
      copyToLocal(srcFS, path.getPath(), new File(dst, path.getPath().getName()), copyCrc);
    }
  }
}
java
{ "resource": "" }
q162206
FreightStreamer.tail
train
/**
 * Implements "tail [-f] &lt;file&gt;": prints the last 1KB of the file, and
 * with -f keeps polling every 5 seconds for newly appended data.
 *
 * @param cmd full command-line argument array
 * @param pos index in cmd where this command's arguments begin
 * @throws IOException if the source is a directory or cannot be read
 */
private void tail(String[] cmd, int pos) throws IOException {
  CommandFormat c = new CommandFormat("tail", 1, 1, "f");
  String src = null;
  Path path = null;
  try {
    List<String> parameters = c.parse(cmd, pos);
    src = parameters.get(0);
  } catch (IllegalArgumentException iae) {
    System.err.println("Usage: java FreightStreamer " + TAIL_USAGE);
    throw iae;
  }
  boolean foption = c.getOpt("f"); // fix: "? true : false" was redundant
  path = new Path(src);
  FileSystem srcFs = path.getFileSystem(getConf());
  if (srcFs.isDirectory(path)) {
    throw new IOException("Source must be a file.");
  }
  long fileSize = srcFs.getFileStatus(path).getLen();
  // Start no more than 1KB from the end of the file.
  long offset = (fileSize > 1024) ? fileSize - 1024 : 0;
  while (true) {
    FSDataInputStream in = srcFs.open(path);
    try {
      in.seek(offset);
      IOUtils.copyBytes(in, System.out, 1024, false);
      offset = in.getPos();
    } finally {
      in.close(); // fix: stream previously leaked if copyBytes threw
    }
    if (!foption) {
      break;
    }
    fileSize = srcFs.getFileStatus(path).getLen();
    // If the file was truncated, restart from its new (smaller) end.
    offset = (fileSize > offset) ? offset : fileSize;
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // fix: preserve interrupt status
      break;
    }
  }
}
java
{ "resource": "" }
q162207
LayoutVersion.supports
train
/**
 * Returns true iff layout version {@code lv} is known to the feature map
 * and includes feature {@code f}.
 */
public static boolean supports(final Feature f, final int lv) {
  final EnumSet<Feature> features = map.get(lv);
  if (features == null) {
    return false;
  }
  return features.contains(f);
}
java
{ "resource": "" }
q162208
GetConf.doWork
train
/**
 * Dispatches the single command-line argument to its registered handler.
 * Prints usage and returns -1 for any other argument shape.
 */
private int doWork(String[] args) {
  if (args.length != 1) {
    printUsage();
    return -1;
  }
  CommandHandler handler = Command.getHandler(args[0]);
  if (handler == null) {
    printUsage();
    return -1;
  }
  return handler.doWork(this);
}
java
{ "resource": "" }
q162209
JobInProgressTraits.reportTasksInProgress
train
/**
 * Collects the map tasks (or reduce tasks, when shouldBeMap is false)
 * whose completion state matches shouldBeComplete.
 */
public Vector<TaskInProgress> reportTasksInProgress(boolean shouldBeMap, boolean shouldBeComplete) {
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  TaskInProgress[] candidates = shouldBeMap ? maps : reduces;
  for (TaskInProgress tip : candidates) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
java
{ "resource": "" }
q162210
JobInProgressTraits.reportCleanupTIPs
train
/** Returns the cleanup tasks whose completion state matches the flag. */
public Vector<TaskInProgress> reportCleanupTIPs(boolean shouldBeComplete) {
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  for (TaskInProgress tip : cleanup) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
java
{ "resource": "" }
q162211
JobInProgressTraits.reportSetupTIPs
train
/** Returns the setup tasks whose completion state matches the flag. */
public Vector<TaskInProgress> reportSetupTIPs(boolean shouldBeComplete) {
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  for (TaskInProgress tip : setup) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
java
{ "resource": "" }
q162212
JobInProgressTraits.getTaskInProgress
train
public TaskInProgress getTaskInProgress(TaskID tipid) { if (tipid.isMap()) { if (cleanup.length > 0 && tipid.equals(cleanup[0].getTIPId())) { // cleanup map tip return cleanup[0]; } if (setup.length > 0 && tipid.equals(setup[0].getTIPId())) { //setup map tip return setup[0]; } for (int i = 0; i < maps.length; i++) { if (tipid.equals(maps[i].getTIPId())){ return maps[i]; } } } else { if (cleanup.length > 0 && tipid.equals(cleanup[1].getTIPId())) { // cleanup reduce tip return cleanup[1]; } if (setup.length > 0 && tipid.equals(setup[1].getTIPId())) { //setup reduce tip return setup[1]; } for (int i = 0; i < reduces.length; i++) { if (tipid.equals(reduces[i].getTIPId())){ return reduces[i]; } } } return null; }
java
{ "resource": "" }
q162213
BinaryProtocol.close
train
/**
 * Tears down the connection to the child process: closes the data stream,
 * then asks the uplink handler to close its connection, interrupts it, and
 * waits for its thread to exit.
 */
public void close() throws IOException, InterruptedException {
  LOG.debug("closing connection");
  stream.close();
  uplink.closeConnection();
  uplink.interrupt();
  uplink.join();
}
java
{ "resource": "" }
q162214
BinaryProtocol.writeObject
train
private void writeObject(Writable obj) throws IOException { // For Text and BytesWritable, encode them directly, so that they end up // in C++ as the natural translations. if (obj instanceof Text) { Text t = (Text) obj; int len = t.getLength(); WritableUtils.writeVInt(stream, len); stream.write(t.getBytes(), 0, len); } else if (obj instanceof BytesWritable) { BytesWritable b = (BytesWritable) obj; int len = b.getLength(); WritableUtils.writeVInt(stream, len); stream.write(b.getBytes(), 0, len); } else { buffer.reset(); obj.write(buffer); int length = buffer.getLength(); WritableUtils.writeVInt(stream, length); stream.write(buffer.getData(), 0, length); } }
java
{ "resource": "" }
q162215
HdfsProxy.stop
train
/** Stops the embedded server; shutdown failures are logged, never thrown. */
public void stop() {
  if (server == null) {
    return;
  }
  try {
    server.stop();
    server.join();
  } catch (Exception e) {
    LOG.warn("Got exception shutting down proxy", e);
  }
}
java
{ "resource": "" }
q162216
Path.getParent
train
/**
 * Returns the parent of this path, or null when this path is a root
 * (e.g. "/" or a bare Windows drive). A relative single-component path
 * yields the current-directory path (CUR_DIR).
 */
public Path getParent() {
  String path = uri.getPath();
  int lastSlash = path.lastIndexOf('/');
  // Windows paths keep a 3-char "c:/" style prefix; treat it as the root offset.
  int start = hasWindowsDrive(path, true) ? 3 : 0;
  if ((path.length() == start) ||                            // empty path
      (lastSlash == start && path.length() == start + 1)) {  // at root
    return null;
  }
  String parent;
  if (lastSlash == -1) {
    parent = CUR_DIR;
  } else {
    int end = hasWindowsDrive(path, true) ? 3 : 0;
    // keep the trailing slash only when the parent is the root itself
    parent = path.substring(0, lastSlash == end ? end + 1 : lastSlash);
  }
  return new Path(uri.getScheme(), uri.getAuthority(), parent);
}
java
{ "resource": "" }
q162217
Path.makeQualified
train
/**
 * Qualifies this path against the given file system: makes it absolute
 * (relative to the fs working directory, with an info log) and fills in
 * any missing scheme or authority from the fs URI. Paths that already
 * carry a scheme (and an authority, or an fs without one) are returned
 * unchanged.
 */
public Path makeQualified(FileSystem fs) {
  Path path = this;
  if (!isAbsolute()) {
    FileSystem.LogForCollect
        .info("make Qualify non absolute path: " + this.toString()
            + " working directory: " + fs.getWorkingDirectory());
    path = new Path(fs.getWorkingDirectory(), this);
  }
  URI pathUri = path.toUri();
  URI fsUri = fs.getUri();
  String scheme = pathUri.getScheme();
  String authority = pathUri.getAuthority();
  // Already as qualified as this fs allows.
  if (scheme != null && (authority != null || fsUri.getAuthority() == null))
    return path;
  if (scheme == null) {
    scheme = fsUri.getScheme();
  }
  if (authority == null) {
    authority = fsUri.getAuthority();
    if (authority == null) {
      authority = "";
    }
  }
  return new Path(scheme + ":" + "//" + authority + pathUri.getPath());
}
java
{ "resource": "" }
q162218
ValueAggregatorBaseDescriptor.configure
train
/**
 * Reads the current input-file name and the unique-value cap from the job
 * configuration (cap defaults to Long.MAX_VALUE).
 */
public void configure(JobConf job) {
  inputFile = job.get("map.input.file");
  maxNumItems = job.getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
}
java
{ "resource": "" }
q162219
INodeHardLinkFile.loadHardLinkFileInfo
train
public static HardLinkFileInfo loadHardLinkFileInfo(long hardLinkID, FSImageLoadingContext context) { // update the latest hard link ID context.getFSDirectory().resetLastHardLinkIDIfLarge(hardLinkID); // create the hard link file info if necessary HardLinkFileInfo fileInfo = context.getHardLinkFileInfo(hardLinkID); if (fileInfo == null) { fileInfo = new HardLinkFileInfo(hardLinkID); context.associateHardLinkIDWithFileInfo(hardLinkID, fileInfo); } return fileInfo; }
java
{ "resource": "" }
q162220
DirectoryTraversal.getNextFile
train
/**
 * Returns the next plain file in the traversal, descending into
 * directories depth-first via the node stack, or null when all paths are
 * exhausted. Directories that vanish between listing and descent
 * (FileNotFoundException) are silently skipped.
 */
public FileStatus getNextFile() throws IOException {
  // Check if traversal is done.
  while (!doneTraversal()) {
    // If traversal is not done, check if the stack is not empty.
    while (!stack.isEmpty()) {
      // If the stack is not empty, look at the top node.
      Node node = stack.peek();
      // Check if the top node has an element.
      if (node.hasNext()) {
        FileStatus element = node.next();
        // Is the next element a directory.
        if (!element.isDir()) {
          // It is a file, return it.
          return element;
        }
        // Next element is a directory, push it on to the stack and
        // continue
        try {
          pushNewNode(element);
        } catch (FileNotFoundException e) {
          // Ignore and move to the next element.
        }
        continue;
      } else {
        // Top node has no next element, pop it and continue.
        stack.pop();
        continue;
      }
    }
    // If the stack is empty, do we have more paths?
    while (!paths.isEmpty()) {
      FileStatus next = paths.remove(0);
      pathIdx++;
      if (!next.isDir()) {
        return next;
      }
      try {
        pushNewNode(next);
      } catch (FileNotFoundException e) {
        continue;
      }
      break;
    }
  }
  return null;
}
java
{ "resource": "" }
q162221
TotalOrderPartitioner.getPartition
train
@SuppressWarnings("unchecked") // is memcmp-able and uses the trie public int getPartition(K key, V value, int numPartitions) { return partitions.findPartition(key); }
java
{ "resource": "" }
q162222
TotalOrderPartitioner.readPartitions
train
@SuppressWarnings("unchecked") // map output key class private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass, JobConf job) throws IOException { SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, job); ArrayList<K> parts = new ArrayList<K>(); K key = (K) ReflectionUtils.newInstance(keyClass, job); NullWritable value = NullWritable.get(); while (reader.next(key, value)) { parts.add(key); key = (K) ReflectionUtils.newInstance(keyClass, job); } reader.close(); return parts.toArray((K[])Array.newInstance(keyClass, parts.size())); }
java
{ "resource": "" }
q162223
TotalOrderPartitioner.buildTrie
train
/**
 * Recursively builds a trie over the split points for byte-wise partition
 * lookup. Recursion stops at maxDepth or when the [lower, upper) range is
 * empty, producing a leaf that scans the remaining splits linearly.
 *
 * NOTE(review): each loop iteration probes with trial[depth] = ch+1 to find
 * the first split >= the next one-byte-longer prefix, then builds the child
 * for byte value ch over [lower, currentBound). The final child (index 255)
 * is built with trial[depth] = 127 — presumably this relies on the signed
 * byte ordering used by BinaryComparable.compareTo; confirm against the
 * comparator before touching this.
 */
private TrieNode buildTrie(BinaryComparable[] splits, int lower, int upper, byte[] prefix, int maxDepth) {
  final int depth = prefix.length;
  if (depth >= maxDepth || lower == upper) {
    return new LeafTrieNode(depth, splits, lower, upper);
  }
  InnerTrieNode result = new InnerTrieNode(depth);
  byte[] trial = Arrays.copyOf(prefix, prefix.length + 1);
  // append an extra byte on to the prefix
  int currentBound = lower;
  for (int ch = 0; ch < 255; ++ch) {
    trial[depth] = (byte) (ch + 1);
    lower = currentBound;
    while (currentBound < upper) {
      if (splits[currentBound].compareTo(trial, 0, trial.length) >= 0) {
        break;
      }
      currentBound += 1;
    }
    trial[depth] = (byte) ch;
    result.child[0xFF & ch] = buildTrie(splits, lower, currentBound, trial, maxDepth);
  }
  // pick up the rest
  trial[depth] = 127;
  result.child[255] = buildTrie(splits, currentBound, upper, trial, maxDepth);
  return result;
}
java
{ "resource": "" }
q162224
DFSActionImpl.mkdir
train
/**
 * Prompts for a subfolder name and creates it under the first selected
 * DFS folder; does nothing when no folder is selected.
 */
private void mkdir(IStructuredSelection selection) {
  List<DFSFolder> folders = filterSelection(DFSFolder.class, selection);
  if (folders.isEmpty()) {
    return;
  }
  DFSFolder folder = folders.get(0);
  InputDialog dialog = new InputDialog(Display.getCurrent().getActiveShell(),
      "Create subfolder", "Enter the name of the subfolder", "", null);
  if (dialog.open() == InputDialog.OK) {
    folder.mkdir(dialog.getValue());
  }
}
java
{ "resource": "" }
q162225
DFSActionImpl.open
train
/** Opens every selected DFS file in Eclipse's default text editor. */
private void open(IStructuredSelection selection) throws IOException,
    PartInitException, InvocationTargetException, InterruptedException {
  for (DFSFile file : filterSelection(DFSFile.class, selection)) {
    IStorageEditorInput input = new DFSFileEditorInput(file);
    targetPart.getSite().getWorkbenchWindow().getActivePage()
        .openEditor(input, "org.eclipse.ui.DefaultTextEditor");
  }
}
java
{ "resource": "" }
q162226
JobInitializer.getAverageWaitMsecsPerHardAdmissionJob
train
/**
 * Average wait in milliseconds over the recorded hard-admission jobs,
 * or -1 when nothing has been recorded yet.
 */
synchronized float getAverageWaitMsecsPerHardAdmissionJob() {
  if (hardAdmissionMillisQueue.isEmpty()) {
    return -1f;
  }
  long totalWait = 0;
  for (Long waitMillis : hardAdmissionMillisQueue) {
    totalWait += waitMillis;
  }
  return ((float) totalWait) / hardAdmissionMillisQueue.size();
}
java
{ "resource": "" }
q162227
JobInitializer.getJobAdmissionWaitInfo
train
/** Builds a snapshot of where the given job sits in the admission queue. */
synchronized JobAdmissionWaitInfo getJobAdmissionWaitInfo(JobInProgress job) {
  Integer rank = jobToRank.get(job);
  int position = (rank != null) ? rank : -1;
  float avgWaitMsecs = getAverageWaitMsecsPerHardAdmissionJob();
  return new JobAdmissionWaitInfo(exceedTaskLimit(), position,
      waitingQueue.size(), avgWaitMsecs, hardAdmissionMillisQueue.size());
}
java
{ "resource": "" }
q162228
ServerLogReaderAvatar.detectJournalManager
train
/**
 * Polls ZooKeeper until the primary avatar's journal manager is resolved.
 * During failover (missing/empty primary address) or on an unrecognized
 * primary URI, state is cleared and the loop sleeps before retrying.
 * ZooKeeper connection losses are retried up to ZK_CONNECTION_RETRIES
 * times; other ZooKeeper, interrupt, or URI errors become IOExceptions.
 */
protected void detectJournalManager() throws IOException {
  int failures = 0;
  do {
    try {
      Stat stat = new Stat();
      String primaryAddr = zk.getPrimaryAvatarAddress(logicalName, stat, true, true);
      if (primaryAddr == null || primaryAddr.trim().isEmpty()) {
        primaryURI = null;
        remoteJournalManager = null;
        LOG.warn("Failover detected, wait for it to finish...");
        failures = 0;
        sleep(FAILOVER_RETRY_SLEEP);
        continue;
      }
      primaryURI = addrToURI(primaryAddr);
      LOG.info("Read primary URI from zk: " + primaryURI);
      if (primaryURI.equals(avatarZeroURI)) {
        remoteJournalManager = remoteJournalManagerZero;
      } else if (primaryURI.equals(avatarOneURI)) {
        remoteJournalManager = remoteJournalManagerOne;
      } else {
        LOG.warn("Invalid primaryURI: " + primaryURI);
        primaryURI = null;
        remoteJournalManager = null;
        failures = 0;
        sleep(FAILOVER_RETRY_SLEEP);
      }
    } catch (KeeperException kex) {
      if (KeeperException.Code.CONNECTIONLOSS == kex.code()
          && failures < AvatarZooKeeperClient.ZK_CONNECTION_RETRIES) {
        failures++;
        // This means there was a failure connecting to zookeeper
        // we should retry since some nodes might be down.
        sleep(FAILOVER_RETRY_SLEEP);
        continue;
      }
      throwIOException(kex.getMessage(), kex);
    } catch (InterruptedException e) {
      throwIOException(e.getMessage(), e);
    } catch (URISyntaxException e) {
      throwIOException(e.getMessage(), e);
    }
  } while (remoteJournalManager == null);
}
java
{ "resource": "" }
q162229
ArrayOutputStream.expandIfNecessary
train
private void expandIfNecessary(int size) { if (bytes.length >= size) { // no need to expand return; } // either double, or expand to fit size int newlength = Math.max(2 * bytes.length, size); bytes = Arrays.copyOf(bytes, newlength); }
java
{ "resource": "" }
q162230
ArrayOutputStream.write
train
/** Appends {@code len} bytes from {@code b[off..]} to the buffer, growing it as needed. */
public void write(byte[] b, int off, int len) {
  expandIfNecessary(count + len);
  System.arraycopy(b, off, bytes, count, len);
  count += len;
}
java
{ "resource": "" }
q162231
LsImageVisitor.newLine
train
/** Resets the per-inode fields in preparation for parsing the next inode. */
private void newLine() {
  numBlocks = 0;
  perms = username = group = path = linkTarget = replication = hardlinkId = "";
  filesize = 0L; // fix: 'L' suffix — lowercase 'l' is easily misread as '1'
  type = INode.INodeType.REGULAR_INODE.toString();
  inInode = true;
}
java
{ "resource": "" }
q162232
ReduceTask.getMapFiles
train
private Path[] getMapFiles(FileSystem fs, boolean isLocal) throws IOException { List<Path> fileList = new ArrayList<Path>(); if (isLocal) { // for local jobs for(int i = 0; i < numMaps; ++i) { fileList.add(mapOutputFile.getInputFile(i, getTaskID())); } } else { // for non local jobs for (FileStatus filestatus : mapOutputFilesOnDisk) { fileList.add(filestatus.getPath()); } } return fileList.toArray(new Path[0]); }
java
{ "resource": "" }
q162233
ReduceTask.getClosestPowerOf2
train
/**
 * Returns the exponent of the power of two closest to {@code value}:
 * the exponent of the highest set bit, plus one when the bit immediately
 * below it is also set (i.e. value rounds up to the next power of two).
 *
 * @throws IllegalArgumentException for non-positive values
 */
private static int getClosestPowerOf2(int value) {
  if (value <= 0)
    throw new IllegalArgumentException("Undefined for " + value);
  final int hob = Integer.highestOneBit(value);
  int exponent = Integer.numberOfTrailingZeros(hob);
  if (((hob >>> 1) & value) != 0) {
    exponent++;
  }
  return exponent;
}
java
{ "resource": "" }
q162234
BookKeeperJournalMetadataManager.init
train
/**
 * Ensures the two parent ZNodes used by this journal manager exist,
 * creating each (persistent, open ACL, content {'0'}) when absent.
 *
 * @throws IOException wrapping any ZooKeeper or interruption failure
 */
public void init() throws IOException {
  try {
    createZNodeIfAbsent(zooKeeperParentPath);
    createZNodeIfAbsent(ledgerParentPath);
  } catch (InterruptedException e) {
    interruptedException("Interrupted ensuring that ZNodes " + zooKeeperParentPath
        + " and " + ledgerParentPath + " exist!", e);
  } catch (KeeperException e) {
    keeperException("Unrecoverable ZooKeeper error ensuring that ZNodes "
        + zooKeeperParentPath + " and " + ledgerParentPath + " exist!", e);
  }
}

/** Creates a persistent ZNode with content {'0'} if it does not already exist. */
private void createZNodeIfAbsent(String path)
    throws KeeperException, InterruptedException {
  if (zooKeeper.exists(path, false) == null) {
    zooKeeper.create(path, new byte[] { '0' }, ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.PERSISTENT);
    // fix: the second message previously read "Created ZNode<path>" with no space
    LOG.info("Created ZNode " + path);
  }
}
java
{ "resource": "" }
q162235
BookKeeperJournalMetadataManager.fullyQualifiedPathForLedger
train
/** Maps ledger metadata to the fully qualified ZNode path that stores it. */
public String fullyQualifiedPathForLedger(EditLogLedgerMetadata e) {
  return fullyQualifiedPathForLedger(nameForLedger(e));
}
java
{ "resource": "" }
q162236
BookKeeperJournalMetadataManager.deleteLedgerMetadata
train
/**
 * Deletes the ZNode holding the ledger's metadata, conditioned on the
 * expected ZooKeeper version.
 *
 * @return true if deleted; false if the node was already gone
 * @throws IOException (via keeperException/interruptedException) on a
 *         version mismatch, interruption, or any other ZooKeeper error
 */
public boolean deleteLedgerMetadata(EditLogLedgerMetadata ledger, int version) throws IOException {
  String ledgerPath = fullyQualifiedPathForLedger(ledger);
  try {
    zooKeeper.delete(ledgerPath, version);
    return true;
  } catch (KeeperException.NoNodeException e) {
    LOG.warn(ledgerPath + " does not exist. Returning false, ignoring " + e);
  } catch (KeeperException.BadVersionException e) {
    keeperException("Unable to delete " + ledgerPath + ", version does not match."
        + " Updated by another process?", e);
  } catch (KeeperException e) {
    keeperException("Unrecoverable ZooKeeper error deleting " + ledgerPath, e);
  } catch (InterruptedException e) {
    interruptedException("Interrupted deleting " + ledgerPath, e);
  }
  return false;
}
java
{ "resource": "" }
q162237
BookKeeperJournalMetadataManager.verifyEditLogLedgerMetadata
train
/**
 * Checks that the metadata stored at {@code fullPathToVerify} equals the
 * given metadata. Returns false on mismatch, a missing node, or any read
 * error (which is logged).
 */
public boolean verifyEditLogLedgerMetadata(EditLogLedgerMetadata metadata, String fullPathToVerify) {
  Preconditions.checkNotNull(metadata);
  try {
    EditLogLedgerMetadata stored = readEditLogLedgerMetadata(fullPathToVerify);
    if (stored == null) {
      // equals(null) below reports the mismatch; just note it here
      LOG.warn("No metadata found " + fullPathToVerify + "!");
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Verifying " + stored + " read from " + fullPathToVerify
          + " against " + metadata);
    }
    return metadata.equals(stored);
  } catch (IOException e) {
    LOG.error("Unrecoverable error when verifying " + fullPathToVerify, e);
    return false;
  }
}
java
{ "resource": "" }
q162238
BookKeeperJournalMetadataManager.listLedgers
train
/**
 * Lists edit-log ledger metadata stored under ledgerParentPath, sorted via
 * TreeSet ordering. Corrupt-marked ledgers are always skipped; in-progress
 * ledgers are skipped unless requested. ZNodes whose metadata reads back
 * null (concurrently finalized/deleted) are logged and omitted.
 */
public Collection<EditLogLedgerMetadata> listLedgers(boolean includeInProgressLedgers) throws IOException {
  // Use TreeSet to sort ledgers by firstTxId
  TreeSet<EditLogLedgerMetadata> ledgers = new TreeSet<EditLogLedgerMetadata>();
  try {
    List<String> ledgerNames = zooKeeper.getChildren(ledgerParentPath, false);
    for (String ledgerName : ledgerNames) {
      if (ledgerName.endsWith(BKJM_EDIT_CORRUPT)) {
        continue;
      }
      if (!includeInProgressLedgers && ledgerName.contains(BKJM_EDIT_INPROGRESS)) {
        continue;
      }
      String fullLedgerMetadataPath = fullyQualifiedPathForLedger(ledgerName);
      EditLogLedgerMetadata metadata = readEditLogLedgerMetadata(fullLedgerMetadataPath);
      if (metadata != null) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Read " + metadata + " from " + fullLedgerMetadataPath);
        }
        ledgers.add(metadata);
      } else {
        // metadata would be returns null iff path doesn't exist
        LOG.warn("ZNode " + fullLedgerMetadataPath
            + " might have been finalized and deleted.");
      }
    }
  } catch (InterruptedException e) {
    interruptedException(
        "Interrupted listing ledgers under " + ledgerParentPath, e);
  } catch (KeeperException e) {
    keeperException("Unrecoverable ZooKeeper error listing ledgers "
        + "under " + ledgerParentPath, e);
  }
  return ledgers;
}
java
{ "resource": "" }
q162239
DataStorage.doMerge
train
/**
 * Merges pre-federation data directories into per-namespace destination
 * directories. For each destination that does not yet exist, the matching
 * source (srcDataDirs[i], paired with dstDataDirs by iteration order) is
 * queued; the queued merges then run in parallel MergeThreads.
 *
 * @return false when nothing needed merging; true when all merges succeed
 * @throws IOException if only some directories need merging (an earlier
 *         partial merge) or any merge thread reports an error
 */
boolean doMerge(String[] srcDataDirs, Collection<File> dstDataDirs, int namespaceId,
    NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
  HashMap<File, File> dirsToMerge = new HashMap<File, File>();
  int i = 0;
  for (Iterator<File> it = dstDataDirs.iterator(); it.hasNext(); i++) {
    File dstDataDir = it.next();
    if (dstDataDir.exists()) {
      continue;
    }
    File srcDataDir = NameSpaceSliceStorage.getNsRoot(
        namespaceId, new File(srcDataDirs[i], STORAGE_DIR_CURRENT));
    if (!srcDataDir.exists() || !srcDataDir.isDirectory()) {
      LOG.info("Source data directory " + srcDataDir + " doesn't exist.");
      continue;
    }
    dirsToMerge.put(srcDataDir, dstDataDir);
  }
  if (dirsToMerge.size() == 0) // No merge is needed
    return false;
  if (dirsToMerge.size() != dstDataDirs.size()) {
    // Last merge succeeds partially
    throw new IOException("Merge fail: not all directories are merged successfully.");
  }
  MergeThread[] mergeThreads = new MergeThread[dirsToMerge.size()];
  // start to merge
  i = 0;
  for (Map.Entry<File, File> entry : dirsToMerge.entrySet()) {
    MergeThread thread = new MergeThread(entry.getKey(), entry.getValue(), nsInfo);
    thread.start();
    mergeThreads[i] = thread;
    i++;
  }
  // wait for merge to be done
  for (MergeThread thread : mergeThreads) {
    try {
      thread.join();
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
  }
  // check for errors
  for (MergeThread thread : mergeThreads) {
    if (thread.error != null)
      throw new IOException(thread.error);
  }
  return true;
}
java
{ "resource": "" }
q162240
DataStorage.recoverTransitionRead
train
/**
 * Prepares per-namespace storage under each datanode data directory and
 * delegates recovery/transition to a NameSpaceSliceStorage. When a
 * per-nameservice merge source is configured ("dfs.merge.data.dir.&lt;id&gt;")
 * and this is a REGULAR startup, old-layout directories are merged into
 * the namespace directories instead of creating fresh ones.
 */
void recoverTransitionRead(DataNode datanode, int namespaceId, NamespaceInfo nsInfo,
    Collection<File> dataDirs, StartupOption startOpt, String nameserviceId) throws IOException {
  // First ensure datanode level format/snapshot/rollback is completed
  // recoverTransitionRead(datanode, nsInfo, dataDirs, startOpt);
  // Create list of storage directories for the Name Space
  Collection<File> nsDataDirs = new ArrayList<File>();
  for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
    File dnRoot = it.next();
    File nsRoot = NameSpaceSliceStorage.getNsRoot(
        namespaceId, new File(dnRoot, STORAGE_DIR_CURRENT));
    nsDataDirs.add(nsRoot);
  }
  boolean merged = false;
  String[] mergeDataDirs = nameserviceId == null ? null
      : datanode.getConf().getStrings("dfs.merge.data.dir." + nameserviceId);
  if (startOpt.equals(StartupOption.REGULAR) && mergeDataDirs != null
      && mergeDataDirs.length > 0) {
    assert mergeDataDirs.length == dataDirs.size();
    merged = doMerge(mergeDataDirs, nsDataDirs, namespaceId, nsInfo, startOpt);
  }
  if (!merged) {
    // mkdir for the list of NameSpaceStorage
    makeNameSpaceDataDir(nsDataDirs);
  }
  NameSpaceSliceStorage nsStorage = new NameSpaceSliceStorage(
      namespaceId, this.getCTime(), layoutMap);
  nsStorage.recoverTransitionRead(datanode, nsInfo, nsDataDirs, startOpt);
  addNameSpaceStorage(namespaceId, nsStorage);
}
java
{ "resource": "" }
q162241
DataStorage.makeNameSpaceDataDir
train
/**
 * Creates/validates each namespace data directory via DiskChecker; an
 * invalid directory is logged and skipped rather than failing the set.
 */
public static void makeNameSpaceDataDir(Collection<File> dataDirs) throws IOException {
  for (File dir : dataDirs) {
    try {
      DiskChecker.checkDir(dir);
    } catch (IOException e) {
      LOG.warn("Invalid directory in: " + dir.getCanonicalPath() + ": " + e.getMessage());
    }
  }
}
java
{ "resource": "" }
q162242
DataStorage.doTransition
train
/**
 * Examines each storage directory and decides, per directory, whether a
 * regular startup, an upgrade, or a hard failure applies, based on layout
 * version and cTime relative to the namenode's state. Directories that
 * need upgrading are collected and upgraded together at the end.
 * Note: per the assert below, "future" versions are those numerically
 * smaller than FSConstants.LAYOUT_VERSION.
 */
private void doTransition(List<StorageDirectory> sds, NamespaceInfo nsInfo,
    StartupOption startOpt) throws IOException {
  if (startOpt == StartupOption.ROLLBACK)
    doRollback(nsInfo); // rollback if applicable
  int numOfDirs = sds.size();
  List<StorageDirectory> dirsToUpgrade = new ArrayList<StorageDirectory>(numOfDirs);
  List<StorageInfo> dirsInfo = new ArrayList<StorageInfo>(numOfDirs);
  for (StorageDirectory sd : sds) {
    sd.read();
    layoutMap.put(sd.getRoot(), this.layoutVersion);
    checkVersionUpgradable(this.layoutVersion);
    assert this.layoutVersion >= FSConstants.LAYOUT_VERSION : "Future version is not allowed";
    boolean federationSupported = this.layoutVersion <= FSConstants.FEDERATION_VERSION;
    // For pre-federation version - validate the namespaceID
    if (!federationSupported && getNamespaceID() != nsInfo.getNamespaceID()) {
      sd.unlock();
      throw new IOException("Incompatible namespaceIDs in "
          + sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
          + nsInfo.getNamespaceID() + "; datanode namespaceID = " + getNamespaceID());
    }
    if (this.layoutVersion == FSConstants.LAYOUT_VERSION
        && this.cTime == nsInfo.getCTime())
      continue; // regular startup
    // verify necessity of a distributed upgrade
    verifyDistributedUpgradeProgress(nsInfo);
    // do a global upgrade iff layout version changes and current layout is
    // older than FEDERATION.
    if (this.layoutVersion > FSConstants.LAYOUT_VERSION
        && this.layoutVersion > FSConstants.FEDERATION_VERSION) {
      if (isNsLevelUpgraded(getNamespaceID(), sd)) {
        throw new IOException("Ns level directory already upgraded for : "
            + sd.getRoot() + " ignoring upgrade");
      }
      dirsToUpgrade.add(sd); // upgrade
      dirsInfo.add(new StorageInfo(this));
      continue;
    }
    if (this.cTime >= nsInfo.getCTime()) {
      // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
      // must shutdown
      sd.unlock();
      throw new IOException("Datanode state: LV = " + this.getLayoutVersion()
          + " CTime = " + this.getCTime()
          + " is newer than the namespace state: LV = " + nsInfo.getLayoutVersion()
          + " CTime = " + nsInfo.getCTime());
    }
  }
  // Now do upgrade if dirsToUpgrade is not empty
  if (!dirsToUpgrade.isEmpty()) {
    doUpgrade(dirsToUpgrade, dirsInfo, nsInfo);
  }
}
java
{ "resource": "" }
q162243
DataStorage.addNameSpaceStorage
train
/** Registers the namespace storage for nsID unless one is already present. */
private void addNameSpaceStorage(int nsID, NameSpaceSliceStorage nsStorage)
    throws IOException {
  if (nsStorageMap.containsKey(nsID)) {
    return;
  }
  nsStorageMap.put(nsID, nsStorage);
}
java
{ "resource": "" }
q162244
Host2NodesMap.contains
train
/**
 * Returns true if the given datanode (matched by reference identity) is
 * registered under its host. Thread-safe via the hostmap read lock.
 */
boolean contains(DatanodeDescriptor node) {
  if (node == null) {
    return false;
  }
  String host = node.getHost();
  hostmapLock.readLock().lock();
  try {
    DatanodeDescriptor[] nodes = map.get(host);
    if (nodes == null) {
      return false;
    }
    for (DatanodeDescriptor candidate : nodes) {
      if (candidate == node) {
        return true;
      }
    }
    return false;
  } finally {
    hostmapLock.readLock().unlock();
  }
}
java
{ "resource": "" }
q162245
Host2NodesMap.add
train
boolean add(DatanodeDescriptor node) { hostmapLock.writeLock().lock(); try { if (node==null || contains(node)) { return false; } String host = node.getHost(); DatanodeDescriptor[] nodes = map.get(host); DatanodeDescriptor[] newNodes; if (nodes==null) { newNodes = new DatanodeDescriptor[1]; newNodes[0]=node; } else { // rare case: more than one datanode on the host newNodes = new DatanodeDescriptor[nodes.length+1]; System.arraycopy(nodes, 0, newNodes, 0, nodes.length); newNodes[nodes.length] = node; } map.put(host, newNodes); return true; } finally { hostmapLock.writeLock().unlock(); } }
java
{ "resource": "" }
q162246
Host2NodesMap.remove
train
boolean remove(DatanodeDescriptor node) { if (node==null) { return false; } String host = node.getHost(); hostmapLock.writeLock().lock(); try { DatanodeDescriptor[] nodes = map.get(host); if (nodes==null) { return false; } if (nodes.length==1) { if (nodes[0]==node) { map.remove(host); return true; } else { return false; } } //rare case int i=0; for(; i<nodes.length; i++) { if (nodes[i]==node) { break; } } if (i==nodes.length) { return false; } else { DatanodeDescriptor[] newNodes; newNodes = new DatanodeDescriptor[nodes.length-1]; System.arraycopy(nodes, 0, newNodes, 0, i); System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1); map.put(host, newNodes); return true; } } finally { hostmapLock.writeLock().unlock(); } }
java
{ "resource": "" }
q162247
Host2NodesMap.getDatanodeByName
train
@Deprecated public DatanodeDescriptor getDatanodeByName(String name) { if (name == null) { return null; } int colon = name.indexOf(":"); String host; if (colon < 0) { host = name; } else { host = name.substring(0, colon); } hostmapLock.readLock().lock(); try { DatanodeDescriptor[] nodes = map.get(host); // no entry if (nodes == null) { return null; } for (DatanodeDescriptor containedNode : nodes) { if (name.equals(containedNode.getName())) { return containedNode; } } return null; } finally { hostmapLock.readLock().unlock(); } }
java
{ "resource": "" }
q162248
ValueAggregatorMapper.reduce
train
/** Never invoked — this mapper does not run a reduce phase; calling it is an error. */
public void reduce(Text key, Iterator<Text> values,
    OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
  throw new IOException("should not be called\n");
}
java
{ "resource": "" }
q162249
TaskErrorCollector.getRecentErrorCounts
train
/**
 * Sums per-TaskError counts over the recorded windows whose start time
 * falls within the last {@code timeWindow} milliseconds.
 *
 * NOTE(review): iteration stops at the first window at or before the
 * cutoff, which assumes errorCountsQueue and startTimeQueue are ordered
 * newest-first and kept in lock-step — confirm against the code that
 * populates both queues.
 */
public synchronized Map<TaskError, Integer> getRecentErrorCounts(long timeWindow) {
  long start = System.currentTimeMillis() - timeWindow;
  Map<TaskError, Integer> errorCounts = createErrorCountsMap();
  Iterator<Map<TaskError, Integer>> errorCountsIter = errorCountsQueue.iterator();
  Iterator<Long> startTimeIter = startTimeQueue.iterator();
  while (errorCountsIter.hasNext() && start < startTimeIter.next()) {
    Map<TaskError, Integer> windowErrorCounts = errorCountsIter.next();
    for (Map.Entry<TaskError, Integer> entry : windowErrorCounts.entrySet()) {
      errorCounts.put(entry.getKey(), errorCounts.get(entry.getKey()) + entry.getValue());
    }
  }
  return errorCounts;
}
java
{ "resource": "" }
q162250
TaskErrorCollector.parseConfigFile
train
/**
 * Parses the task-error configuration XML into a name -&gt; TaskError map.
 * Expected shape: the root holds &lt;error name="..."&gt; children, each with
 * optional &lt;pattern&gt; and &lt;description&gt; sub-elements.
 * Parse failures are logged and an (possibly partial) map is returned.
 *
 * @param configURL location of the XML configuration
 * @return insertion-ordered map of error name to TaskError
 */
private Map<String, TaskError> parseConfigFile(URL configURL) {
  // LinkedHashMap keeps config-file declaration order.
  Map<String, TaskError> knownErrors = new LinkedHashMap<String, TaskError>();
  try {
    Element root = getRootElement(configURL);
    NodeList elements = root.getChildNodes();
    for (int i = 0; i < elements.getLength(); ++i) {
      Node node = elements.item(i);
      // Skip text/comment nodes between elements.
      if (!(node instanceof Element)) {
        continue;
      }
      Element element = (Element)node;
      if (matched(element, "error")) {
        String name = element.getAttribute("name");
        String pattern = "";
        String description = "";
        NodeList fields = element.getChildNodes();
        for (int j = 0; j < fields.getLength(); ++j) {
          Node fieldNode = fields.item(j);
          if (!(fieldNode instanceof Element)) {
            continue;
          }
          Element field = (Element)fieldNode;
          if (matched(field, "pattern")) {
            pattern = getText(field);
          } else if (matched(field, "description")) {
            description = getText(field);
          }
        }
        TaskError taskError = new TaskError(name, pattern, description);
        LOG.info("Adding TaskError " + taskError);
        knownErrors.put(name, taskError);
      }
    }
  } catch (IOException ie) {
    // Best-effort: log and return whatever was parsed so far.
    LOG.error("Error parsing config file " + configURL, ie);
  }
  return knownErrors;
}
java
{ "resource": "" }
q162251
CoronaJobTracker.processBadResource
train
/**
 * Handles a resource grant that turned out to be unusable: either releases
 * it (task no longer needs it) or releases it and requests a replacement,
 * optionally excluding the bad host from future grants.
 *
 * @param grant the id of the bad resource grant
 * @param abandonHost if true, the grant's host is added to the task's
 *        exclusion list for the replacement request
 */
public void processBadResource(int grant, boolean abandonHost) {
  synchronized (lockObject) {
    Set<String> excludedHosts = null;
    TaskInProgress tip = requestToTipMap.get(grant);
    if (!job.canLaunchJobCleanupTask() &&
        (!tip.isRunnable() ||
        (tip.isRunning() &&
        !(speculatedMaps.contains(tip) ||
        speculatedReduces.contains(tip))))) {
      // The task is not runnable anymore. Job is done/killed/failed or the
      // task has finished and this is a speculative resource
      // Or the task is running and this is a speculative resource
      // but the speculation is no longer needed
      resourceTracker.releaseResource(grant);
      return;
    }
    if (abandonHost) {
      ResourceGrant resource = resourceTracker.getGrant(grant);
      String hostToExlcude = resource.getAddress().getHost();
      taskToContextMap.get(tip).excludedHosts.add(hostToExlcude);
      excludedHosts = taskToContextMap.get(tip).excludedHosts;
    }
    // Swap the bad grant for a new request and re-link the bookkeeping
    // maps so the replacement resolves back to the same task.
    ResourceRequest newReq =
        resourceTracker.releaseAndRequestResource(grant, excludedHosts);
    requestToTipMap.put(newReq.getId(), tip);
    TaskContext context = taskToContextMap.get(tip);
    if (context == null) {
      context = new TaskContext(newReq);
    } else {
      context.resourceRequests.add(newReq);
    }
    taskToContextMap.put(tip, context);
  }
}
java
{ "resource": "" }
q162252
CoronaJobTracker.updateTaskStatuses
train
private void updateTaskStatuses(TaskTrackerStatus status) { TaskTrackerInfo trackerInfo = TaskTrackerInfo.fromStatus(status); String trackerName = status.getTrackerName(); for (TaskStatus report : status.getTaskReports()) { // Ensure that every report has information about task tracker report.setTaskTracker(trackerName); LOG.debug("Task status report: " + report); updateTaskStatus(trackerInfo, report); // Take any actions outside updateTaskStatus() setupReduceRequests(job); processFetchFailures(report); } }
java
{ "resource": "" }
q162253
CoronaJobTracker.updateTaskStatus
train
/**
 * Applies a single task status report to the tracked task state.
 * Reports for foreign jobs or unknown tasks are ignored.
 *
 * @param info   the reporting task tracker
 * @param report the status report for one task attempt
 */
private void updateTaskStatus(TaskTrackerInfo info, TaskStatus report) {
  TaskAttemptID taskId = report.getTaskID();
  // Here we want strict job id comparison.
  if (!this.jobId.equals(taskId.getJobID())) {
    LOG.warn("Task " + taskId + " belongs to unknown job " +
        taskId.getJobID());
    return;
  }
  TaskInProgress tip = taskLookupTable.getTIP(taskId);
  if (tip == null) {
    return;
  }
  TaskStatus status = tip.getTaskStatus(taskId);
  TaskStatus.State knownState = (status == null) ? null : status
      .getRunState();
  // Remove it from the expired task list
  if (report.getRunState() != TaskStatus.State.UNASSIGNED) {
    expireTasks.removeTask(taskId);
  }
  // Fallback heartbeats may claim that task is RUNNING, while it was killed
  if (report.getRunState() == TaskStatus.State.RUNNING &&
      !TaskStatus.TERMINATING_STATES.contains(knownState)) {
    expireTasks.updateTask(taskId);
  }
  // Clone TaskStatus object here, because CoronaJobInProgress
  // or TaskInProgress can modify this object and
  // the changes should not get reflected in TaskTrackerStatus.
  // An old TaskTrackerStatus is used later in countMapTasks, etc.
  job.updateTaskStatus(tip, (TaskStatus) report.clone(), info);
}
java
{ "resource": "" }
q162254
CoronaJobTracker.saveNewRequestForTip
train
/**
 * Records a freshly issued resource request against its task-in-progress,
 * keeping requestToTipMap and taskToContextMap consistent.
 */
private void saveNewRequestForTip(TaskInProgress tip, ResourceRequest req) {
  requestToTipMap.put(req.getId(), tip);
  TaskContext ctx = taskToContextMap.get(tip);
  if (ctx != null) {
    // Existing context: just append the new request.
    ctx.resourceRequests.add(req);
  } else {
    // First request for this task.
    ctx = new TaskContext(req);
  }
  taskToContextMap.put(tip, ctx);
}
java
{ "resource": "" }
q162255
CoronaJobTracker.getNewJobId
train
@Override public JobID getNewJobId() throws IOException { int value = jobCounter.incrementAndGet(); if (value > 1) { throw new RuntimeException( "CoronaJobTracker can only run one job! (value=" + value + ")"); } createSession(); // the jobtracker can run only a single job. it's jobid is fixed based // on the sessionId. jobId = jobIdFromSessionId(sessionId); return jobId; }
java
{ "resource": "" }
q162256
CoronaJobTracker.dispatchCommitActions
train
/**
 * Dispatches commit actions to their task trackers after obtaining commit
 * permission. Any previously committing attempt superseded by a new one is
 * failed first. If the permission client itself fails, this job tracker is
 * shut down and the error propagated.
 *
 * @param commitActions the commit actions to dispatch; may be empty
 * @throws IOException if the commit permission client fails
 */
private void dispatchCommitActions(List<CommitTaskAction> commitActions)
  throws IOException {
  if (!commitActions.isEmpty()) {
    TaskAttemptID[] wasCommitting;
    try {
      // Atomically read the old committing attempts and register the new
      // ones; wasCommitting[i] pairs with commitActions.get(i).
      wasCommitting = commitPermissionClient
          .getAndSetCommitting(commitActions);
    } catch (IOException e) {
      LOG.error("Commit permission client is faulty - killing this JT");
      try {
        close(false);
      } catch (InterruptedException e1) {
        throw new IOException(e1);
      }
      throw e;
    }
    int i = 0;
    for (CommitTaskAction action : commitActions) {
      TaskAttemptID oldCommitting = wasCommitting[i];
      if (oldCommitting != null) {
        // Fail old committing task attempt
        failTask(oldCommitting, "Unknown committing attempt", false);
      }
      // Commit new task
      TaskAttemptID newToCommit = action.getTaskID();
      if (!newToCommit.equals(oldCommitting)) {
        String trackerName = taskLookupTable.getAssignedTracker(newToCommit);
        taskLauncher.commitTask(trackerName,
            resourceTracker.getTrackerAddr(trackerName), action);
      } else {
        LOG.warn("Repeated try to commit same attempt id. Ignoring");
      }
      // iterator next
      ++i;
    }
  }
}
java
{ "resource": "" }
q162257
CoronaJobTracker.isMatchingJobId
train
private boolean isMatchingJobId(JobID jobId) { if (isStandalone) { // Requests to remote JT must hold exact attempt id. return this.jobId.equals(jobId); } else { // Local JT serves as translator between job id and job attempt id. return this.jobId.equals(getMainJobID(jobId)); } }
java
{ "resource": "" }
q162258
CoronaJobTracker.expiredLaunchingTask
train
/**
 * Handles a task attempt whose launch timed out: records the timeout
 * against its tracker, reports it to the local JT, and fails the attempt.
 */
public void expiredLaunchingTask(TaskAttemptID taskId) {
  synchronized (lockObject) {
    final String tracker = taskLookupTable.getAssignedTracker(taskId);
    trackerStats.recordTimeout(tracker);
    localJTSubmitter.submit(new TaskTimeout(tracker));
    failTask(taskId, "Error launching task", false);
  }
}
java
{ "resource": "" }
q162259
CoronaJobTracker.prepareFailover
train
public void prepareFailover() { if (!RemoteJTProxy.isJTRestartingEnabled(conf)) { return; } LOG.info("prepareFailover done"); this.isPurgingJob = false; if (this.parentHeartbeat != null) { // Because our failover mechanism based on remotJTProxy can't // reach remote job tracker, we stop the interTrackerServer to // trigger the failover this.interTrackerServer.stop(); } }
java
{ "resource": "" }
q162260
StreamJob.go
train
/**
 * Runs the streaming job with the previously captured argv.
 *
 * @return the job's exit code
 * @throws IOException if the run fails
 * @deprecated call {@link #run(String[])} directly.
 */
@Deprecated
public int go() throws IOException {
  try {
    return run(argv_);
  } catch (IOException ex) {
    // Already the declared type; rethrow untouched.
    throw ex;
  } catch (Exception ex) {
    // Preserve the cause chain instead of flattening to getMessage(),
    // which discards the stack trace and may even be null.
    throw new IOException(ex.getMessage(), ex);
  }
}
java
{ "resource": "" }
q162261
StreamJob.listJobConfProperties
train
/**
 * Logs every JobConf property through msg(), sorted by key for stable,
 * readable output.
 */
protected void listJobConfProperties() {
  msg("==== JobConf properties:");
  // Use parameterized types instead of raw Iterator/TreeMap/Map.Entry;
  // Configuration iterates (key, value) string pairs.
  TreeMap<String, String> sorted = new TreeMap<String, String>();
  Iterator<Map.Entry<String, String>> it = jobConf_.iterator();
  while (it.hasNext()) {
    Map.Entry<String, String> en = it.next();
    sorted.put(en.getKey(), en.getValue());
  }
  for (Map.Entry<String, String> en : sorted.entrySet()) {
    msg(en.getKey() + "=" + en.getValue());
  }
  msg("====");
}
java
{ "resource": "" }
q162262
StreamJob.submitAndMonitorJob
train
/**
 * Submits the configured job and polls it to completion, logging progress.
 * Kills the job on any failure path before returning.
 *
 * @return 0 on success; 1 job unsuccessful; 2 bad input path; 3 invalid
 *         job conf; 4 output path exists; 5 other launch IO error
 * @throws IOException only from jar extraction before submission
 */
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    StreamUtil.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  boolean error = true;
  running_ = null;
  String lastReport = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getID();
    LOG.info("getLocalDirs(): " + Arrays.asList(jobConf_.getLocalDirs()));
    LOG.info("Running job: " + jobId_);
    jobInfo();
    // Poll once a second; only log when the progress report changes.
    while (!running_.isComplete()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Deliberately ignored: wake up early and re-check completion.
      }
      running_ = jc_.getJob(jobId_);
      String report = null;
      report = " map " + Math.round(running_.mapProgress() * 100)
          + "%  reduce " + Math.round(running_.reduceProgress() * 100) + "%";
      if (!report.equals(lastReport)) {
        LOG.info(report);
        lastReport = report;
      }
    }
    if (!running_.isSuccessful()) {
      jobInfo();
      LOG.error("Job not Successful!");
      return 1;
    }
    LOG.info("Job complete: " + jobId_);
    LOG.info("Output: " + output_);
    error = false;
  } catch(FileNotFoundException fe) {
    LOG.error("Error launching job , bad input path : " + fe.getMessage());
    return 2;
  } catch(InvalidJobConfException je) {
    LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
    return 3;
  } catch(FileAlreadyExistsException fae) {
    LOG.error("Error launching job , Output path already exists : "
        + fae.getMessage());
    return 4;
  } catch(IOException ioe) {
    LOG.error("Error Launching job : " + ioe.getMessage());
    return 5;
  } finally {
    // Kill the job if we bailed out after submission but before success.
    if (error && (running_ != null)) {
      LOG.info("killJob...");
      running_.killJob();
    }
    jc_.close();
  }
  return 0;
}
java
{ "resource": "" }
q162263
DFSOutputStream.checkIfLastPacketTimeout
train
/**
 * Logs a warning if the oldest unacknowledged packet has been outstanding
 * longer than the configured packet timeout.
 */
private void checkIfLastPacketTimeout() {
  synchronized (ackQueue) {
    if (ackQueue.isEmpty()) {
      return;
    }
    final long waitedMillis = System.currentTimeMillis() - lastPacketSentTime;
    if (waitedMillis > packetTimeout) {
      DFSClient.LOG.warn("Packet " + ackQueue.getLast().seqno + " of " +
          block + " is timed out");
    }
  }
}
java
{ "resource": "" }
q162264
DFSOutputStream.setupPipelineForAppend
train
/**
 * Sets up the write pipeline for appending to an existing last block.
 * On success the generation stamp is bumped on the namenode; on failure
 * the old generation stamp is restored and the normal block-recovery path
 * is retried until it succeeds or is interrupted.
 *
 * @param lastBlock the file's last block (a LocatedBlockWithOldGS)
 * @return true if the pipeline was set up directly, false if the recovery
 *         fallback was taken or no datanodes were available
 */
private boolean setupPipelineForAppend(LocatedBlock lastBlock) throws IOException {
  if (nodes == null || nodes.length == 0) {
    String msg = "Could not get block locations. " + "Source file \"" + src
        + "\" - Aborting...";
    DFSClient.LOG.warn(msg);
    setLastException(new IOException(msg));
    closed = true;
    if (streamer != null) streamer.close();
    return false;
  }
  boolean success = createBlockOutputStream(nodes, dfsClient.clientName,
      false, true);
  long oldGenerationStamp =
      ((LocatedBlockWithOldGS)lastBlock).getOldGenerationStamp();
  if (success) {
    // bump up the generation stamp in NN.
    Block newBlock = lastBlock.getBlock();
    Block oldBlock = new Block(newBlock.getBlockId(),
        newBlock.getNumBytes(), oldGenerationStamp);
    dfsClient.namenode.updatePipeline(dfsClient.clientName, oldBlock,
        newBlock, nodes);
  } else {
    DFSClient.LOG.warn("Fall back to block recovery process when trying" +
        " to setup the append pipeline for file " + src);
    // set the old generation stamp
    block.setGenerationStamp(oldGenerationStamp);
    // fall back the block recovery
    while(processDatanodeError(true, true)) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        lastException = new IOException(e);
        break;
      }
    }
  }
  return success;
}
java
{ "resource": "" }
q162265
DFSOutputStream.nextBlockOutputStream
train
/**
 * Allocates a new block from the namenode and opens the write pipeline to
 * its datanodes. Retries allocation up to "dfs.client.block.write.retries"
 * extra times, excluding datanodes that failed; as a last resort retries
 * with progressively fewer nodes from the final allocation.
 *
 * @param client the client name (unused; kept for signature compatibility)
 * @return the datanodes forming the pipeline
 * @throws IOException if no pipeline could be established
 */
private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
  LocatedBlock lb = null;
  boolean retry = false;
  DatanodeInfo[] nodes;
  ArrayList<DatanodeInfo> excludedNodes = new ArrayList<DatanodeInfo>();
  int count = dfsClient.conf.getInt("dfs.client.block.write.retries", 3);
  boolean success;
  do {
    hasError = false;
    lastException = null;
    errorIndex = 0;
    retry = false;
    nodes = null;
    success = false;
    long startTime = System.currentTimeMillis();
    DatanodeInfo[] excluded = excludedNodes.toArray(new DatanodeInfo[0]);
    lb = locateFollowingBlock(startTime,
        excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    nodes = lb.getLocations();
    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, dfsClient.clientName,
        false, false);
    if (!success) {
      DFSClient.LOG.info("Abandoning block " + block + " for file " + src);
      dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);
      if (errorIndex < nodes.length) {
        DFSClient.LOG.debug("Excluding datanode " + nodes[errorIndex]);
        excludedNodes.add(nodes[errorIndex]);
      }
      // Connection failed. Let's wait a little bit and retry
      retry = true;
    }
  } while (retry && --count >= 0);
  if (!success && nodes != null) {
    // in the last fail time, we will retry with the remaining nodes.
    // Repeatedly drop the failing node (at errorIndex) and try the
    // shortened pipeline until it succeeds or only one node is left.
    while (nodes.length > 1 && !success) {
      if (errorIndex >= nodes.length) {
        break;
      }
      DatanodeInfo[] remainingNodes = new DatanodeInfo[nodes.length - 1];
      for (int i = 0; i < errorIndex; i++) {
        remainingNodes[i] = nodes[i];
      }
      for (int i = errorIndex + 1; i < nodes.length; i++) {
        remainingNodes[i - 1] = nodes[i];
      }
      nodes = remainingNodes;
      success = createBlockOutputStream(nodes, dfsClient.clientName,
          false, false);
    }
  }
  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return nodes;
}
java
{ "resource": "" }
q162266
DFSOutputStream.sync
train
/**
 * Flushes buffered data to the datanodes and blocks until it is
 * acknowledged; if new blocks were allocated since the last sync, also
 * persists block locations on the namenode. Any IOException closes the
 * stream permanently.
 *
 * @throws IOException if the flush or ack wait fails
 */
public void sync() throws IOException {
  long start = System.currentTimeMillis();
  try {
    long toWaitFor;
    synchronized (this) {
      eventStartSync();
      /* Record current blockOffset. This might be changed inside
       * flushBuffer() where a partial checksum chunk might be flushed.
       * After the flush, reset the bytesCurBlock back to its previous value,
       * any partial checksum chunk will be sent now and in next packet.
       */
      long saveOffset = bytesCurBlock;
      DFSOutputStreamPacket oldCurrentPacket = currentPacket;
      // flush checksum buffer as an incomplete chunk
      flushBuffer(false, shouldKeepPartialChunkData());
      // bytesCurBlock potentially incremented if there was buffered data
      eventSyncStartWaitAck();
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("DFSClient flush() : bytesCurBlock " +
            bytesCurBlock + " lastFlushOffset " + lastFlushOffset);
      }
      // Flush only if we haven't already flushed till this offset.
      if (lastFlushOffset != bytesCurBlock) {
        assert bytesCurBlock > lastFlushOffset;
        // record the valid offset of this flush
        lastFlushOffset = bytesCurBlock;
        enqueueCurrentPacket();
      } else {
        // just discard the current packet since it is already been sent.
        if (oldCurrentPacket == null && currentPacket != null) {
          // If we didn't previously have a packet queued, and now we do,
          // but we don't plan on sending it, then we should not
          // skip a sequence number for it!
          currentSeqno--;
        }
        currentPacket = null;
      }
      if (shouldKeepPartialChunkData()) {
        // Restore state of stream. Record the last flush offset
        // of the last full chunk that was flushed.
        //
        bytesCurBlock = saveOffset;
      }
      toWaitFor = lastQueuedSeqno;
    }
    // Wait outside the lock so writers are not blocked during the ack.
    waitForAckedSeqno(toWaitFor);
    eventSyncPktAcked();
    // If any new blocks were allocated since the last flush,
    // then persist block locations on namenode.
    //
    boolean willPersist;
    synchronized (this) {
      willPersist = persistBlocks;
      persistBlocks = false;
    }
    if (willPersist) {
      dfsClient.namenode.fsync(src, dfsClient.clientName);
    }
    long timeval = System.currentTimeMillis() - start;
    dfsClient.metrics.incSyncTime(timeval);
    eventEndSync();
  } catch (IOException e) {
    // A failed sync is fatal for this stream: mark closed and tear down.
    lastException = new IOException("IOException flush:", e);
    closed = true;
    closeThreads();
    throw e;
  }
}
java
{ "resource": "" }
q162267
DFSOutputStream.flushInternal
train
/**
 * Queues the current packet and blocks until it has been acknowledged by
 * the pipeline.
 */
private void flushInternal() throws IOException {
  isClosed();
  dfsClient.checkOpen();
  final long seqnoToAwait;
  synchronized (this) {
    enqueueCurrentPacket();
    seqnoToAwait = lastQueuedSeqno;
  }
  // Wait outside the lock so concurrent writers are not blocked.
  waitForAckedSeqno(seqnoToAwait);
}
java
{ "resource": "" }
q162268
DFSOutputStream.closeThreads
train
private void closeThreads() throws IOException { try { if (streamer != null) { streamer.close(); streamer.join(); } // shutdown response after streamer has exited. if (response != null) { response.close(); response.join(); response = null; } } catch (InterruptedException e) { throw new InterruptedIOException("Failed to shutdown response thread"); } }
java
{ "resource": "" }
q162269
OutputHandler.output
train
/**
 * Forwards one key/value pair emitted by the task to the output collector.
 *
 * @param key   the output key
 * @param value the output value
 * @throws IOException if the collector fails
 */
public void output(K key, V value) throws IOException {
  collector.collect(key, value);
}
java
{ "resource": "" }
q162270
OutputHandler.partitionedOutput
train
/**
 * Emits one key/value pair directed at an explicit reduce partition.
 *
 * @param reduce the target reduce partition
 * @throws IOException if the collector fails
 */
public void partitionedOutput(int reduce, K key, V value) throws IOException {
  // Pin the partitioner's next decision before handing off the pair.
  PipesPartitioner.setNextPartition(reduce);
  collector.collect(key, value);
}
java
{ "resource": "" }
q162271
OutputHandler.progress
train
/**
 * Records the task's progress and forwards it to the framework; when a
 * record reader is present the progress value is also pushed through it.
 *
 * @param progress fraction of work completed
 */
public void progress(float progress) throws IOException {
  progressValue = progress;
  reporter.progress();
  if (recordReader == null) {
    return;
  }
  progressKey.set(progress);
  recordReader.next(progressKey, nullValue);
}
java
{ "resource": "" }
q162272
OutputHandler.waitForFinish
train
/**
 * Blocks until the task reports completion or an error is recorded.
 *
 * @return true once the task has finished
 * @throws Throwable the recorded failure, if any
 */
public synchronized boolean waitForFinish() throws Throwable {
  // Standard guarded wait; the loop protects against spurious wakeups.
  while (exception == null && !done) {
    wait();
  }
  if (exception != null) {
    throw exception;
  }
  return done;
}
java
{ "resource": "" }
q162273
DirectoryScanner.getDiskReportPerNamespace
train
/**
 * Compiles a full on-disk block report across all valid volumes, one
 * compiler task per volume run on the report thread pool, and merges the
 * results into a per-namespace map of sorted scan-info arrays.
 *
 * @return namespace id -&gt; sorted DiskScanInfo array; empty if volumes are
 *         not initialized yet
 */
private Map<Integer, DiskScanInfo[]> getDiskReportPerNamespace() {
  if (dataset.volumes == null) {
    LOG.warn("Dataset volumes are not initialized yet");
    return new HashMap<Integer, DiskScanInfo[]>();
  }
  // First get list of data directories
  FSVolume[] volumes = dataset.volumes.getVolumes();
  ScanInfoListPerNamespace[] volumeReports =
      new ScanInfoListPerNamespace[volumes.length];
  Map<Integer, Future<ScanInfoListPerNamespace>> volumeCompilers =
      new HashMap<Integer, Future<ScanInfoListPerNamespace>>();
  for (int i = 0; i < volumes.length; i++) {
    // check volume is valid
    if (dataset.volumes.isValid(volumes[i])) {
      // and run compiler for it
      ReportCompiler reportCompiler = new ReportCompiler(volumes[i], datanode);
      Future<ScanInfoListPerNamespace> result =
          reportCompileThreadPool.submit(reportCompiler);
      volumeCompilers.put(i, result);
    }
  }
  // Collect every compiler's result; a failure aborts the whole report.
  for(Entry<Integer, Future<ScanInfoListPerNamespace>> e :
      volumeCompilers.entrySet()) {
    try {
      int volume = e.getKey();
      volumeReports[volume] = e.getValue().get();
    } catch (Exception ex) {
      LOG.error("Error compiling report", ex);
      // Propagate ex to DataBlockScanner to deal with
      throw new RuntimeException(ex);
    }
  }
  // Compile consolidated report for all the volumes
  // Re-check validity: a volume may have gone bad since submission.
  ScanInfoListPerNamespace list = new ScanInfoListPerNamespace();
  for (int i = 0; i < volumes.length; i++) {
    if (dataset.volumes.isValid(volumes[i])) {
      // volume is still valid
      list.addAll(volumeReports[i]);
    }
  }
  return list.toSortedArrays();
}
java
{ "resource": "" }
q162274
DirectoryScanner.checkDifferenceAndReconcile
train
void checkDifferenceAndReconcile() { resetDiffsAndStats(); checkDifference(); // now reconcile the differences for (Entry<Integer, LinkedList<ScanDifference>> entry : diffsPerNamespace.entrySet()) { Integer namespaceId = entry.getKey(); LinkedList<ScanDifference> diff = entry.getValue(); for (ScanDifference info : diff) { try { dataset.checkAndUpdate(namespaceId, delta, info); } catch (IOException e) { LOG.warn("Cannot reconcile block " + info.toString(), e); } } } }
java
{ "resource": "" }
q162275
JarModule.createJarPackage
train
/**
 * Builds a JAR package for the given workspace resource by running a
 * JarModule operation under the workbench progress service.
 *
 * @param resource the workspace resource to package
 * @return the created JAR file, or null on failure (an error dialog is
 *         shown to the user in that case)
 */
public static File createJarPackage(IResource resource) {
  JarModule jarModule = new JarModule(resource);
  try {
    PlatformUI.getWorkbench().getProgressService().run(false, true,
        jarModule);
  } catch (Exception e) {
    // NOTE(review): failure is only printed to stderr here; consider
    // routing through the plugin's logging instead of printStackTrace().
    e.printStackTrace();
    return null;
  }
  File jarFile = jarModule.getJarFile();
  if (jarFile == null) {
    ErrorMessageDialog.display("Run on Hadoop",
        "Unable to create or locate the JAR file for the Job");
    return null;
  }
  return jarFile;
}
java
{ "resource": "" }
q162276
INodeFileUnderConstruction.getValidTargets
train
/**
 * Returns the subset of pipeline targets whose recorded generation stamp
 * matches the last block's current generation stamp.
 *
 * @return all targets if every stamp matches, a filtered copy if only some
 *         do, or null if targetGSs is unset or no target matches
 */
DatanodeDescriptor[] getValidTargets() {
  if (targetGSs == null) {
    return null;
  }
  // First pass: count matches so the result array can be sized exactly.
  int count = 0;
  long lastBlockGS = this.getLastBlock().getGenerationStamp();
  for (long targetGS : targetGSs) {
    if (lastBlockGS == targetGS) {
      count++;
    }
  }
  if (count == 0) {
    return null;
  }
  if (count == targets.length) {
    // Every target is valid — avoid copying.
    return targets;
  } else {
    // Second pass: copy only the matching targets, stopping early once
    // 'count' of them have been gathered.
    DatanodeDescriptor[] validTargets = new DatanodeDescriptor[count];
    for (int i=0, numOfValidTargets=0; i<targets.length; i++) {
      if (lastBlockGS == targetGSs[i]) {
        validTargets[numOfValidTargets++] = targets[i];
        if (numOfValidTargets == count) {
          return validTargets;
        }
      }
    }
    return validTargets;
  }
}
java
{ "resource": "" }
q162277
INodeFileUnderConstruction.setTargets
train
/**
 * Replaces the pipeline targets, tagging every target with the supplied
 * generation stamp. A null target list clears the stamps as well.
 */
void setTargets(DatanodeDescriptor[] locs, long generationStamp) {
  setTargets(locs);
  if (locs == null) {
    targetGSs = null;
    return;
  }
  // Every target starts out with the same generation stamp.
  final long[] stamps = new long[locs.length];
  for (int i = 0; i < stamps.length; i++) {
    stamps[i] = generationStamp;
  }
  this.targetGSs = stamps;
}
java
{ "resource": "" }
q162278
INodeFileUnderConstruction.addTarget
train
/**
 * Adds a datanode to the pipeline targets, or refreshes its generation
 * stamp if it is already present.
 *
 * @param node the datanode to add (registered with this inode if new)
 * @param generationStamp the stamp to associate with the node
 * @return true if the target list or a stamp changed, false if the node
 *         was already present with the same stamp
 */
boolean addTarget(DatanodeDescriptor node, long generationStamp) {
  if (this.targets == null) {
    this.targets = new DatanodeDescriptor[0];
  }
  // If the node is already a target, only its stamp may need updating.
  for (int i=0; i<targets.length; i++) {
    if (targets[i].equals(node)) {
      if (generationStamp != targetGSs[i]) {
        targetGSs[i] = generationStamp;
        return true;
      }
      return false;
    }
  }
  if (node != null) {
    node.addINode(this);
  }
  // allocate new data structure to store additional target
  DatanodeDescriptor[] newt = new DatanodeDescriptor[targets.length + 1];
  long[] newgs = new long[targets.length + 1];
  for (int i = 0; i < targets.length; i++) {
    newt[i] = this.targets[i];
    newgs[i] = this.targetGSs[i];
  }
  newt[targets.length] = node;
  newgs[targets.length] = generationStamp;
  this.targets = newt;
  this.targetGSs = newgs;
  // Invalidate the primary-node choice; it is re-picked on next recovery.
  this.primaryNodeIndex = -1;
  return true;
}
java
{ "resource": "" }
q162279
INodeFileUnderConstruction.assignPrimaryDatanode
train
/**
 * Picks the first alive target (cycling from the previous primary) as the
 * primary datanode for lease recovery of the last block, and schedules the
 * recovery work on it. If no target is alive nothing is scheduled.
 */
void assignPrimaryDatanode() {
  //assign the first alive datanode as the primary datanode
  if (targets.length == 0) {
    NameNode.stateChangeLog.warn("BLOCK*" +
        " INodeFileUnderConstruction.initLeaseRecovery:" +
        " No blocks found, lease removed.");
  }
  int previous = primaryNodeIndex;
  Block lastBlock = this.getLastBlock();
  // find an alive datanode beginning from previous.
  // This causes us to cycle through the targets on successive retries.
  for(int i = 1; i <= targets.length; i++) {
    int j = (previous + i)%targets.length;
    if (targets[j].isAlive) {
      DatanodeDescriptor primary = targets[primaryNodeIndex = j];
      primary.addBlockToBeRecovered(lastBlock, targets);
      NameNode.stateChangeLog.info("BLOCK* " + lastBlock
          + " recovery started, primary=" + primary);
      return;
    }
  }
}
java
{ "resource": "" }
q162280
INodeFileUnderConstruction.setLastRecoveryTime
train
/**
 * Atomically tests whether the lease-recovery grace period has elapsed
 * and, if so, records {@code now} as the start of a new recovery attempt.
 *
 * @param now current time in milliseconds
 * @return true if a new recovery attempt may begin
 */
synchronized boolean setLastRecoveryTime(long now) {
  final boolean periodExpired =
      now - lastRecoveryTime > NameNode.LEASE_RECOVER_PERIOD;
  if (periodExpired) {
    lastRecoveryTime = now;
  }
  return periodExpired;
}
java
{ "resource": "" }
q162281
INodeFileUnderConstruction.collectSubtreeBlocksAndClear
train
/**
 * Drops the recovery targets first, then delegates subtree block
 * collection to the parent implementation.
 */
int collectSubtreeBlocksAndClear(List<BlockInfo> v, int blocksLimit,
    List<INode> removedINodes) {
  clearTargets();
  final int result =
      super.collectSubtreeBlocksAndClear(v, blocksLimit, removedINodes);
  return result;
}
java
{ "resource": "" }
q162282
INodeFileUnderConstruction.removeINodeFromDatanodeDescriptors
train
/**
 * Unregisters this inode from every given datanode descriptor.
 * A null target array is a no-op.
 */
private void removeINodeFromDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;
  }
  for (DatanodeDescriptor node : targets) {
    node.removeINode(this);
  }
}
java
{ "resource": "" }
q162283
INodeFileUnderConstruction.addINodeToDatanodeDescriptors
train
/**
 * Registers this inode with every given datanode descriptor.
 * A null target array is a no-op.
 */
private void addINodeToDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;
  }
  for (DatanodeDescriptor node : targets) {
    node.addINode(this);
  }
}
java
{ "resource": "" }
q162284
OuterJoinRecordReader.combine
train
/**
 * Outer join: every combination of source tuples is emitted, so this
 * always accepts.
 *
 * @return true unconditionally
 */
protected boolean combine(Object[] srcs, TupleWritable dst) {
  // Sanity: one slot in the output tuple per source.
  assert srcs.length == dst.size();
  return true;
}
java
{ "resource": "" }
q162285
TrackerClientCache.getClient
train
/**
 * Returns a cached protocol client for the given tracker, creating and
 * caching one if necessary. Creation is serialized per topology node so at
 * most one connection is opened for a given tracker.
 *
 * @throws IOException if a new connection cannot be established
 */
public CoronaTaskTrackerProtocol getClient(
    String host, int port) throws IOException {
  final String key = makeKey(host, port);
  final Node ttNode = topologyCache.getNode(host);
  synchronized (ttNode) {
    CoronaTaskTrackerProtocol cached = trackerClients.get(key);
    if (cached != null) {
      return cached;
    }
    final CoronaTaskTrackerProtocol fresh = createClient(host, port);
    trackerClients.put(key, fresh);
    return fresh;
  }
}
java
{ "resource": "" }
q162286
TrackerClientCache.createClient
train
/**
 * Opens an RPC proxy to a Corona task tracker. The host is resolved in
 * priority order: a static resolution entry, then a raw-bytes address if
 * the name parses as one, then ordinary DNS. Connect and RPC timeouts come
 * from the job configuration.
 *
 * @throws IOException if resolution or connection fails
 */
private CoronaTaskTrackerProtocol createClient(String host, int port)
  throws IOException {
  String staticHost = NetUtils.getStaticResolution(host);
  InetSocketAddress s = null;
  InetAddress inetAddress = null;
  byte[] byteArr = null;
  if (staticHost != null) {
    inetAddress = InetAddress.getByName(staticHost);
  } else {
    byteArr = Utilities.asBytes(host);
    if ( byteArr == null) {
      inetAddress = InetAddress.getByName(host);
    } else {
      // Host string encoded an address literal: skip DNS entirely.
      inetAddress = InetAddress.getByAddress(byteArr);
    }
  }
  s = new InetSocketAddress(inetAddress, port);
  LOG.info("Creating client to " +
      (staticHost != null ? staticHost : host) + ":" + s.getPort());
  long connectTimeout =
      conf.getLong(CoronaJobTracker.TT_CONNECT_TIMEOUT_MSEC_KEY, 10000L);
  int rpcTimeout =
      conf.getInt(CoronaJobTracker.TT_RPC_TIMEOUT_MSEC_KEY, 60000);
  return RPC.waitForProxy(
      CoronaTaskTrackerProtocol.class,
      CoronaTaskTrackerProtocol.versionID,
      s,
      conf,
      connectTimeout,
      rpcTimeout);
}
java
{ "resource": "" }
q162287
ShardWriter.close
train
/**
 * Finalizes the shard: optionally optimizes the index, closes the Lucene
 * writer, moves the finished files from temp to the permanent location,
 * and closes the directory. The nested try/finally order guarantees the
 * writer and directory are closed even when earlier steps fail.
 *
 * @throws IOException if optimize, close, or the move fails
 */
public void close() throws IOException {
  LOG.info("Closing the shard writer, processed " + numForms + " forms");
  try {
    try {
      if (maxNumSegments > 0) {
        writer.optimize(maxNumSegments);
        LOG.info("Optimized the shard into at most " + maxNumSegments
            + " segments");
      }
    } finally {
      writer.close();
      LOG.info("Closed Lucene index writer");
    }
    moveFromTempToPerm();
    LOG.info("Moved new index files to " + perm);
  } finally {
    dir.close();
    LOG.info("Closed the shard writer");
  }
}
java
{ "resource": "" }
q162288
ShardWriter.restoreGeneration
train
/**
 * Rolls the index back to a known generation: deletes any segments_N file
 * newer than {@code startGen}, plus segments.gen (it will be rewritten).
 * Used to clean up after a previously failed attempt.
 *
 * @param fs the filesystem holding the permanent index
 * @param perm the permanent index directory
 * @param startGen the generation to keep; newer ones are removed
 */
private void restoreGeneration(FileSystem fs, Path perm, long startGen)
    throws IOException {
  FileStatus[] fileStatus = fs.listStatus(perm, new PathFilter() {
    public boolean accept(Path path) {
      return LuceneUtil.isSegmentsFile(path.getName());
    }
  });
  // remove the segments_N files whose generation are greater than
  // the starting generation
  for (int i = 0; i < fileStatus.length; i++) {
    Path path = fileStatus[i].getPath();
    if (startGen < LuceneUtil.generationFromSegmentsFileName(path.getName())) {
      fs.delete(path);
    }
  }
  // always remove segments.gen in case last failed try removed segments_N
  // but not segments.gen, and segments.gen will be overwritten anyway.
  Path segmentsGenFile = new Path(LuceneUtil.IndexFileNames.SEGMENTS_GEN);
  if (fs.exists(segmentsGenFile)) {
    fs.delete(segmentsGenFile);
  }
}
java
{ "resource": "" }
q162289
ShardWriter.moveFromTempToPerm
train
/**
 * Moves the finished index files from the local temp directory to the
 * permanent location. Ordinary files are moved first; the segments_N and
 * segments.gen files go last so a partially-moved index is never visible
 * as committed. The temp directory is deleted in all cases.
 *
 * @throws IOException if any move fails
 */
private void moveFromTempToPerm() throws IOException {
  try {
    FileStatus[] fileStatus =
        localFs.listStatus(temp, LuceneIndexFileNameFilter.getFilter());
    Path segmentsPath = null;
    Path segmentsGenPath = null;
    // move the files created in temp dir except segments_N and segments.gen
    for (int i = 0; i < fileStatus.length; i++) {
      Path path = fileStatus[i].getPath();
      String name = path.getName();
      if (LuceneUtil.isSegmentsGenFile(name)) {
        assert (segmentsGenPath == null);
        segmentsGenPath = path;
      } else if (LuceneUtil.isSegmentsFile(name)) {
        assert (segmentsPath == null);
        segmentsPath = path;
      } else {
        fs.completeLocalOutput(new Path(perm, name), path);
      }
    }
    // move the segments_N file
    if (segmentsPath != null) {
      fs.completeLocalOutput(new Path(perm, segmentsPath.getName()),
          segmentsPath);
    }
    // move the segments.gen file
    if (segmentsGenPath != null) {
      fs.completeLocalOutput(new Path(perm, segmentsGenPath.getName()),
          segmentsGenPath);
    }
  } finally {
    // finally delete the temp dir (files should have been deleted)
    localFs.delete(temp);
  }
}
java
{ "resource": "" }
q162290
ChmodParser.applyNewPermission
train
/**
 * Computes the new permission bits for a file under this chmod spec.
 * The conditional-execute ("X") case applies when the target is a
 * directory or already carries any execute bit.
 *
 * @param file the file whose permissions are being changed
 * @return the resulting mode bits
 */
public short applyNewPermission(FileStatus file) {
  final FsPermission current = file.getPermission();
  final int existingBits = current.toShort();
  final boolean execApplies = file.isDir() || (existingBits & 0111) != 0;
  return (short) combineModes(existingBits, execApplies);
}
java
{ "resource": "" }
q162291
MaxTxId.store
train
/**
 * Persists the given transaction id as the new maximum, but only if it is
 * strictly larger than the currently stored value — the stored max never
 * moves backwards.
 *
 * @param maxTxId candidate maximum transaction id
 * @throws IOException if reading or writing the stored value fails
 */
public synchronized void store(long maxTxId) throws IOException {
  final long recorded = get();
  if (recorded >= maxTxId) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Resetting maxTxId to " + maxTxId);
  }
  set(maxTxId);
}
java
{ "resource": "" }
q162292
MaxTxId.get
train
/**
 * Reads the stored maximum transaction id from ZooKeeper.
 *
 * @return the stored max txid, or -1 if the znode does not exist
 * @throws IOException wrapped by the keeper/interrupted helpers on error
 */
public synchronized long get() throws IOException {
  try {
    // Stat is cached so a later set() can use a conditional update.
    lastZNodeStat = zooKeeper.exists(fullyQualifiedZNode, false);
    if (lastZNodeStat == null) {
      return -1;
    }
    byte[] data =
        zooKeeper.getData(fullyQualifiedZNode, false, lastZNodeStat);
    WritableUtil.readWritableFromByteArray(data, maxTxIdWritable);
    return maxTxIdWritable.get();
  } catch (KeeperException e) {
    // keeperException() rethrows; the return below is unreachable but
    // required by the compiler.
    keeperException("Unrecoverable ZooKeeper error reading " +
        fullyQualifiedZNode, e);
    return -1; // This is never reached
  } catch (InterruptedException e) {
    interruptedException("Interrupted reading " +
        fullyQualifiedZNode, e);
    return -1; // This is never reached
  }
}
java
{ "resource": "" }
q162293
ServerCore.checkAndSetServiceName
train
/**
 * Validates that a nameservice id was supplied when the cluster runs in
 * federated mode, and records it on this server.
 *
 * @param conf the configuration to inspect for federation settings
 * @param info startup information carrying the requested service name
 * @throws ConfigurationException if federation is enabled but no
 *         nameservice id was provided
 */
private void checkAndSetServiceName(Configuration conf, StartupInfo info)
    throws ConfigurationException {
  String federationServices = conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES);
  String serviceName = info.serviceName;
  // Federation is on iff the nameservices key is set to a non-blank value.
  if (federationServices != null && !federationServices.trim().isEmpty()) {
    if (serviceName == null || serviceName.trim().isEmpty()) {
      // Fixed typo in the user-facing message ("fedrated" -> "federated").
      throw new ConfigurationException(
          "This is a federated DFS cluster, nameservice id is required.");
    }
    this.serviceName = serviceName;
  }
}
java
{ "resource": "" }
q162294
ServerCore.shutdown
train
/**
 * Requests an orderly shutdown: flags the shutdown, stops the Thrift
 * server if one is running, and clears the started flag.
 */
@Override
public void shutdown() {
  LOG.info("Shutting down ...");
  shouldShutdown = true;
  if (tserver != null) {
    tserver.stop();
  }
  started = false;
}
java
{ "resource": "" }
q162295
ServerCore.addClientAndConnect
train
@Override public long addClientAndConnect(String host, int port) throws TTransportException, IOException { long clientId = getNewClientId(); LOG.info("Adding client with id=" + clientId + " host=" + host + " port=" + port + " and connecting ..."); ClientHandler.Client clientHandler; try { clientHandler = getClientConnection(host, port); LOG.info("Succesfully connected to client " + clientId); } catch (IOException e1) { LOG.error("Failed to connect to client " + clientId, e1); throw e1; } catch (TTransportException e2) { LOG.error("Failed to connect to client " + clientId, e2); throw e2; } // Save the client to the internal structures ClientData clientData = new ClientData(clientId, clientHandler, host, port); addClient(clientData); LOG.info("Successfully added client " + clientId + " and connected."); return clientId; }
java
{ "resource": "" }
q162296
ServerCore.addClient
train
/**
 * Registers an already-connected client: stores its data, assigns it to
 * the dispatcher, and updates the registered-clients metric.
 */
@Override
public void addClient(ClientData clientData) {
  clientsData.put(clientData.id, clientData);
  dispatcher.assignClient(clientData.id);
  LOG.info("Succesfully added client " + clientData);
  metrics.numRegisteredClients.set(clientsData.size());
}
java
{ "resource": "" }
q162297
ServerCore.removeClient
train
/**
 * Removes a client: detaches it from the dispatcher, removes all its
 * subscriptions, updates the subscription/registration metrics, and drops
 * its data.
 *
 * @param clientId the client to remove
 * @return true if the client existed and was removed, false otherwise
 */
@Override
public boolean removeClient(long clientId) {
  ClientData clientData = clientsData.get(clientId);
  if (clientData == null) {
    return false;
  }
  dispatcher.removeClient(clientId);
  // Iterate over all the sets in which this client figures as subscribed
  // and remove it
  // Note the lock nesting: the outer 'subscriptions' lock guards the
  // iteration, the inner per-set lock guards each mutation.
  synchronized (subscriptions) {
    for (Set<Long> subscribedSet : clientData.subscriptions) {
      synchronized (subscribedSet) {
        subscribedSet.remove(clientId);
      }
    }
  }
  metrics.numTotalSubscriptions.set(numTotalSubscriptions.
      getAndAdd(-clientData.subscriptions.size()));
  clientsData.remove(clientId);
  LOG.info("Removed client " + clientData);
  metrics.numRegisteredClients.set(clientsData.size());
  return true;
}
java
{ "resource": "" }
q162298
ServerCore.getClientNotificationQueue
train
/**
 * Returns the notification queue of a registered client.
 *
 * @param clientId the client to look up
 * @return the client's queue, or null if the client is unknown
 */
@Override
public Queue<NamespaceNotification> getClientNotificationQueue(long clientId) {
  final ClientData data = clientsData.get(clientId);
  if (data == null) {
    return null;
  }
  return data.queue;
}
java
{ "resource": "" }
q162299
ServerCore.queueNotifications
train
/**
 * Replays historical notifications for an event into a client's queue,
 * starting from the given transaction id. A txId of -1 means "no history
 * requested" and is a no-op.
 *
 * @param clientId the subscribing client
 * @param event    the namespace event subscribed to
 * @param txId     starting transaction id, or -1 to skip history
 * @throws TransactionIdTooOldException if the history no longer covers txId
 * @throws InvalidClientIdException if the client id is unknown
 */
private void queueNotifications(long clientId, NamespaceEvent event,
    long txId) throws TransactionIdTooOldException,
    InvalidClientIdException {
  if (txId == -1) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Queueing notifications for client " + clientId +
        " from txId " + txId + " at [" + event.path + ", " +
        EventType.fromByteValue(event.type) + "] ...");
  }
  ClientData clientData = clientsData.get(clientId);
  if (clientData == null) {
    LOG.error("Missing the client data for client id: " + clientId);
    throw new InvalidClientIdException("Missing the client data");
  }
  // Store the notifications in the queue for this client
  serverHistory.addNotificationsToQueue(event, txId, clientData.queue);
}
java
{ "resource": "" }