_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q161900
NNStorage.inspectStorageDirs
train
// Runs the given inspector over every storage directory returned by dirIterator().
void inspectStorageDirs(FSImageStorageInspector inspector) throws IOException { // Process each of the storage directories to find the pair of // newest image file and edit file for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); inspector.inspectDirectory(sd); } }
java
{ "resource": "" }
q161901
NNStorage.readAndInspectDirs
train
// Reads VERSION files from all storage dirs to find the min/max layout versions,
// then picks the transactional or pre-transactional inspector accordingly and
// runs it over the directories. Throws if no directory had VERSION information.
FSImageStorageInspector readAndInspectDirs() throws IOException { int minLayoutVersion = Integer.MAX_VALUE; // the newest int maxLayoutVersion = Integer.MIN_VALUE; // the oldest // First determine what range of layout versions we're going to inspect for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); if (!sd.getVersionFile().exists()) { FSImage.LOG.warn("Storage directory " + sd + " contains no VERSION file. Skipping..."); continue; } sd.read(); // sets layoutVersion minLayoutVersion = Math.min(minLayoutVersion, getLayoutVersion()); maxLayoutVersion = Math.max(maxLayoutVersion, getLayoutVersion()); } if (minLayoutVersion > maxLayoutVersion) { throw new IOException("No storage directories contained VERSION information"); } assert minLayoutVersion <= maxLayoutVersion; // If we have any storage directories with the new layout version // (ie edits_<txnid>) then use the new inspector, which will ignore // the old format dirs. FSImageStorageInspector inspector; if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, minLayoutVersion)) { inspector = new FSImageTransactionalStorageInspector(); if (!LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, maxLayoutVersion)) { FSImage.LOG.warn("Ignoring one or more storage directories with old layouts"); } } else { inspector = new FSImagePreTransactionalStorageInspector(conf); } inspectStorageDirs(inspector); return inspector; }
java
{ "resource": "" }
q161902
ProcfsBasedProcessTree.isAvailable
train
/**
 * Checks whether this process-tree implementation can be used on the current
 * platform. Returns true only when the OS name reports a Linux system.
 */
public static boolean isAvailable() {
  String osName;
  try {
    osName = System.getProperty("os.name");
  } catch (SecurityException se) {
    // A security manager may forbid reading system properties.
    LOG.warn("Failed to get Operating System name. " + se);
    return false;
  }
  if (!osName.startsWith("Linux")) {
    LOG.info("ProcfsBasedProcessTree currently is supported only on " + "Linux.");
    return false;
  }
  return true;
}
java
{ "resource": "" }
q161903
ProcfsBasedProcessTree.getProcessTree
train
// Rebuilds the in-memory process tree rooted at 'pid' from /proc: scans the
// process list, links children to parents, walks the tree breadth-first, and
// carries over age/jiffy deltas from the previous snapshot (oldProcs).
public ProcfsBasedProcessTree getProcessTree() { if (pid != -1) { // Get the list of processes List<Integer> processList = getProcessList(); Map<Integer, ProcessInfo> allProcessInfo = new HashMap<Integer, ProcessInfo>(); // cache the processTree to get the age for processes Map<Integer, ProcessInfo> oldProcs = new HashMap<Integer, ProcessInfo>(processTree); processTree.clear(); ProcessInfo me = null; for (Integer proc : processList) { // Get information for each process ProcessInfo pInfo = new ProcessInfo(proc); if (constructProcessInfo(pInfo, procfsDir) != null) { allProcessInfo.put(proc, pInfo); if (proc.equals(this.pid)) { me = pInfo; // cache 'me' processTree.put(proc, pInfo); } } } if (me == null) { return this; } // Add each process to its parent. for (Map.Entry<Integer, ProcessInfo> entry : allProcessInfo.entrySet()) { Integer pID = entry.getKey(); if (pID != 1) { ProcessInfo pInfo = entry.getValue(); ProcessInfo parentPInfo = allProcessInfo.get(pInfo.getPpid()); if (parentPInfo != null) { parentPInfo.addChild(pInfo); } } } // now start constructing the process-tree LinkedList<ProcessInfo> pInfoQueue = new LinkedList<ProcessInfo>(); pInfoQueue.addAll(me.getChildren()); while (!pInfoQueue.isEmpty()) { ProcessInfo pInfo = pInfoQueue.remove(); if (!processTree.containsKey(pInfo.getPid())) { processTree.put(pInfo.getPid(), pInfo); } pInfoQueue.addAll(pInfo.getChildren()); } // update age values and compute the number of jiffies since last update for (Map.Entry<Integer, ProcessInfo> procs : processTree.entrySet()) { ProcessInfo oldInfo = oldProcs.get(procs.getKey()); if (procs.getValue() != null) { procs.getValue().updateJiffy(oldInfo); if (oldInfo != null) { procs.getValue().updateAge(oldInfo); } } } if (LOG.isDebugEnabled()) { // Log.debug the ProcfsBasedProcessTree LOG.debug(this.toString()); } } return this; }
java
{ "resource": "" }
q161904
ProcfsBasedProcessTree.isAnyProcessInTreeAlive
train
/**
 * Returns true if at least one process currently tracked in the process tree
 * is still alive, false when every tracked pid has exited.
 */
public boolean isAnyProcessInTreeAlive() {
  boolean anyAlive = false;
  for (Integer trackedPid : processTree.keySet()) {
    if (isAlive(trackedPid.toString())) {
      anyAlive = true;
      break; // one live process is enough
    }
  }
  return anyAlive;
}
java
{ "resource": "" }
q161905
ProcfsBasedProcessTree.checkPidPgrpidForMatch
train
// Verifies that pidStr names a process-group leader (its pid equals its pgrp id).
// Returns true when the process already exited, since the group may still need
// cleanup; returns false only when the process exists but is not a leader.
static boolean checkPidPgrpidForMatch(String pidStr, String procfsDir) { Integer pId = Integer.parseInt(pidStr); // Get information for this process ProcessInfo pInfo = new ProcessInfo(pId); pInfo = constructProcessInfo(pInfo, procfsDir); if (pInfo == null) { // process group leader may have finished execution, but we still need to // kill the subProcesses in the process group. return true; } //make sure that pId and its pgrpId match if (!pInfo.getPgrpId().equals(pId)) { LOG.warn("Unexpected: Process with PID " + pId + " is not a process group leader."); return false; } if (LOG.isDebugEnabled()) { LOG.debug(pId + " is a process group leader, as expected."); } return true; }
java
{ "resource": "" }
q161906
ProcfsBasedProcessTree.assertAndDestroyProcessGroup
train
// Asserts that pgrpId is a process-group leader (via checkPidPgrpidForMatch)
// and then destroys the whole group; throws IOException if it is not a leader.
public static void assertAndDestroyProcessGroup(String pgrpId, long interval, boolean inBackground) throws IOException { // Make sure that the pid given is a process group leader if (!checkPidPgrpidForMatch(pgrpId, PROCFS)) { throw new IOException("Process with PID " + pgrpId + " is not a process group leader."); } destroyProcessGroup(pgrpId, interval, inBackground); }
java
{ "resource": "" }
q161907
ProcfsBasedProcessTree.getProcessTreeDump
train
// Formats a human-readable table (header + one PROCESSTREE_DUMP_FORMAT row per
// process) describing every process currently in the tree.
public String getProcessTreeDump() { StringBuilder ret = new StringBuilder(); // The header. ret.append(String.format("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n")); for (ProcessInfo p : processTree.values()) { if (p != null) { ret.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p .getPpid(), p.getPgrpId(), p.getSessionId(), p.getName(), p .getUtime(), p.getStime(), p.getVmem(), p.getRssmemPage(), p .getCmdLine(procfsDir))); } } return ret.toString(); }
java
{ "resource": "" }
q161908
ProcfsBasedProcessTree.getProcessNameContainsCount
train
// Scans all live processes (not just this tree) and returns a formatted dump
// line for each one whose command line contains the given substring.
public Collection<String> getProcessNameContainsCount(String name) { List<String> retProcessList = new ArrayList<String>(); // Get the list of processes List<Integer> processList = getProcessList(); for (Integer proc : processList) { // Get information for each process ProcessInfo p = new ProcessInfo(proc); if (constructProcessInfo(p, procfsDir) != null) { if (p.getCmdLine(procfsDir).contains(name)) { StringBuilder processSb = new StringBuilder(); processSb.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p.getPpid(), p.getPgrpId(), p.getSessionId(), p.getName(), p.getUtime(), p.getStime(), p.getVmem(), p.getRssmemPage(), p.getCmdLine(procfsDir))); retProcessList.add(processSb.toString()); } } } return retProcessList; }
java
{ "resource": "" }
q161909
ProcfsBasedProcessTree.getCumulativeVmem
train
/**
 * Sums the virtual memory usage of every tracked process whose age is strictly
 * greater than {@code olderThanAge}; null entries are skipped.
 */
public long getCumulativeVmem(int olderThanAge) {
  long totalVmem = 0L;
  for (ProcessInfo info : processTree.values()) {
    if (info == null) {
      continue;
    }
    if (info.getAge() > olderThanAge) {
      totalVmem += info.getVmem();
    }
  }
  return totalVmem;
}
java
{ "resource": "" }
q161910
ProcfsBasedProcessTree.getCumulativeCpuTime
train
// Accumulates CPU time (ms) by adding each process's jiffy delta since the last
// snapshot, scaled by the jiffy length. Deltas whose scaled value reaches
// Integer.MAX_VALUE are discarded as ridiculous. NOTE(review): the product
// incJiffies * JIFFY_LENGTH_IN_MILLIS could itself overflow a long for extreme
// inputs before the comparison — presumably values stay small; verify.
public long getCumulativeCpuTime() { if (JIFFY_LENGTH_IN_MILLIS < 0) { return 0; } long incJiffies = 0; for (ProcessInfo p : processTree.values()) { if (p != null) { incJiffies += p.dtime; } } if (incJiffies * JIFFY_LENGTH_IN_MILLIS < Integer.MAX_VALUE) { // Ignore the values that are ridiculous cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS; } return cpuTime; }
java
{ "resource": "" }
q161911
ProcfsBasedProcessTree.getProcessList
train
private List<Integer> getProcessList() { String[] processDirs = (new File(procfsDir)).list(); List<Integer> processList = new ArrayList<Integer>(); for (String dir : processDirs) { try { int pd = Integer.parseInt(dir); if ((new File(procfsDir, dir)).isDirectory()) { processList.add(Integer.valueOf(pd)); } } catch (NumberFormatException n) { // skip this directory } catch (SecurityException s) { // skip this process } } return processList; }
java
{ "resource": "" }
q161912
ProcfsBasedProcessTree.constructProcessInfo
train
private static ProcessInfo constructProcessInfo(ProcessInfo pinfo, String procfsDir) { ProcessInfo ret = null; // Read "procfsDir/<pid>/stat" file - typically /proc/<pid>/stat BufferedReader in = null; FileReader fReader = null; try { File pidDir = new File(procfsDir, String.valueOf(pinfo.getPid())); fReader = new FileReader(new File(pidDir, PROCFS_STAT_FILE)); in = new BufferedReader(fReader); } catch (FileNotFoundException f) { // The process vanished in the interim! LOG.debug("The process " + pinfo.getPid() + " may have finished in the interim."); return ret; } ret = pinfo; try { String str = in.readLine(); // only one line Matcher m = PROCFS_STAT_FILE_FORMAT.matcher(str); boolean mat = m.find(); if (mat) { // Set (name) (ppid) (pgrpId) (session) (utime) (stime) (vsize) (rss) pinfo.updateProcessInfo(m.group(2), Integer.parseInt(m.group(3)), Integer.parseInt(m.group(4)), Integer.parseInt(m.group(5)), Long.parseLong(m.group(7)), Long.parseLong(m.group(8)), Long.parseLong(m.group(10)), Long.parseLong(m.group(11))); } else { LOG.warn("Unexpected: procfs stat file is not in the expected format" + " for process with pid " + pinfo.getPid()); ret = null; } } catch (IOException io) { LOG.warn("Error reading the stream " + io); ret = null; } finally { // Close the streams try { fReader.close(); try { in.close(); } catch (IOException i) { LOG.warn("Error closing the stream " + in); } } catch (IOException i) { LOG.warn("Error closing the stream " + fReader); } } return ret; }
java
{ "resource": "" }
q161913
MStress_Client.parseOptions
train
// Parses command-line flags (-s server, -p port, -t test, -a planfile, -c host,
// -n process-name, -P prefix) into static fields, echoing each value; calls
// usage() and exits when the argument count or required options are invalid.
private static void parseOptions(String args[]) { if (!(args.length == 14 || args.length == 12 || args.length == 5)) { usage(); } /* * As described in usage(): * -s dfs-server * -p dfs-port [-t [create|create-write|stat|readdir|read|rename|delete] * -a planfile-path * -c host * -n process-name * -P prefix */ for (int i = 0; i < args.length; i++) { if (args[i].equals("-s") && i+1 < args.length) { dfsServer_ = args[i+1]; System.out.println(args[i+1]); i++; } else if (args[i].equals("-p") && i+1 < args.length) { dfsPort_ = Integer.parseInt(args[i+1]); System.out.println(args[i+1]); i++; } else if (args[i].equals("-t") && i+1 < args.length) { testName_ = args[i+1]; System.out.println(args[i+1]); i++; } else if (args[i].equals("-a") && i+1 < args.length) { planfilePath_ = args[i+1]; System.out.println(args[i+1]); i++; } else if (args[i].equals("-c") && i+1 < args.length) { hostName_ = args[i+1]; System.out.println(args[i+1]); i++; } else if (args[i].equals("-n") && i+1 < args.length) { processName_ = args[i+1]; System.out.println(args[i+1]); i++; } else if (args[i].equals("-P") && i+1 < args.length) { prefix_ = args[i+1]; System.out.println(args[i+1]); i++; } } if (dfsServer_.length() == 0 || testName_.length() == 0 || planfilePath_.length() == 0 || hostName_.length() == 0 || processName_.length() == 0 || dfsPort_ == 0) { usage(); } if (prefix_ == null) { prefix_ = new String("PATH_PREFIX_"); } prefixLen_ = prefix_.length(); }
java
{ "resource": "" }
q161914
MStress_Client.usage
train
// Prints command-line usage for the client and terminates the JVM with exit
// code 1; never returns.
private static void usage() { String className = MStress_Client.class.getName(); System.out.printf("Usage: java %s -s dfs-server -p dfs-port" + "[-t [create|stat|read|readdir|delete|rename] -a planfile-path -c host -n process-name" + " -P prefix]\n", className); System.out.printf(" -t: this option requires -a, -c, and -n options.\n"); System.out.printf(" -P: default prefix is PATH_.\n"); System.out.printf("eg:\n"); System.out.printf(" java %s -s <metaserver-host> -p <metaserver-port> -t create" + " -a <planfile> -c localhost -n Proc_00\n", className); System.exit(1); }
java
{ "resource": "" }
q161915
MStress_Client.parsePlanFile
train
/**
 * Reads the plan file at planfilePath_ and fills in type_, levels_,
 * inodesPerLevel_ and pathsToStat_. Blank lines and '#' comments are ignored.
 *
 * @return 0 when all four settings were parsed to valid values, -1 otherwise
 */
private static int parsePlanFile() {
  int ret = -1;
  BufferedReader br = null;
  try {
    // The original wrapped the stream in a DataInputStream that was never used
    // as such, and closed it outside any finally block, leaking the reader on
    // any parse error.
    br = new BufferedReader(new InputStreamReader(new FileInputStream(planfilePath_)));
    if (prefix_.isEmpty()) {
      prefix_ = "PATH_PREFIX_";
    }
    String line;
    while ((line = br.readLine()) != null) {
      if (line.length() == 0 || line.startsWith("#")) {
        continue; // blank line or comment
      }
      if (line.startsWith("type=")) {
        type_ = line.substring(5);
        continue;
      }
      if (line.startsWith("levels=")) {
        levels_ = Integer.parseInt(line.substring(7));
        continue;
      }
      if (line.startsWith("inodes=")) {
        inodesPerLevel_ = Integer.parseInt(line.substring(7));
        continue;
      }
      if (line.startsWith("nstat=")) {
        pathsToStat_ = Integer.parseInt(line.substring(6));
        continue;
      }
    }
    if (levels_ > 0 && !type_.isEmpty() && inodesPerLevel_ > 0 && pathsToStat_ > 0) {
      ret = 0;
    }
  } catch (Exception e) {
    System.out.println("Error: " + e.getMessage());
  } finally {
    if (br != null) {
      try {
        br.close();
      } catch (IOException ignored) {
        // best-effort close; parse result already determined
      }
    }
  }
  return ret;
}
java
{ "resource": "" }
q161916
MStress_Client.CreateDFSPaths
train
// Recursively creates inodesPerLevel_ entries under parentPath: directories on
// interior levels (recursing), and either directories or files on the leaf
// level depending on type_. Records per-op timings and returns 0 on success,
// -1 on the first failure.
private static int CreateDFSPaths(int level, String parentPath) { Boolean isLeaf = false; Boolean isDir = false; if (level + 1 >= levels_) { isLeaf = true; } if (isLeaf) { if (type_.equals("dir")) { isDir = true; } else { isDir = false; } } else { isDir = true; } for (int i = 0; i < inodesPerLevel_; i++) { String path = parentPath + "/" + prefix_ + Integer.toString(i); //System.out.printf("Creating (isdir=%b) [%s]\n", isDir, path.toString()); if (isDir) { try { long startTime = System.nanoTime(); if (dfsClient_.mkdirs(path) == false) { System.out.printf("Error in mkdirs(%s)\n", path); return -1; } timingMkdirs_.add(new Double((System.nanoTime() - startTime)/(1E9))); System.out.printf("Creating dir %s\n", path); totalCreateCount ++; if (totalCreateCount % COUNT_INCR == 0) { System.out.printf("Created paths so far: %d\n", totalCreateCount); } if (!isLeaf) { if (CreateDFSPaths(level+1, path) < 0) { System.out.printf("Error in CreateDFSPaths(%s)\n", path); return -1; } } } catch(IOException e) { e.printStackTrace(); return -1; } } else { try { System.out.printf("Creating file %s\n", path); long startTime = System.nanoTime(); OutputStream os = dfsClient_.create(path, true); timingCreate_.add(new Double((System.nanoTime() - startTime)/(1E9))); files_.put(path, os); totalCreateCount ++; if (totalCreateCount % COUNT_INCR == 0) { System.out.printf("Created paths so far: %d\n", totalCreateCount); } } catch( IOException e) { e.printStackTrace(); return -1; } } } return 0; }
java
{ "resource": "" }
q161917
MStress_Client.createWriteDFSPaths
train
// Creates the DFS paths and then writes data_ to each opened file, timing each
// write, and closes the streams. Returns 0 on success, -1 on any failure.
// NOTE(review): if a write throws, that stream and the remaining ones are not
// closed — presumably acceptable for a stress-test client; confirm.
private static int createWriteDFSPaths() { if (createDFSPaths() != 0) { return -1; } try { // write to all the files! for (Map.Entry<String, OutputStream> file : files_.entrySet()) { OutputStream os = file.getValue(); long startTime = System.nanoTime(); os.write(data_.getBytes()); timingWrite_.add(new Double((System.nanoTime() - startTime)/(1E9))); os.close(); } } catch (IOException e) { e.printStackTrace(); return -1; } return 0; }
java
{ "resource": "" }
q161918
CoronaJobTrackerRunner.localizeTaskConfiguration
train
@SuppressWarnings("deprecation") private void localizeTaskConfiguration(TaskTracker tracker, JobConf ttConf, String workDir, Task t, JobID jobID) throws IOException { Path jobFile = new Path(t.getJobFile()); FileSystem systemFS = tracker.systemFS; this.localizedJobFile = new Path(workDir, jobID + ".xml"); LOG.info("Localizing CJT configuration from " + jobFile + " to " + localizedJobFile); systemFS.copyToLocalFile(jobFile, localizedJobFile); JobConf localJobConf = new JobConf(localizedJobFile); boolean modified = Task.saveStaticResolutions(localJobConf); if (modified) { FSDataOutputStream out = new FSDataOutputStream( new FileOutputStream(localizedJobFile.toUri().getPath())); try { localJobConf.writeXml(out); } catch (IOException e) { out.close(); throw e; } } // Add the values from the job conf to the configuration of this runner this.conf.addResource(localizedJobFile); }
java
{ "resource": "" }
q161919
CoronaJobTrackerRunner.prepare
train
// Extends the base preparation with cleanup of any stale map-output files for
// this task attempt; returns false if the superclass preparation fails.
@Override public boolean prepare() throws IOException { if (!super.prepare()) { return false; } mapOutputFile.removeAll(getTask().getTaskID()); return true; }
java
{ "resource": "" }
q161920
CompositeInputFormat.addDefaults
train
// Registers the built-in join operators (inner/outer/override/tbl) with the
// expression parser; a registration failure is a fatal configuration error.
protected void addDefaults() { try { Parser.CNode.addIdentifier("inner", InnerJoinRecordReader.class); Parser.CNode.addIdentifier("outer", OuterJoinRecordReader.class); Parser.CNode.addIdentifier("override", OverrideRecordReader.class); Parser.WNode.addIdentifier("tbl", WrappedRecordReader.class); } catch (NoSuchMethodException e) { throw new RuntimeException("FATAL: Failed to init defaults", e); } }
java
{ "resource": "" }
q161921
CompositeInputFormat.addUserIdentifiers
train
// Registers user-defined join operators: every job property matching
// mapred.join.define.<name> maps <name> to the configured record-reader class.
private void addUserIdentifiers(JobConf job) throws IOException { Pattern x = Pattern.compile("^mapred\\.join\\.define\\.(\\w+)$"); for (Map.Entry<String,String> kv : job) { Matcher m = x.matcher(kv.getKey()); if (m.matches()) { try { Parser.CNode.addIdentifier(m.group(1), job.getClass(m.group(0), null, ComposableRecordReader.class)); } catch (NoSuchMethodException e) { throw (IOException)new IOException( "Invalid define for " + m.group(1)).initCause(e); } } } }
java
{ "resource": "" }
q161922
CompositeInputFormat.getSplits
train
// Builds the composite expression tree from the job conf, then delegates split
// computation to the root. Forces mapred.min.split.size to Long.MAX_VALUE so
// child formats do not split their inputs independently.
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { setFormat(job); job.setLong("mapred.min.split.size", Long.MAX_VALUE); return root.getSplits(job, numSplits); }
java
{ "resource": "" }
q161923
CompositeInputFormat.getRecordReader
train
// Builds the composite expression tree from the job conf and delegates record
// reading to the root composable reader.
@SuppressWarnings("unchecked") // child types unknown public ComposableRecordReader<K,TupleWritable> getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException { setFormat(job); return root.getRecordReader(split, job, reporter); }
java
{ "resource": "" }
q161924
LogLevel.main
train
// CLI entry point: -getlevel host log queries a logger's level over HTTP;
// -setlevel host log level changes it. Any other invocation prints usage and
// exits with -1.
public static void main(String[] args) { if (args.length == 3 && "-getlevel".equals(args[0])) { process("http://" + args[1] + "/logLevel?log=" + args[2]); return; } else if (args.length == 4 && "-setlevel".equals(args[0])) { process("http://" + args[1] + "/logLevel?log=" + args[2] + "&level=" + args[3]); return; } System.err.println(USAGES); System.exit(-1); }
java
{ "resource": "" }
q161925
S3FileSystem.getFileStatus
train
// Resolves the path's INode from the S3-backed store and wraps it as a
// FileStatus; throws FileNotFoundException when no INode exists.
@Override public FileStatus getFileStatus(Path f) throws IOException { INode inode = store.retrieveINode(makeAbsolute(f)); if (inode == null) { throw new FileNotFoundException(f + ": No such file or directory."); } return new S3FileStatus(f.makeQualified(this), inode); }
java
{ "resource": "" }
q161926
ServerDispatcher.assignClient
train
// Queues a client id for assignment; the pending set is folded into the active
// set later (see updateClients) under clientModificationsLock.
@Override public void assignClient(long clientId) { LOG.info("Assigning client " + clientId + " ..."); synchronized (clientModificationsLock) { newlyAssignedClients.add(clientId); } }
java
{ "resource": "" }
q161927
ServerDispatcher.handleFailedDispatch
train
// Records a failed dispatch: stamps markedAsFailedTime only on the first
// failure (so the earliest failure moment is preserved) and always updates
// lastSentTime. No-op for unknown clients or a -1 timestamp.
@Override public void handleFailedDispatch(long clientId, long failedTime) { ClientData clientData = core.getClientData(clientId); if (failedTime == -1 || clientData == null) return; // We only add it and don't update it because we are interested in // keeping track of the first moment it failed if (clientData.markedAsFailedTime == -1) { clientData.markedAsFailedTime = failedTime; LOG.info("Marked client " + clientId + " as failed at " + failedTime); } clientData.lastSentTime = failedTime; }
java
{ "resource": "" }
q161928
ServerDispatcher.handleSuccessfulDispatch
train
/**
 * Records a successful dispatch: clears any failure mark (logging when the
 * client was previously marked failed) and updates lastSentTime. No-op for
 * unknown clients or a -1 timestamp.
 */
@Override public void handleSuccessfulDispatch(long clientId, long sentTime) {
  ClientData clientData = core.getClientData(clientId);
  if (sentTime == -1 || clientData == null)
    return;
  // Check BEFORE clearing the mark: the original reset markedAsFailedTime to
  // -1 first, which made this "Unmarking" log statement unreachable.
  if (clientData.markedAsFailedTime != -1) {
    LOG.info("Unmarking " + clientId + " at " + sentTime);
  }
  clientData.markedAsFailedTime = -1;
  clientData.lastSentTime = sentTime;
}
java
{ "resource": "" }
q161929
ServerDispatcher.updateClients
train
/**
 * Folds the pending membership changes into the active client set: queued
 * additions are applied first, then queued removals, and both queues are
 * emptied for the next cycle.
 */
private void updateClients() {
  // Apply queued additions, then drain that queue.
  assignedClients.addAll(newlyAssignedClients);
  newlyAssignedClients.clear();
  // Apply queued removals, then drain that queue.
  assignedClients.removeAll(removedClients);
  removedClients.clear();
}
java
{ "resource": "" }
q161930
DBCountPageView.populateAccess
train
// Seeds the Access table with a simulated browsing session: a random walk over
// a fixed 10-page link graph, jumping to a random page 15% of the time, with
// one INSERT per step. Commits on success, rolls back on SQL failure.
private void populateAccess() throws SQLException { PreparedStatement statement = null ; try { statement = connection.prepareStatement( "INSERT INTO Access(url, referrer, time)" + " VALUES (?, ?, ?)"); Random random = new Random(); int time = random.nextInt(50) + 50; final int PROBABILITY_PRECISION = 100; // 1 / 100 final int NEW_PAGE_PROBABILITY = 15; // 15 / 100 //Pages in the site : String[] pages = {"/a", "/b", "/c", "/d", "/e", "/f", "/g", "/h", "/i", "/j"}; //linkMatrix[i] is the array of pages(indexes) that page_i links to. int[][] linkMatrix = {{1,5,7}, {0,7,4,6,}, {0,1,7,8}, {0,2,4,6,7,9}, {0,1}, {0,3,5,9}, {0}, {0,1,3}, {0,2,6}, {0,2,6}}; //a mini model of user browsing a la pagerank int currentPage = random.nextInt(pages.length); String referrer = null; for(int i=0; i<time; i++) { statement.setString(1, pages[currentPage]); statement.setString(2, referrer); statement.setLong(3, i); statement.execute(); int action = random.nextInt(PROBABILITY_PRECISION); //go to a new page with probability NEW_PAGE_PROBABILITY / PROBABILITY_PRECISION if(action < NEW_PAGE_PROBABILITY) { currentPage = random.nextInt(pages.length); // a random page referrer = null; } else { referrer = pages[currentPage]; action = random.nextInt(linkMatrix[currentPage].length); currentPage = linkMatrix[currentPage][action]; } } connection.commit(); }catch (SQLException ex) { connection.rollback(); throw ex; } finally { if(statement != null) { statement.close(); } } }
java
{ "resource": "" }
q161931
DBCountPageView.verify
train
private boolean verify() throws SQLException { //check total num pageview String countAccessQuery = "SELECT COUNT(*) FROM Access"; String sumPageviewQuery = "SELECT SUM(pageview) FROM Pageview"; Statement st = null; ResultSet rs = null; try { st = connection.createStatement(); rs = st.executeQuery(countAccessQuery); rs.next(); long totalPageview = rs.getLong(1); rs = st.executeQuery(sumPageviewQuery); rs.next(); long sumPageview = rs.getLong(1); LOG.info("totalPageview=" + totalPageview); LOG.info("sumPageview=" + sumPageview); return totalPageview == sumPageview && totalPageview != 0; }finally { if(st != null) st.close(); if(rs != null) rs.close(); } }
java
{ "resource": "" }
q161932
VolumeMap.get
train
/**
 * Looks up the stored info for {@code block} within the given namespace.
 * Returns null when the namespace is unknown or the block is not mapped.
 */
DatanodeBlockInfo get(int namespaceId, Block block) {
  checkBlock(block);
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? null : namespace.getBlockInfo(block);
}
java
{ "resource": "" }
q161933
VolumeMap.remove
train
// Removes the block from the namespace map, also propagating the removal to
// the dataset delta when one is being tracked; returns the removed info or
// null when the namespace is unknown.
DatanodeBlockInfo remove(int namespaceId, Block block) { NamespaceMap nm = getNamespaceMap(namespaceId); if (nm == null) { return null; } if (datasetDelta != null) { datasetDelta.removeBlock(namespaceId, block); } return nm.removeBlockInfo(block); }
java
{ "resource": "" }
q161934
VolumeMap.size
train
/**
 * Number of blocks tracked for the namespace; 0 for an unknown namespace.
 */
int size(int namespaceId) {
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? 0 : namespace.size();
}
java
{ "resource": "" }
q161935
VolumeMap.getOngoingCreates
train
/**
 * Returns the in-progress create record for {@code block} in the namespace,
 * or null when the namespace is unknown or no create is in flight.
 */
ActiveFile getOngoingCreates(int namespaceId, Block block) {
  checkBlock(block);
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? null : namespace.getOngoingCreates(block);
}
java
{ "resource": "" }
q161936
DefaultJobHistoryParser.parseJobTasks
train
// Parses a job-history file from the given filesystem, feeding task events
// into the supplied JobInfo via a JobTasksParseListener.
public static void parseJobTasks(String jobHistoryFile, JobHistory.JobInfo job, FileSystem fs) throws IOException { JobHistory.parseHistoryFromFS(jobHistoryFile, new JobTasksParseListener(job), fs); }
java
{ "resource": "" }
q161937
ConfigManager.validateAllPolicies
train
// Validates every policy: requires srcPath, numeric replication and
// modTimePeriod properties, at least one destination path, and a numeric
// replication on each destination. The parsed numbers are only checked for
// parseability (NumberFormatException propagates), not otherwise used here.
private void validateAllPolicies(Collection<PolicyInfo> all) throws IOException, NumberFormatException { for (PolicyInfo pinfo: all) { Path srcPath = pinfo.getSrcPath(); if (srcPath == null) { throw new IOException("Unable to find srcPath in policy."); } if (pinfo.getProperty("replication") == null) { throw new IOException("Unable to find replication in policy." + srcPath); } int repl = Integer.parseInt(pinfo.getProperty("replication")); if (pinfo.getProperty("modTimePeriod") == null) { throw new IOException("Unable to find modTimePeriod in policy." + srcPath); } long value = Long.parseLong(pinfo.getProperty("modTimePeriod")); List<PathInfo> dpaths = pinfo.getDestPaths(); if (dpaths == null || dpaths.size() == 0) { throw new IOException("Unable to find dest in policy." + srcPath); } for (PathInfo pp: dpaths) { if (pp.getPath() == null) { throw new IOException("Unable to find valid destPath in policy " + srcPath); } if (pp.getProperty("replication") == null) { throw new IOException("Unable to find dest replication in policy." + srcPath); } repl = Integer.parseInt(pp.getProperty("replication")); } } }
java
{ "resource": "" }
q161938
PurgeMonitor.purgeDirectories
train
// Walks all directories under 'root' (skipping HAR archives) and deletes any
// whose corresponding source path — the directory path with the root prefix
// stripped — no longer exists on the filesystem.
private void purgeDirectories(FileSystem fs, Path root) throws IOException { DirectoryTraversal traversal = DirectoryTraversal.directoryRetriever(Arrays.asList(root), fs, directoryTraversalThreads, directoryTraversalShuffle); String prefix = root.toUri().getPath(); FileStatus dir; while ((dir = traversal.next()) != DirectoryTraversal.FINISH_TOKEN) { Path dirPath = dir.getPath(); if (dirPath.toUri().getPath().endsWith(RaidNode.HAR_SUFFIX)) { continue; } String dirStr = dirPath.toUri().getPath(); if (!dirStr.startsWith(prefix)) { continue; } entriesProcessed.incrementAndGet(); String src = dirStr.replaceFirst(prefix, ""); if (src.length() == 0) continue; Path srcPath = new Path(src); if (!fs.exists(srcPath)) { performDelete(fs, dirPath, true); } } }
java
{ "resource": "" }
q161939
PurgeMonitor.existsBetterParityFile
train
// Returns true when any codec of strictly higher priority than the given one
// already has a parity file for the source, making this codec's parity
// redundant.
private static boolean existsBetterParityFile( Codec codec, FileStatus srcStat, Configuration conf) throws IOException { for (Codec c : Codec.getCodecs()) { if (c.priority > codec.priority) { ParityFilePair ppair = ParityFilePair.getParityFile( c, srcStat, conf); if (ppair != null) { return true; } } } return false; }
java
{ "resource": "" }
q161940
BytesWritable.copyTo
train
public int copyTo(byte[] dest, int start) throws BufferTooSmallException { if (size > (dest.length - start)) { throw new BufferTooSmallException("size is " + size + ", buffer availabe size is " + (dest.length - start)); } if (size > 0) { System.arraycopy(bytes, 0, dest, start, size); } return size; }
java
{ "resource": "" }
q161941
BytesWritable.setCapacity
train
/**
 * Resizes the backing array to exactly {@code new_cap} bytes, preserving the
 * current contents (truncating {@code size} when the new capacity is smaller).
 * No-op when the capacity is already {@code new_cap}.
 */
public void setCapacity(int new_cap) {
  if (new_cap == getCapacity()) {
    return; // nothing to do
  }
  byte[] resized = new byte[new_cap];
  if (new_cap < size) {
    size = new_cap; // truncate logical length to fit
  }
  if (size != 0) {
    System.arraycopy(bytes, 0, resized, 0, size);
  }
  bytes = resized;
}
java
{ "resource": "" }
q161942
BytesWritable.set
train
// Replaces the contents with length bytes from newData starting at offset.
// setSize(0) first marks the old contents empty so the subsequent grow in
// setSize(length) does not waste time copying stale bytes.
public void set(byte[] newData, int offset, int length) { setSize(0); setSize(length); System.arraycopy(newData, offset, bytes, 0, size); }
java
{ "resource": "" }
q161943
DataTransferHeaderOptions.setBits
train
protected static long setBits(long num, int start, int len, long value){ // Get rid of illegal bits of value: value = value & ((1L<<len)-1); long val_mask = value << start; long zero_mask = ~( ((1L << len) -1) << start ); return ( num & zero_mask ) | val_mask; }
java
{ "resource": "" }
q161944
LoadGenerator.initFileDirTables
train
// Populates the dirs/files tables from the test space under 'root'; returns 0
// on success, -1 when traversal fails or the space has no dirs or no files.
private int initFileDirTables() { try { initFileDirTables(root); } catch (IOException e) { System.err.println(e.getLocalizedMessage()); e.printStackTrace(); return -1; } if (dirs.isEmpty()) { System.err.println("The test space " + root + " is empty"); return -1; } if (files.isEmpty()) { System.err.println("The test space " + root + " does not have any file"); return -1; } return 0; }
java
{ "resource": "" }
q161945
LoadGenerator.initFileDirTables
train
// Recursively walks 'path', recording each directory in 'dirs' and each file
// whose name carries the generator's prefix in 'files'.
private void initFileDirTables(Path path) throws IOException { FileStatus[] stats = fs.listStatus(path); if (stats != null) { for (FileStatus stat : stats) { if (stat.isDir()) { dirs.add(stat.getPath().toString()); initFileDirTables(stat.getPath()); } else { Path filePath = stat.getPath(); if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) { files.add(filePath.toString()); } } } } }
java
{ "resource": "" }
q161946
JobTrackerTraits.getTaskDiagnosticsImpl
train
// Collects the diagnostic messages recorded for a task attempt. Returns the
// shared EMPTY_TASK_DIAGNOSTICS array when the job or TIP is unknown, the job
// is not yet initialized, or no diagnostics exist.
protected String[] getTaskDiagnosticsImpl(TaskAttemptID taskId) throws IOException { List<String> taskDiagnosticInfo = null; JobID jobId = taskId.getJobID(); TaskID tipId = taskId.getTaskID(); JobInProgressTraits job = getJobInProgress(jobId); if (job != null && job.inited()) { TaskInProgress tip = job.getTaskInProgress(tipId); if (tip != null) { taskDiagnosticInfo = tip.getDiagnosticInfo(taskId); } } return ((taskDiagnosticInfo == null) ? EMPTY_TASK_DIAGNOSTICS : taskDiagnosticInfo.toArray(new String[taskDiagnosticInfo.size()])); }
java
{ "resource": "" }
q161947
JobTrackerTraits.getTip
train
/**
 * Resolves the TaskInProgress for the given task id by first locating its
 * owning job; returns null when the job is unknown.
 */
public TaskInProgress getTip(TaskID tipid) {
  JobInProgressTraits owningJob = getJobInProgress(tipid.getJobID());
  if (owningJob == null) {
    return null;
  }
  return owningJob.getTaskInProgress(tipid);
}
java
{ "resource": "" }
q161948
LightWeightLinkedSet.pollFirst
train
/**
 * Removes and returns the element at the head of the linked set, or null when
 * the set is empty.
 */
public T pollFirst() {
  if (head != null) {
    T firstElement = head.element;
    this.remove(firstElement); // unlink from both hash table and list
    return firstElement;
  }
  return null;
}
java
{ "resource": "" }
q161949
LightWeightLinkedSet.pollNToList
train
// Removes up to n elements from the head of the set, appending them to
// retList. When n >= size the bulk pollAllToList fast path empties the set,
// after which the while loop is a no-op (head is null) and the final shrink
// is harmless.
public void pollNToList(int n, List<T> retList) { if (n >= size) { // if we need to remove all elements then do fast polling pollAllToList(retList); } while (n-- > 0 && head != null) { T curr = head.element; this.removeElem(curr); retList.add(curr); } shrinkIfNecessary(); }
java
{ "resource": "" }
q161950
FileOutputFormat.getTaskOutputPath
train
// Resolves the path for a named task output file: under the committer's work
// path (temporary attempt dir) when a FileOutputCommitter is configured,
// otherwise directly under the job output dir. Throws when no output path set.
public static Path getTaskOutputPath(JobConf conf, String name) throws IOException { // ${mapred.out.dir} Path outputPath = getOutputPath(conf); if (outputPath == null) { throw new IOException("Undefined job output-path"); } OutputCommitter committer = conf.getOutputCommitter(); Path workPath = outputPath; TaskAttemptContext context = new TaskAttemptContext(conf, TaskAttemptID.forName(conf.get("mapred.task.id"))); if (committer instanceof FileOutputCommitter) { workPath = ((FileOutputCommitter)committer).getWorkPath(context, outputPath); } // ${mapred.out.dir}/_temporary/_${taskid}/${name} return new Path(workPath, name); }
java
{ "resource": "" }
q161951
FileOutputFormat.getUniqueName
train
/**
 * Builds a task-unique output name of the form {@code name-m-00042} /
 * {@code name-r-00042} from the task partition and type in the job conf.
 *
 * @throws IllegalArgumentException when no task partition is set (i.e. called
 *         outside a running job)
 */
public static String getUniqueName(JobConf conf, String name) {
  int partition = conf.getInt("mapred.task.partition", -1);
  if (partition == -1) {
    throw new IllegalArgumentException(
        "This method can only be called from within a Job");
  }
  // "m" for map tasks, "r" for reduce tasks.
  String taskType = (conf.getBoolean("mapred.task.is.map", true)) ? "m" : "r";
  NumberFormat partitionFormat = NumberFormat.getInstance();
  partitionFormat.setGroupingUsed(false);
  partitionFormat.setMinimumIntegerDigits(5);
  String suffix = partitionFormat.format(partition);
  return name + "-" + taskType + "-" + suffix;
}
java
{ "resource": "" }
q161952
DFSInputStream.openInfo
train
// Fetches (or reuses) the located-blocks metadata for the file, verifies the
// block list has not changed for finalized files, probes a datanode for the
// size of the last block when the file is under construction, and installs the
// result as this stream's locatedBlocks.
synchronized void openInfo() throws IOException { if (src == null && blocks == null) { throw new IOException("No file provided to open"); } LocatedBlocks newInfo = src != null ? getLocatedBlocks(src, 0, prefetchSize) : blocks; if (newInfo == null) { throw new FileNotFoundException("Cannot open filename " + src); } // I think this check is not correct. A file could have been appended to // between two calls to openInfo(). if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() && !newInfo.isUnderConstruction()) { Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocksCopy() .iterator(); Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator(); while (oldIter.hasNext() && newIter.hasNext()) { if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) { throw new IOException("Blocklist for " + src + " has changed!"); } } } // if the file is under construction, then fetch size of last block // from datanode. if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) { LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1); if (last.getLocations().length > 0) { try { Block newBlock = getBlockInfo(last); // only if the block has data (not null) if (newBlock != null) { long newBlockSize = newBlock.getNumBytes(); newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize); } } catch (IOException e) { DFSClient.LOG.debug("DFSClient file " + src + " is being concurrently append to" + " but datanodes probably does not have block " + last.getBlock(), e); } } } this.locatedBlocks = new DFSLocatedBlocks(newInfo, locatedBlockExpireTimeout); this.currentNode = null; if (!newInfo.isUnderConstruction()) { // No block should be under construction if the file is finalized. isCurrentBlockUnderConstruction = false; } }
java
{ "resource": "" }
q161953
DFSInputStream.getBlockInfo
train
/**
 * Asks the datanodes hosting {@code locatedblock} for the block's current
 * metadata (notably its length, needed for under-construction blocks).
 *
 * @return the block info from the first datanode that knows the replica;
 *         {@code null} if the block has no locations, or if every datanode
 *         reported the replica as absent (pipeline-creation race)
 * @throws IOException if no datanode returned info and at least one failed
 *         with an error rather than "replica not found"
 */
private Block getBlockInfo(LocatedBlock locatedblock) throws IOException {
  if (locatedblock == null || locatedblock.getLocations().length == 0) {
    return null;
  }
  int replicaNotFoundCount = locatedblock.getLocations().length;

  for(DatanodeInfo datanode : locatedblock.getLocations()) {
    ProtocolProxy<ClientDatanodeProtocol> cdp = null;

    try {
      cdp = DFSClient.createClientDNProtocolProxy(datanode,
          dfsClient.conf, dfsClient.socketTimeout);

      final Block newBlock;
      // Prefer the namespace-aware overload when the datanode supports it.
      if (cdp.isMethodSupported("getBlockInfo", int.class, Block.class)) {
        newBlock = cdp.getProxy().getBlockInfo(
            dfsClient.namespaceId, locatedblock.getBlock());
      } else {
        newBlock = cdp.getProxy().getBlockInfo(locatedblock.getBlock());
      }

      if (newBlock == null) {
        // special case : replica might not be on the DN, treat as 0 length
        replicaNotFoundCount--;
      } else {
        return newBlock;
      }
    } catch(IOException ioe) {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("Failed to getBlockInfo from datanode "
            + datanode + " for block " + locatedblock.getBlock(), ioe);
      }
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp.getProxy());
      }
    }
  }

  // Namenode told us about these locations, but none know about the replica
  // means that we hit the race between pipeline creation start and end.
  // we require all because some other exception could have happened
  // on a DN that has it. we want to report that error
  if (replicaNotFoundCount == 0) {
    DFSClient.LOG
        .warn("Cannot get block info from any datanode having block "
            + locatedblock.getBlock() + " for file " + src);
    return null;
  }

  throw new IOException("Cannot obtain block info for " + locatedblock);
}
java
{ "resource": "" }
q161954
DFSInputStream.getBlockAt
train
/**
 * Returns the located block containing the given file offset, fetching and
 * caching more block locations from the namenode if needed.
 *
 * @param offset target file offset
 * @param updatePosition if true, also move this stream's position/block
 *        cursor to {@code offset}
 * @param throwWhenNotFound if false, return {@code null} instead of
 *        throwing when no block covers the offset
 * @throws IOException if lookup fails and {@code throwWhenNotFound} is set
 */
private LocatedBlock getBlockAt(long offset, boolean updatePosition,
    boolean throwWhenNotFound) throws IOException {
  assert (locatedBlocks != null) : "locatedBlocks is null";
  // search cached blocks first
  locatedBlocks.blockLocationInfoExpiresIfNeeded();
  LocatedBlock blk = locatedBlocks.getBlockContainingOffset(offset);
  if (blk == null) { // block is not cached
    // fetch more blocks
    LocatedBlocks newBlocks;
    newBlocks = getLocatedBlocks(src, offset, prefetchSize);
    if (newBlocks == null) {
      if (!throwWhenNotFound) {
        return null;
      }
      throw new IOException("Could not find target position " + offset);
    }
    locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
    locatedBlocks.setFileLength(newBlocks.getFileLength());
  }
  // Retry the cache after inserting the freshly fetched range.
  blk = locatedBlocks.getBlockContainingOffset(offset);
  if (blk == null) {
    if (!throwWhenNotFound) {
      return null;
    }
    throw new IOException("Failed to determine location for block at " +
        "offset=" + offset);
  }
  if (updatePosition) {
    // update current position
    this.pos = offset;
    this.blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
    this.currentBlock = blk.getBlock();
    isCurrentBlockUnderConstruction = locatedBlocks
        .isUnderConstructionBlock(this.currentBlock);
  }
  return blk;
}
java
{ "resource": "" }
q161955
DFSInputStream.getBlockRange
train
/**
 * Returns the consecutive located blocks covering the byte range
 * {@code [offset, offset + length)}, fetching any missing ranges from the
 * namenode and caching them.
 *
 * @return the covering blocks, in file order; empty for {@code length == 0}
 * @throws IOException if locations cannot be determined, or the loop guard
 *         (10000 iterations) trips
 */
private List<LocatedBlock> getBlockRange(final long offset,
    final long length) throws IOException {
  List<LocatedBlock> blockRange = new ArrayList<LocatedBlock>();
  // Zero length. Not sure this ever happens in practice.
  if (length == 0)
    return blockRange;

  // A defensive measure to ensure that we never loop here eternally.
  // With a 256 M block size, 10000 blocks will correspond to 2.5 TB.
  // No one should read this much data at once in practice.
  int maxLoops = 10000;

  // Copy locatedBlocks to a local data structure. This ensures that
  // a concurrent invocation of openInfo() works OK, the reason being
  // that openInfo may completely replace locatedBlocks.
  DFSLocatedBlocks locatedBlocks = this.locatedBlocks;

  if (locatedBlocks == null) {
    // Make this an IO exception because this is input/output code error.
    throw new IOException("locatedBlocks is null");
  }

  locatedBlocks.blockLocationInfoExpiresIfNeeded();

  long remaining = length;
  long curOff = offset;
  while (remaining > 0) {
    // a defensive check to bail out of this loop at all costs
    if (--maxLoops < 0) {
      String msg = "Failed to getBlockRange at offset " + offset
          + ", length=" + length
          + ", curOff=" + curOff
          + ", remaining=" + remaining
          + ". Aborting...";
      DFSClient.LOG.warn(msg);
      throw new IOException(msg);
    }

    LocatedBlock blk = locatedBlocks.getBlockContainingOffset(curOff);
    if (blk == null) {
      // Cache miss: fetch the remainder of the range and retry this offset.
      LocatedBlocks newBlocks;
      newBlocks = getLocatedBlocks(src, curOff, remaining);
      if (newBlocks == null) {
        throw new IOException("Could not get block locations for curOff=" +
            curOff + ", remaining=" + remaining + " (offset=" +
            offset + ")");
      }
      locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
      continue;
    }

    blockRange.add(blk);
    long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
    remaining -= bytesRead;
    curOff += bytesRead;
  }

  DFSClient.checkBlockRange(blockRange, offset, length);

  return blockRange;
}
java
{ "resource": "" }
q161956
DFSInputStream.close
train
/**
 * Closes this input stream: releases the remote block reader, closes all
 * local (short-circuit) block readers, and — always, even on failure —
 * deregisters the stream from the block-location renewal thread.
 */
@Override
public synchronized void close() throws IOException {
  try {
    if (closed) {
      return;
    }
    dfsClient.checkOpen();

    if (blockReader != null) {
      // Not reusing the connection since the stream is going away.
      closeBlockReader(blockReader, false);
      blockReader = null;
    }

    for (BlockReaderLocalBase brl : localBlockReaders.values()) {
      try {
        brl.close();
      } catch (IOException ioe) {
        // Keep closing the remaining readers; just log the failure.
        DFSClient.LOG.warn("Error when closing local block reader", ioe);
      }
    }
    localBlockReaders = null;

    super.close();
    closed = true;
  } finally {
    // Avoid memory leak by making sure we remove the file from the renewal
    // threads map even in case of failures during close.
    if (dfsClient.blockLocationRenewal != null) {
      dfsClient.blockLocationRenewal.remove(this);
    }
  }
}
java
{ "resource": "" }
q161957
DFSInputStream.read
train
/**
 * Reads up to {@code len} bytes at the current position into {@code buf},
 * seeking to the proper block as needed and retrying once on IO errors
 * (marking the failed datanode dead). A zero-length read is a special
 * case used by {@code available()} to refresh block metadata.
 *
 * @return bytes read (possibly 0); -1 at end of file
 * @throws IOException if the stream is closed or the read fails after retry
 */
@Override
public synchronized int read(byte buf[], int off, int len) throws IOException {
  dfsClient.checkOpen();
  if (closed) {
    dfsClient.incReadExpCntToStats();
    throw new IOException("Stream closed");
  }
  DFSClient.dfsInputStreamfailures.set(0);
  long start = System.currentTimeMillis();

  if (pos < getFileLength() || (pos == getFileLength() && len == 0)) {
    int retries = 2;
    while (retries > 0) {
      try {
        // If position equals or is larger than the end position of the
        // block, we try to seek to the next block, unless:
        // 1. user tries to read 0 bytes (usually by available() call), AND
        // 2. there is at least a known block for the file (blockEnd != -1), AND
        // 3. pos is the end of the file, AND
        // 4. the end of the block is the end of the file
        //    (the current block is the known last block of the file)
        // For this case, we want to stay in the current block, as in the case
        // that it is the last block (which is almost always true given
        // len == 0), the current block is the under-construction block whose size
        // you want to update.
        //
        if (len == 0) { // called by available()
          if (blockEnd == -1 // No current block selected
              || pos == getFileLength()) { // at the end of the file
            currentNode = blockSeekTo(pos, false);
            if (currentNode == null) {
              // In this case, user wants to know available information of
              // the file, but the file doesn't have any block created yet (it
              // is a 0 size file). Simply 0 should be returned.
              return 0;
            }
          } else {
            throw new IOException(
                "Try to read 0 bytes while current position is not the end of the file");
          }
        } else if (pos > blockEnd
            || (this.isCurrentBlockUnderConstruction && blockReader != null
                && blockReader.eos && blockReader.available() == 0)) {
          currentNode = blockSeekTo(pos, true);
        }

        int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
        int result = readBuffer(buf, off, realLen);

        if (result >= 0) {
          pos += result;
        } else if (len != 0){
          // got a EOS from reader though we expect more data on it.
          throw new IOException("Unexpected EOS from the reader");
        }
        if (dfsClient.stats != null && result != -1) {
          dfsClient.stats.incrementBytesRead(result);
        }
        long timeval = System.currentTimeMillis() - start;
        dfsClient.metrics.incReadTime(timeval);
        dfsClient.metrics.incReadSize(result);
        dfsClient.metrics.incReadOps();
        return (result >= 0) ? result : 0;
      } catch (InterruptedIOException iie) {
        // Interruption is not retried; propagate immediately.
        throw iie;
      } catch (ChecksumException ce) {
        dfsClient.incReadExpCntToStats();
        throw ce;
      } catch (IOException e) {
        dfsClient.incReadExpCntToStats();
        if (retries == 1) {
          DFSClient.LOG.warn("DFS Read: " + StringUtils.stringifyException(e));
        }
        // Force a re-seek and avoid the failing datanode on retry.
        blockEnd = -1;
        if (currentNode != null) {
          addToDeadNodes(currentNode);
        }
        if (--retries == 0) {
          if (len != 0) {
            throw e;
          } else {
            // When called by available(). No need to fail the query. In that case
            // available() value might not be updated, but it's OK.
            return 0;
          }
        }
      }
    }
  }
  return -1;
}
java
{ "resource": "" }
q161958
DFSInputStream.closeBlockReader
train
/**
 * Closes a block reader, either returning its underlying socket to the
 * socket cache for reuse (if the datanode protocol supports reuse and the
 * caller asked for it) or closing the socket outright.
 */
private void closeBlockReader(BlockReader reader, boolean reuseConnection)
    throws IOException {
  // Only a reader that completed the read handshake has a reusable socket.
  if (reader.hasSentStatusCode()) {
    Socket oldSock = reader.takeSocket();
    if (dfsClient.getDataTransferProtocolVersion() <
        DataTransferProtocol.READ_REUSE_CONNECTION_VERSION ||
        !reuseConnection) {
      // close the sock for old datanode.
      if (oldSock != null) {
        IOUtils.closeSocket(oldSock);
      }
    } else {
      socketCache.put(oldSock);
    }
  }
  reader.close();
}
java
{ "resource": "" }
q161959
DFSInputStream.getBlockReader
train
/**
 * Creates a {@link BlockReader} to the given datanode, reusing a cached
 * connection when the protocol version allows it. Cached sockets may be
 * stale, so creation is retried (up to nCachedConnRetry times) until a
 * fresh socket is used.
 *
 * NOTE(review): the {@code file} and {@code bufferSize} parameters appear
 * unused — the body reads the {@code src} and {@code buffersize} members
 * instead. Looks intentional (the fields mirror the params) but confirm.
 *
 * @throws IOException the last connection error if every attempt failed
 */
protected BlockReader getBlockReader(int protocolVersion, int namespaceId,
    InetSocketAddress dnAddr, String file, long blockId,
    long generationStamp, long startOffset, long len, int bufferSize,
    boolean verifyChecksum, String clientName, long bytesToCheckReadSpeed,
    long minReadSpeedBps, boolean reuseConnection,
    FSClientReadProfilingData cliData, ReadOptions options)
    throws IOException {
  IOException err = null;
  boolean fromCache = true;

  // back compatible with the old datanode.
  if (protocolVersion < DataTransferProtocol.READ_REUSE_CONNECTION_VERSION
      || reuseConnection == false) {
    Socket sock = dfsClient.socketFactory.createSocket();
    sock.setTcpNoDelay(true);
    NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
    sock.setSoTimeout(dfsClient.socketTimeout);

    BlockReader reader = BlockReader.newBlockReader(protocolVersion,
        namespaceId, sock, src, blockId, generationStamp, startOffset, len,
        buffersize, verifyChecksum, clientName, bytesToCheckReadSpeed,
        minReadSpeedBps, false, cliData, options);
    return reader;
  }

  // Allow retry since there is no way of knowing whether the cached socket
  // is good until we actually use it
  for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
    Socket sock = socketCache.get(dnAddr);
    if (sock == null) {
      fromCache = false;
      sock = dfsClient.socketFactory.createSocket();
      /**
       * TCP_NODELAY is crucial here because of bad interactions between
       * Nagle's algorithm and delayed ACKs. With connection keepalive
       * between the client and DN, the conversation looks like:
       *   1. Client -> DN: Read block X
       *   2. DN -> client: data for block X;
       *   3. Client -> DN: Status OK (successful read)
       *   4. Client -> DN: Read block Y
       *
       * The fact that step #3 and #4 are both in the client -> DN direction
       * triggers Nagling. If the DN is using delayed ACKS, this results in
       * a delay of 40ms or more.
       *
       * TCP_NODELAY disables nagling and thus avoid this performance
       * disaster.
       */
      sock.setTcpNoDelay(true);

      NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
      sock.setSoTimeout(dfsClient.socketTimeout);
    }

    try {
      // The OP_READ_BLOCK request is sent as we make the BlockReader
      BlockReader reader = BlockReader.newBlockReader(protocolVersion,
          namespaceId, sock, src, blockId, generationStamp, startOffset, len,
          buffersize, verifyChecksum, clientName, bytesToCheckReadSpeed,
          minReadSpeedBps, true, cliData, options);
      return reader;
    } catch (IOException ex) {
      // Our socket is no good.
      DFSClient.LOG.debug("Error making BlockReader. Closing stale " + sock, ex);
      sock.close();
      err = ex;
    }
  }

  throw err;
}
java
{ "resource": "" }
q161960
DFSInputStream.read
train
/**
 * Positional read: reads up to {@code length} bytes starting at
 * {@code position} without moving the stream position. Tries a local
 * (short-circuit) read first; otherwise fetches each covering block,
 * optionally via speculative parallel reads.
 *
 * @return actual bytes read (truncated at end of file), or -1 if
 *         {@code position} is outside the file
 * @throws IOException if the stream is closed or a block fetch fails
 */
public int read(long position, byte[] buffer, int offset, int length,
    ReadOptions options) throws IOException {
  // sanity checks
  dfsClient.checkOpen();
  if (closed) {
    throw new IOException("Stream closed");
  }
  DFSClient.dfsInputStreamfailures.set(0);
  long start = System.currentTimeMillis();
  long filelen = getFileLength();
  if ((position < 0) || (position >= filelen)) {
    return -1;
  }
  int realLen = length;
  if ((position + length) > filelen) {
    realLen = (int)(filelen - position);
  }

  DFSReadProfilingData pData = DFSClient.getDFSReadProfilingData();

  // determine the block and byte range within the block
  // corresponding to position and realLen
  List<LocatedBlock> blockRange = getBlockRange(position, realLen);

  if (!tryPreadFromLocal(blockRange, position, buffer, offset, length,
      realLen, start)) {
    // non-local or multiple block read.
    int remaining = realLen;
    for (LocatedBlock blk : blockRange) {
      long targetStart = position - blk.getStartOffset();
      long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
      if (dfsClient.allowParallelReads &&
          dfsClient.parallelReadsThreadPool != null) {
        // Race multiple datanodes for the range; first response wins.
        fetchBlockByteRangeSpeculative(blk, targetStart,
            targetStart + bytesToRead - 1, buffer, offset, options);
      } else {
        if (pData != null) {
          cliData = new FSClientReadProfilingData();
          pData.addDFSClientReadProfilingData(cliData);
          cliData.startRead();
        }
        fetchBlockByteRange(blk, targetStart,
            targetStart + bytesToRead - 1, buffer, offset, options);
        if (pData != null) {
          cliData.endRead();
        }
      }
      remaining -= bytesToRead;
      position += bytesToRead;
      offset += bytesToRead;
    }
    assert remaining == 0 : "Wrong number of bytes read.";
  }
  if (dfsClient.stats != null) {
    dfsClient.stats.incrementBytesRead(realLen);
  }
  long timeval = System.currentTimeMillis() - start;
  dfsClient.metrics.incPreadTime(timeval);
  dfsClient.metrics.incPreadSize(realLen);
  dfsClient.metrics.incPreadOps();
  return realLen;
}
java
{ "resource": "" }
q161961
DFSInputStream.readFullyScatterGather
train
/**
 * Scatter-gather positional read: returns one ByteBuffer per covering
 * block for the byte range {@code [position, position + length)},
 * truncated at end of file. Falls back to the base-class implementation
 * when the server predates the scatter-gather protocol.
 *
 * NOTE(review): the bound check allows {@code position == filelen}
 * (returns an empty list), unlike pread's {@code >=} check — looks
 * intentional, but confirm.
 *
 * @throws IOException if the stream is closed or {@code position} is
 *         negative / past end of file
 */
@Override
public List<ByteBuffer> readFullyScatterGather(long position, int length)
    throws IOException {
  // if the server does not support scatter-gather,
  // then use default implementation from FSDataInputStream.
  if (dfsClient.dataTransferVersion < DataTransferProtocol.SCATTERGATHER_VERSION) {
    return super.readFullyScatterGather(position, length);
  }
  // sanity checks
  dfsClient.checkOpen();
  if (closed) {
    throw new IOException("Stream closed");
  }
  DFSClient.dfsInputStreamfailures.set(0);
  long start = System.currentTimeMillis();
  long filelen = getFileLength();
  if ((position < 0) || (position > filelen)) {
    String msg = " Invalid position " + position + ". File " + src +
        " is of size " + filelen;
    DFSClient.LOG.warn(msg);
    throw new IOException(msg);
  }
  List<ByteBuffer> results = new LinkedList<ByteBuffer>();
  int realLen = length;
  if ((position + length) > filelen) {
    realLen = (int)(filelen - position);
  }
  // determine the block and byte range within the block
  // corresponding to position and realLen
  List<LocatedBlock> blockRange = getBlockRange(position, realLen);
  int remaining = realLen;
  for (LocatedBlock blk : blockRange) {
    long targetStart = position - blk.getStartOffset();
    long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
    ByteBuffer bb = fetchBlockByteRangeScatterGather(blk, targetStart,
        bytesToRead);
    results.add(bb);
    remaining -= bytesToRead;
    position += bytesToRead;
  }
  assert remaining == 0 : "Wrong number of bytes read.";
  if (dfsClient.stats != null) {
    dfsClient.stats.incrementBytesRead(realLen);
  }
  long timeval = System.currentTimeMillis() - start;
  dfsClient.metrics.incPreadTime(timeval);
  dfsClient.metrics.incPreadSize(realLen);
  dfsClient.metrics.incPreadOps();
  return results;
}
java
{ "resource": "" }
q161962
DFSInputStream.seek
train
/**
 * Seeks to the given file position. When the target lies ahead within the
 * current block and within one TCP window, the intervening bytes are
 * skipped on the existing reader instead of re-opening a connection;
 * otherwise the position is recorded and the next read re-seeks the block.
 *
 * @throws IOException if {@code targetPos} is past end of file
 */
@Override
public synchronized void seek(long targetPos) throws IOException {
  if (targetPos > getFileLength()) {
    throw new IOException("Cannot seek after EOF");
  }
  boolean done = false;
  if (pos <= targetPos && targetPos <= blockEnd) {
    //
    // If this seek is to a positive position in the current
    // block, and this piece of data might already be lying in
    // the TCP buffer, then just eat up the intervening data.
    //
    int diff = (int)(targetPos - pos);
    if (diff <= DFSClient.TCP_WINDOW_SIZE) {
      try {
        // skip() may advance less than diff; only then is the seek done.
        pos += blockReader.skip(diff);
        if (pos == targetPos) {
          done = true;
        }
      } catch (IOException e) {//make following read to retry
        dfsClient.incReadExpCntToStats();
        DFSClient.LOG.debug("Exception while seek to " + targetPos + " from "
            + currentBlock +" of " + src + " from " + currentNode +
            ": " + StringUtils.stringifyException(e));
      }
    } else {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("seek() out of TCP buffer " +
            " block " + currentBlock +
            " current pos: " + pos +
            " target pos: " + targetPos);
      }
    }
  }
  if (!done) {
    // Lazy seek: invalidate the block cursor so the next read re-seeks.
    pos = targetPos;
    blockEnd = -1;
  }
}
java
{ "resource": "" }
q161963
DFSInputStream.seekToNewSource
train
/**
 * Attempts to switch to a different datanode for the block at
 * {@code targetPos}, temporarily marking the current node dead so the
 * block-seek picks another replica.
 *
 * @return true if a different datanode was selected; false if the same
 *         node was chosen again
 */
public synchronized boolean seekToNewSource(long targetPos,
    boolean throwWhenNotFound) throws IOException {
  // Remember whether the node was already dead so we don't un-mark a
  // genuinely dead node below.
  boolean markedDead = deadNodes.containsKey(currentNode);
  addToDeadNodes(currentNode);
  DatanodeInfo oldNode = currentNode;
  DatanodeInfo newNode = blockSeekTo(targetPos, throwWhenNotFound);
  if (!markedDead) {
    /* remove it from deadNodes. blockSeekTo could have cleared
     * deadNodes and added currentNode again. Thats ok. */
    deadNodes.remove(oldNode);
  }
  if (!oldNode.getStorageID().equals(newNode.getStorageID())) {
    currentNode = newNode;
    return true;
  } else {
    return false;
  }
}
java
{ "resource": "" }
q161964
TaskTrackerStatus.isTaskRunning
train
/**
 * Tells whether a task should be counted as occupying this tracker:
 * actively running, still unassigned, or in its task-cleanup phase.
 */
private boolean isTaskRunning(TaskStatus taskStatus) {
  TaskStatus.State runState = taskStatus.getRunState();
  boolean active = (runState == State.RUNNING) || (runState == State.UNASSIGNED);
  // Preserve short-circuit: only consult the cleanup phase when not active.
  return active || taskStatus.inTaskCleanupPhase();
}
java
{ "resource": "" }
q161965
TaskTrackerStatus.countMapTasks
train
/**
 * Returns the number of map tasks currently occupying this tracker
 * (running, unassigned, or in cleanup).
 */
public int countMapTasks() {
  int running = 0;
  for (TaskStatus status : taskReports) {
    if (status.getIsMap() && isTaskRunning(status)) {
      running++;
    }
  }
  return running;
}
java
{ "resource": "" }
q161966
TaskTrackerStatus.countOccupiedMapSlots
train
/**
 * Returns the total number of map slots occupied by tasks on this tracker;
 * a single task may hold multiple slots.
 */
public int countOccupiedMapSlots() {
  int occupied = 0;
  for (TaskStatus status : taskReports) {
    if (status.getIsMap() && isTaskRunning(status)) {
      occupied += status.getNumSlots();
    }
  }
  return occupied;
}
java
{ "resource": "" }
q161967
TaskTrackerStatus.countReduceTasks
train
/**
 * Returns the number of reduce tasks currently occupying this tracker
 * (running, unassigned, or in cleanup).
 */
public int countReduceTasks() {
  int running = 0;
  for (TaskStatus status : taskReports) {
    if (!status.getIsMap() && isTaskRunning(status)) {
      running++;
    }
  }
  return running;
}
java
{ "resource": "" }
q161968
TaskTrackerStatus.countOccupiedReduceSlots
train
/**
 * Returns the total number of reduce slots occupied by tasks on this
 * tracker; a single task may hold multiple slots.
 */
public int countOccupiedReduceSlots() {
  int occupied = 0;
  for (TaskStatus status : taskReports) {
    if (!status.getIsMap() && isTaskRunning(status)) {
      occupied += status.getNumSlots();
    }
  }
  return occupied;
}
java
{ "resource": "" }
q161969
BinaryRecordInput.get
train
/**
 * Returns this thread's cached {@code BinaryRecordInput}, rebound to read
 * from the supplied stream. Reuses a thread-local instance to avoid a new
 * allocation per call.
 */
public static BinaryRecordInput get(DataInput inp) {
  final BinaryRecordInput reader = (BinaryRecordInput) bIn.get();
  reader.setDataInput(inp);
  return reader;
}
java
{ "resource": "" }
q161970
NameNodeSafeModeInfo.canLeave
train
/**
 * Decides whether safe mode can be exited right now: the threshold must
 * have been reached, the configured extension period must have elapsed
 * since then, and no condition may force re-entry.
 */
@Override
public boolean canLeave() {
  // reached presumably records when the safe-block threshold was met;
  // 0 appears to mean "not yet reached" — TODO confirm.
  if (reached == 0) {
    return false;
  }
  if (namesystem.now() - reached < extension) {
    // Still inside the extension window; report (throttled) and stay.
    reportStatus("STATE* Safe mode ON.", false);
    return false;
  }
  return !needEnter();
}
java
{ "resource": "" }
q161971
NameNodeSafeModeInfo.reportStatus
train
/**
 * Logs a safe-mode status message plus the turn-off tip, throttled to at
 * most one report every 20 seconds unless {@code rightNow} forces output.
 */
private void reportStatus(String msg, boolean rightNow) {
  final long now = FSNamesystem.now();
  final boolean throttled = (now - lastStatusReport) < 20 * 1000;
  if (rightNow || !throttled) {
    FLOG.info(msg + " \n" + getTurnOffTip());
    lastStatusReport = now;
  }
}
java
{ "resource": "" }
q161972
NameNodeSafeModeInfo.isConsistent
train
/**
 * Checks internal consistency of the safe-mode block counters against the
 * namesystem's block totals. Always consistent when safe mode is off or
 * was entered manually.
 */
private boolean isConsistent() {
  if (this.reached < 0) {
    return true; // Safemode is off.
  }
  if (namesystem.getTotalBlocks() == -1 &&
      namesystem.getSafeBlocks() == -1) {
    return true; // manual safe mode
  }
  // NOTE(review): both getTotalBlocks() and getBlocksTotal() are used here;
  // presumably the former is the safe-mode snapshot and the latter the live
  // count — TODO confirm they are intentionally distinct.
  long activeBlocks = namesystem.getBlocksTotal()
      - namesystem.getPendingDeletionBlocks();
  return (namesystem.getTotalBlocks() == activeBlocks) ||
      (namesystem.getSafeBlocks() >= 0 &&
       namesystem.getSafeBlocks() <= namesystem.getTotalBlocks());
}
java
{ "resource": "" }
q161973
ReduceContext.nextKey
train
/**
 * Advances to the first value of the next distinct key, skipping any
 * values still belonging to the current key, and bumps the input-key
 * counter when one is configured.
 *
 * @return true if a next key/value is available; false at end of input
 */
public boolean nextKey() throws IOException,InterruptedException {
  // Drain the remaining values of the current key.
  while (hasMore && nextKeyIsSame) {
    nextKeyValue();
  }
  if (!hasMore) {
    return false;
  }
  if (inputKeyCounter != null) {
    inputKeyCounter.increment(1);
  }
  return nextKeyValue();
}
java
{ "resource": "" }
q161974
FSImageCompression.createCompression
train
/**
 * Chooses the fsimage compression for a save: no-op when the caller forces
 * an uncompressed image or compression is disabled in the configuration,
 * otherwise the codec named by the configuration.
 */
static FSImageCompression createCompression(Configuration conf,
                                            boolean forceUncompressed)
    throws IOException {
  if (forceUncompressed) {
    return createNoopCompression();
  }
  if (!conf.getBoolean(HdfsConstants.DFS_IMAGE_COMPRESS_KEY,
                       HdfsConstants.DFS_IMAGE_COMPRESS_DEFAULT)) {
    return createNoopCompression();
  }
  String codecClassName = conf.get(
      HdfsConstants.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      HdfsConstants.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
  return createCompression(conf, codecClassName);
}
java
{ "resource": "" }
q161975
FSImageCompression.readCompressionHeader
train
/**
 * Reads the fsimage compression header from the stream. Layout: a boolean
 * "compressed" flag, followed (only if true) by the codec class name.
 */
public static FSImageCompression readCompressionHeader(
    Configuration conf, DataInputStream dis) throws IOException {
  if (dis.readBoolean()) {
    return createCompression(conf, Text.readString(dis));
  }
  return createNoopCompression();
}
java
{ "resource": "" }
q161976
FSImageCompression.unwrapInputStream
train
/**
 * Wraps the stream with the codec's decompressing stream, or returns it
 * untouched when no compression codec is configured.
 */
public InputStream unwrapInputStream(InputStream is) throws IOException {
  return (imageCodec == null) ? is : imageCodec.createInputStream(is);
}
java
{ "resource": "" }
q161977
FSImageCompression.writeHeaderAndWrapStream
train
DataOutputStream writeHeaderAndWrapStream(OutputStream os) throws IOException { DataOutputStream dos = new DataOutputStream(os); dos.writeBoolean(imageCodec != null); if (imageCodec != null) { String codecClassName = imageCodec.getClass().getCanonicalName(); Text.writeString(dos, codecClassName); return new DataOutputStream(imageCodec.createOutputStream(os)); } else { // use a buffered output stream return new DataOutputStream(new BufferedOutputStream(os)); } }
java
{ "resource": "" }
q161978
TaskID.downgrade
train
/**
 * Converts a new-API {@code org.apache.hadoop.mapreduce.TaskID} into the
 * old-API {@code TaskID}, reusing the instance when it already is one.
 */
public static TaskID downgrade(org.apache.hadoop.mapreduce.TaskID old) {
  return (old instanceof TaskID)
      ? (TaskID) old
      : new TaskID(JobID.downgrade(old.getJobID()), old.isMap(), old.getId());
}
java
{ "resource": "" }
q161979
Standby.quiesceIngestWithReprocess
train
/**
 * Stops any ongoing ingest and, if it did not finish its current edits
 * segment cleanly, re-processes that segment. No-op when no ingest exists.
 */
private void quiesceIngestWithReprocess() throws IOException {
  if (ingest == null) {
    return;
  }
  LOG.info("Standby: Quiescing - quiescing ongoing ingest");
  quiesceIngest();
  reprocessCurrentSegmentIfNeeded(ingest.getIngestStatus());
}
java
{ "resource": "" }
q161980
Standby.quiesceIngest
train
/**
 * Asks the current ingest to stop and waits for its thread to exit,
 * then records the NOT_INGESTING state.
 *
 * @throws IOException if interrupted while waiting for the ingest thread
 */
private void quiesceIngest() throws IOException {
  InjectionHandler.processEvent(InjectionEvent.STANDBY_QUIESCE_INGEST);
  // State change and quiesce request happen atomically w.r.t. other
  // ingest state transitions.
  synchronized (ingestStateLock) {
    assertState(StandbyIngestState.INGESTING_EDITS,
        StandbyIngestState.NOT_INGESTING);
    ingest.quiesce();
  }
  try {
    // Join outside the lock so the ingest thread can finish its work.
    ingestThread.join();
    currentIngestState = StandbyIngestState.NOT_INGESTING;
    LOG.info("Standby: Quiesce - Ingest thread for segment: "
        + ingest.toString() + " exited.");
  } catch (InterruptedException e) {
    LOG.info("Standby: Quiesce - Ingest thread interrupted.");
    throw new IOException(e.getMessage());
  }
}
java
{ "resource": "" }
q161981
Standby.instantiateIngest
train
/**
 * Starts a new ingest thread for the current edits segment, unless one is
 * already running. On failure, bumps the ingest-failure counter and resets
 * the state to NOT_INGESTING before rethrowing.
 */
private void instantiateIngest() throws IOException {
  InjectionHandler.processEvent(InjectionEvent.STANDBY_INSTANTIATE_INGEST);
  try {
    synchronized (ingestStateLock) {
      if (checkIngestState()) {
        // An ingest for this segment already exists; nothing to do.
        LOG.info("Standby: Ingest for txid: " + currentSegmentTxId
            + " is already running");
        return;
      }
      assertState(StandbyIngestState.NOT_INGESTING);
      ingest = new Ingest(this, fsnamesys, confg, currentSegmentTxId);
      ingestThread = new Thread(ingest);
      ingestThread.setName("Ingest_for_" + currentSegmentTxId);
      ingestThread.start();
      currentIngestState = StandbyIngestState.INGESTING_EDITS;
    }
    LOG.info("Standby: Instatiated ingest for txid: " + currentSegmentTxId);
  } catch (IOException e) {
    setIngestFailures(ingestFailures + 1);
    currentIngestState = StandbyIngestState.NOT_INGESTING;
    throw e;
  }
}
java
{ "resource": "" }
q161982
Standby.reprocessCurrentSegmentIfNeeded
train
/**
 * If the previous ingest of the current segment did not succeed
 * ({@code status == false}), runs one more full ingest pass over the
 * segment and fails hard if it still cannot be consumed.
 *
 * @param status result of the previous ingest; true means nothing to do
 * @throws IOException if the re-processing pass also fails
 */
private void reprocessCurrentSegmentIfNeeded(boolean status)
    throws IOException {
  if (status) {
    return;
  }
  assertState(StandbyIngestState.NOT_INGESTING);
  LOG.info("Standby: Quiesce - reprocessing edits segment starting at: "
      + currentSegmentTxId);
  // Run a full start/stop ingest cycle over the segment.
  instantiateIngest();
  quiesceIngest();

  // verify that the entire transaction log was truly consumed
  // when re-processing, if we fail here, we cannot do anything
  // better than fail
  if (!ingest.getIngestStatus()) {
    String emsg = "Standby: Quiesce could not successfully ingest "
        + "transaction log starting at " + currentSegmentTxId;
    LOG.warn(emsg);
    setIngestFailures(ingestFailures + 1);
    throw new IOException(emsg);
  }
}
java
{ "resource": "" }
q161983
Standby.triggerCheckpoint
train
/**
 * Triggers a manual checkpoint and blocks until the checkpoint thread
 * completes it (signalled via {@code manualCheckpointLatch}).
 *
 * @param uncompressed must be false; the uncompressed option is rejected
 * @throws IOException if another manual checkpoint is in progress, the
 *         wait is interrupted, or the checkpoint ultimately fails
 */
void triggerCheckpoint(boolean uncompressed) throws IOException {
  String pref = "Standby: Checkpoint - ";
  LOG.info(pref + "triggering checkpoint manually");
  // check error conditions
  if (uncompressed) {
    throwIOException(pref + " uncompressed option not supported", null);
  }
  if (manualCheckpointLatch.getCount() > 0) {
    throwIOException(pref + "Another manual checkpoint is in progress", null);
  }

  // set the manual checkpoint latch
  manualCheckpointLatch = new CountDownLatch(2);
  // Zeroing these timers makes the checkpoint thread fire immediately.
  lastCheckpointTime = delayedScheduledCheckpointTime = 0;

  try {
    manualCheckpointLatch.await();
  } catch (InterruptedException e) {
    throwIOException(pref + "interrupted when performing manual checkpoint", e);
  }

  // check if checkpoint succeeded
  if (checkpointFailures > 0) {
    throwIOException(pref + "manual checkpoint failed", null);
  }
  LOG.info(pref + "manual checkpoint done");
}
java
{ "resource": "" }
q161984
Standby.handleCheckpointFailure
train
/**
 * Reacts to a failed checkpoint: below the failure limit, sleeps briefly
 * and returns so the caller can retry; past the limit, logs fatal and
 * terminates the process.
 *
 * NOTE(review): if the retry sleep is interrupted, control falls through
 * to the process exit as well — presumably intentional, but confirm.
 */
private void handleCheckpointFailure() {
  setCheckpointFailures(checkpointFailures + 1);
  if (checkpointFailures > MAX_CHECKPOINT_FAILURES) {
    LOG.fatal("Standby: Checkpointing - standby failed to checkpoint in "
        + checkpointFailures + " attempts. Aborting");
  } else {
    // We want to give some time for some transition error to recover
    // and DNS caching to expire. This is mainly for small clusters
    // where checkpoint can be very fast. Doesn't hurt if we sleep
    // on large clusters too.
    //
    LOG.info("Sleeping " + CHECKPOINT_SLEEP_BEFORE_RETRY
        + " msecs before retry checkpoints...");
    try {
      Thread.sleep(CHECKPOINT_SLEEP_BEFORE_RETRY);
      return;
    } catch (InterruptedException ie) {
      LOG.warn("Standby: Checkpointing - Thread interrupted"
          + " while sleeping before a retry.", ie);
    }
  }
  FSEditLog.runtime.exit(-1);
}
java
{ "resource": "" }
q161985
Standby.uploadImage
train
/**
 * Uploads the checkpointed fsimage for {@code txid} to the primary
 * namenode using a helper thread, waiting up to
 * MAX_CHECKPOINT_UPLOAD_TIMEOUT for it to finish.
 *
 * @throws IOException if the upload fails, times out, or the standby is
 *         shutting down; interruption is re-signalled and wrapped
 */
private void uploadImage(long txid) throws IOException {
  final long start = AvatarNode.now();
  LOG.info("Standby: Checkpointing - Upload fsimage to remote namenode.");
  checkpointStatus("Image upload started");

  imageUploader = new ImageUploader(txid);
  imageUploader.start();

  // wait for the upload to complete
  while (running && !imageUploader.done
      && AvatarNode.now() - start < MAX_CHECKPOINT_UPLOAD_TIMEOUT) {
    try {
      // Bounded join so the loop can re-check running/timeout conditions.
      imageUploader.join(3000);
    } catch (InterruptedException ie) {
      LOG.error("Reveived interruption when uploading image for txid: "
          + txid);
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException().initCause(ie);
    }
  }
  if (!running || !imageUploader.succeeded) {
    InjectionHandler.processEvent(InjectionEvent.STANDBY_UPLOAD_FAIL);
    throw new IOException(
        "Standby: Checkpointing - Image upload failed (time= "
            + (AvatarNode.now() - start) + " ms).", imageUploader.error);
  }
  imageUploader = null;
  LOG.info("Standby: Checkpointing - Upload fsimage to remote namenode DONE.");
  checkpointStatus("Image upload completed");
}
java
{ "resource": "" }
q161986
Standby.putFSImage
train
/**
 * Pushes the locally stored checkpoint image for {@code txid} to the
 * primary namenode's image servlet.
 */
private void putFSImage(long txid) throws IOException {
  TransferFsImage.uploadImageFromStorage(fsName, machineName, infoPort,
      fsImage.storage, txid);
}
java
{ "resource": "" }
q161987
Standby.checkImageValidation
train
/**
 * Waits for the background image-validation thread to finish and fails
 * the checkpoint if validation did not succeed.
 *
 * @throws IOException if validation failed or the wait was interrupted
 */
private void checkImageValidation() throws IOException {
  try {
    imageValidator.join();
  } catch (InterruptedException ie) {
    throw (IOException) new InterruptedIOException().initCause(ie);
  }
  if (!imageValidator.succeeded) {
    throw new IOException("Image file validation failed",
        imageValidator.error);
  }
}
java
{ "resource": "" }
q161988
Standby.createImageValidation
train
/**
 * Starts a background thread that validates the freshly saved image file.
 * Refuses (failing the checkpoint) if the standby is quiescing.
 */
private void createImageValidation(File imageFile) throws IOException {
  synchronized (imageValidatorLock) {
    InjectionHandler.processEvent(InjectionEvent.STANDBY_VALIDATE_CREATE);
    if (!running) {
      // fails the checkpoint
      InjectionHandler.processEvent(InjectionEvent.STANDBY_VALIDATE_CREATE_FAIL);
      throw new IOException("Standby: standby is quiescing");
    }
    imageValidator = new ImageValidator(imageFile);
    imageValidator.start();
  }
}
java
{ "resource": "" }
q161989
Standby.interruptImageValidation
train
/**
 * Interrupts a running image-validation thread (if any) and waits for it
 * to exit, so quiescing cannot race with validation.
 *
 * @throws InterruptedIOException if interrupted while waiting
 */
private void interruptImageValidation() throws IOException {
  synchronized (imageValidatorLock) {
    if (imageValidator != null) {
      imageValidator.interrupt();
      try {
        imageValidator.join();
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Standby: received interruption");
      }
    }
  }
}
java
{ "resource": "" }
q161990
Standby.initSecondary
train
/**
 * Initializes the standby's secondary-namenode role: reads checkpoint
 * scheduling parameters from the configuration and starts the HTTP server
 * used for image transfer (getimage servlet), registering extra servlets
 * on the avatar node's server as well.
 *
 * @throws IOException if the HTTP server cannot be started
 */
void initSecondary(Configuration conf) throws IOException {

  fsName = AvatarNode.getRemoteNamenodeHttpName(conf,
      avatarNode.getInstanceId());

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);

  // Optionally delay the first checkpoint by one full period.
  delayedScheduledCheckpointTime = conf.getBoolean("fs.checkpoint.delayed",
      false) ? AvatarNode.now() + checkpointPeriod * 1000 : 0;

  // initialize the webserver for uploading files.
  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage",
      GetImageServlet.class);
  infoServer.start();
  avatarNode.httpServer.setAttribute("avatar.node", avatarNode);
  avatarNode.httpServer.addInternalServlet("outstandingnodes",
      "/outstandingnodes", OutStandingDatanodesServlet.class);

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  if (delayedScheduledCheckpointTime > 0) {
    LOG.warn("Standby: Checkpointing will be delayed by: "
        + checkpointPeriod + " seconds");
  }
  LOG.warn("Log Size Trigger    :" + checkpointTxnCount + " transactions.");
}
java
{ "resource": "" }
q161991
Standby.assertState
train
/**
 * Verifies that the current ingest state is one of the expected states.
 *
 * @throws IOException if the current state matches none of them
 */
private void assertState(StandbyIngestState... expectedStates)
    throws IOException {
  boolean matched = false;
  for (int i = 0; i < expectedStates.length && !matched; i++) {
    matched = (currentIngestState == expectedStates[i]);
  }
  if (!matched) {
    throw new IOException("Standby: illegal state - current: "
        + currentIngestState);
  }
}
java
{ "resource": "" }
q161992
FSInputStream.readFullyScatterGather
train
/**
 * Default scatter-gather positional read: performs a single contiguous
 * {@code readFully} into one byte array and returns it wrapped as a
 * one-element buffer list. Subclasses may return multiple buffers.
 */
public List<ByteBuffer> readFullyScatterGather(long position, int length)
    throws IOException {
  final byte[] data = new byte[length];
  readFully(position, data, 0, length);
  final LinkedList<ByteBuffer> single = new LinkedList<ByteBuffer>();
  single.add(ByteBuffer.wrap(data, 0, length));
  return single;
}
java
{ "resource": "" }
q161993
EditLogOutputStream.flush
train
/**
 * Flushes (and optionally durably syncs) buffered edits, updating the
 * sync counters and the optional sync-latency metric.
 *
 * @param durable whether the flush must be durable
 * @throws IOException if the underlying flush/sync fails
 */
public void flush(boolean durable) throws IOException {
  numSync++;
  final long startNanos = System.nanoTime();
  flushAndSync(durable);
  final long elapsedMicros = DFSUtil.getElapsedTimeMicroSeconds(startNanos);
  totalTimeSync += elapsedMicros;
  if (sync == null) {
    return; // metric not wired up; counters above are still maintained
  }
  sync.inc(elapsedMicros);
}
java
{ "resource": "" }
q161994
RawLocalFileSystem.setOwner
train
@Override public void setOwner(Path p, String username, String groupname ) throws IOException { if (username == null && groupname == null) { throw new IOException("username == null && groupname == null"); } if (username == null) { execCommand(pathToFile(p), Shell.SET_GROUP_COMMAND, groupname); } else { //OWNER[:[GROUP]] String s = username + (groupname == null? "": ":" + groupname); execCommand(pathToFile(p), Shell.SET_OWNER_COMMAND, s); } }
java
{ "resource": "" }
q161995
RawLocalFileSystem.setPermission
train
@Override public void setPermission(Path p, FsPermission permission ) throws IOException { FsAction user = permission.getUserAction(); FsAction group = permission.getGroupAction(); FsAction other = permission.getOtherAction(); File f = pathToFile(p); // Fork chmod if group and other permissions are different... if (group != other) { execSetPermission(f, permission); return; } boolean rv = true; // read perms rv = f.setReadable(group.implies(FsAction.READ), false); checkReturnValue(rv, p, permission); if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) { f.setReadable(user.implies(FsAction.READ), true); checkReturnValue(rv, p, permission); } // write perms rv = f.setWritable(group.implies(FsAction.WRITE), false); checkReturnValue(rv, p, permission); if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) { f.setWritable(user.implies(FsAction.WRITE), true); checkReturnValue(rv, p, permission); } // exec perms rv = f.setExecutable(group.implies(FsAction.EXECUTE), false); checkReturnValue(rv, p, permission); if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) { f.setExecutable(user.implies(FsAction.EXECUTE), true); checkReturnValue(rv, p, permission); } }
java
{ "resource": "" }
q161996
OfflineAnonymizer.anonymize
train
/**
 * Reads every record from the parser, anonymizes the valid ones, writes
 * them to {@code <logfile>.anonymized}, then compresses that output file.
 *
 * Fix: the BufferedWriter is now closed in a finally block, so it no
 * longer leaks when the parser or a write throws mid-stream.
 *
 * @throws Exception if parsing, writing, or compression fails
 */
public void anonymize() throws Exception {
  // Hoist the output name; it was previously rebuilt at every use site.
  String outName = logfile.getName() + ".anonymized";
  BufferedWriter bfw = new BufferedWriter(new FileWriter(outName));
  try {
    System.out.println("Anonymizing log records...");
    EventRecord er;
    while ((er = parser.getNext()) != null) {
      if (er.isValid()) {
        SerializedRecord sr = new SerializedRecord(er);
        Anonymizer.anonymize(sr);
        bfw.write(LocalStore.pack(sr).toString());
        bfw.write(LocalStore.RECORD_SEPARATOR);
      }
    }
    bfw.flush();
  } finally {
    // Guarantee the handle is released even on failure.
    bfw.close();
  }
  System.out.println("Anonymized log records written to " + outName);
  System.out.println("Compressing output file...");
  LocalStore.zipCompress(outName);
  System.out.println("Compressed output file written to " + outName
      + LocalStore.COMPRESSION_SUFFIX);
}
java
{ "resource": "" }
q161997
OneSidedPentomino.initializePieces
train
/**
 * Populates {@code pieces} with the 18 one-sided pentominoes.
 *
 * Each shape string presumably encodes the piece row by row with '/'
 * separating rows and 'x' marking filled cells (per Piece's constructor —
 * TODO confirm against Piece). The rotation constants limit the distinct
 * orientations the solver tries for each piece.
 */
protected void initializePieces() {
  // Pieces shared with the standard (two-sided) pentomino set.
  pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
  pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
  pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
  pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
  pieces.add(new Piece("u", "x x/xxx", false, fourRotations));
  pieces.add(new Piece("i", "xxxxx", false, twoRotations));
  pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations));
  pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations));
  pieces.add(new Piece("z", "xx / x / xx", false, twoRotations));
  pieces.add(new Piece("n", "xx / xxx", false, fourRotations));
  pieces.add(new Piece("y", " x /xxxx", false, fourRotations));
  pieces.add(new Piece("l", " x/xxxx", false, fourRotations));
  // Mirrored variants (uppercase names): in the one-sided puzzle a piece
  // and its mirror image count as distinct pieces, hence 18 rather than 12.
  pieces.add(new Piece("F", "xx / xx/ x ", false, fourRotations));
  pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations));
  pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations));
  pieces.add(new Piece("N", " xx/xxx ", false, fourRotations));
  pieces.add(new Piece("Y", " x /xxxx", false, fourRotations));
  pieces.add(new Piece("L", "x /xxxx", false, fourRotations));
}
java
{ "resource": "" }
q161998
OneSidedPentomino.main
train
/**
 * Solves a 3x30 one-sided pentomino board and prints the solution count.
 *
 * @param args unused
 */
public static void main(String[] args) {
  final Pentomino board = new OneSidedPentomino(3, 30);
  final int count = board.solve();
  System.out.println(count + " solutions found.");
}
java
{ "resource": "" }
q161999
CoronaStateUpdate.get
train
/**
 * Returns the update payload as the requested type, or null when the
 * payload is not an instance of that type (including a null payload).
 *
 * Fix: replaces catch-ClassCastException control flow with an explicit
 * {@link Class#isInstance} check — same results ({@code isInstance(null)}
 * is false, matching the original null-in/null-out behavior of
 * {@code cast(null)}), without using an exception for an ordinary test.
 *
 * @param clazz the expected payload type
 * @return the payload cast to {@code T}, or null if it is not a {@code T}
 */
private <T> T get(Class<T> clazz) {
  Object payload = get();
  return clazz.isInstance(payload) ? clazz.cast(payload) : null;
}
java
{ "resource": "" }