_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160700
NodeManager.getAllocatedCpuForType
train
/**
 * Sums the CPU allocated for the given resource type across all live nodes.
 * Nodes flagged deleted are skipped; each node is locked while it is read.
 */
public int getAllocatedCpuForType(ResourceType type) {
  int sum = 0;
  for (ClusterNode clusterNode : nameToNode.values()) {
    synchronized (clusterNode) {
      if (!clusterNode.deleted) {
        sum += clusterNode.getAllocatedCpuForType(type);
      }
    }
  }
  return sum;
}
java
{ "resource": "" }
q160701
NodeManager.getFreeNodesForType
train
/**
 * Lists nodes that still have spare CPU capacity for the given resource type.
 * Each entry is formatted as "<nodeName>: <free resources>".
 */
public List<String> getFreeNodesForType(ResourceType type) {
  List<String> result = new ArrayList<String>();
  for (Map.Entry<String, ClusterNode> entry : nameToNode.entrySet()) {
    ClusterNode node = entry.getValue();
    synchronized (node) {
      boolean hasSpare = !node.deleted
          && node.getMaxCpuForType(type) > node.getAllocatedCpuForType(type);
      if (hasSpare) {
        result.add(entry.getKey() + ": " + node.getFree().toString());
      }
    }
  }
  return result;
}
java
{ "resource": "" }
q160702
NodeManager.nodeFeedback
train
public void nodeFeedback( String handle, List<ResourceType> resourceTypes, List<NodeUsageReport> reportList) { // Iterate over each report. for (NodeUsageReport usageReport : reportList) { faultManager.nodeFeedback(usageReport.getNodeName(), resourceTypes, usageReport); } }
java
{ "resource": "" }
q160703
NodeManager.blacklistNode
train
/**
 * Records that a node has been blacklisted for a resource type: logs the
 * event, refreshes the blacklisted-node metric, and removes the resource's
 * app from the node.
 */
void blacklistNode(String nodeName, ResourceType resourceType) {
  LOG.info("Node " + nodeName + " has been blacklisted for resource " + resourceType);
  int blacklistedCount = faultManager.getBlacklistedNodeCount();
  clusterManager.getMetrics().setBlacklistedNodes(blacklistedCount);
  deleteAppFromNode(nodeName, resourceType);
}
java
{ "resource": "" }
q160704
NodeManager.resolve
train
/**
 * Returns (creating it if necessary) the RequestedNode for the given host
 * under the index of the given resource type.
 */
public RequestedNode resolve(String host, ResourceType type) {
  return typeToIndices.get(type).getOrCreateRequestedNode(host);
}
java
{ "resource": "" }
q160705
NodeManager.resetNodesLastHeartbeatTime
train
/**
 * Stamps every known node's lastHeartbeatTime with the current clock time.
 * NOTE(review): unlike the other node walks in this class, nodes are not
 * locked here — presumably acceptable for a single field write; confirm
 * lastHeartbeatTime does not need the per-node lock.
 */
public void resetNodesLastHeartbeatTime() {
  long now = ClusterManager.clock.getTime();
  for (ClusterNode node : nameToNode.values()) {
    node.lastHeartbeatTime = now;
  }
}
java
{ "resource": "" }
q160706
NodeManager.write
train
/**
 * Serializes this manager's persistent state as a JSON object with fields
 * "nameToNode", "hostsToSessions" and "nameToApps". The faultManager and
 * loadManager are intentionally not written (they can be rebuilt on load).
 *
 * @param jsonGenerator sink for the JSON output
 * @throws IOException if writing fails
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  // nameToNode begins
  jsonGenerator.writeFieldName("nameToNode");
  jsonGenerator.writeStartObject();
  for (Map.Entry<String, ClusterNode> entry : nameToNode.entrySet()) {
    jsonGenerator.writeFieldName(entry.getKey());
    entry.getValue().write(jsonGenerator);
  }
  jsonGenerator.writeEndObject();
  // nameToNode ends
  // hostsToSessions begins
  // We create a new Map of type <ClusterNode.name, Set<SessionIds>>.
  // The original hostsToSessions map has the ClusterNode as its key, and
  // we do not need to persist the entire ClusterNode again, since we have
  // already done that with nameToNode.
  Map<String, Set<String>> hostsToSessionsMap =
      new HashMap<String, Set<String>>();
  for (Map.Entry<ClusterNode, Set<String>> entry :
      hostsToSessions.entrySet()) {
    hostsToSessionsMap.put(entry.getKey().getName(), entry.getValue());
  }
  jsonGenerator.writeObjectField("hostsToSessions", hostsToSessionsMap);
  // hostsToSessions ends
  jsonGenerator.writeObjectField("nameToApps", nameToApps);
  // faultManager is not required
  // We can rebuild the loadManager
  jsonGenerator.writeEndObject();
}
java
{ "resource": "" }
q160707
ReceivedBlockInfo.setDelHints
train
/**
 * Sets the deletion hints for this block.
 *
 * @throws IllegalArgumentException if delHints is null or empty
 */
public void setDelHints(String delHints) {
  boolean missing = (delHints == null) || delHints.isEmpty();
  if (missing) {
    throw new IllegalArgumentException("DelHints is empty");
  }
  this.delHints = delHints;
}
java
{ "resource": "" }
q160708
PipesMapRunner.run
train
/**
 * Drives one map task through the external Pipes child process: starts the
 * Application, tells the child to run the map, and (for Java input) pumps
 * every record down the wire before waiting for the child to finish.
 * Any failure aborts the child; cleanup always runs.
 */
@SuppressWarnings("unchecked")
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
    Reporter reporter) throws IOException {
  Application<K1, V1, K2, V2> application = null;
  try {
    // Hand the raw reader to the child only when neither the record reader
    // nor the mapper runs in Java; otherwise the child gets no direct input.
    RecordReader<FloatWritable, NullWritable> fakeInput =
        (!Submitter.getIsJavaRecordReader(job) &&
            !Submitter.getIsJavaMapper(job)) ?
        (RecordReader<FloatWritable, NullWritable>) input : null;
    application = new Application<K1, V1, K2, V2>(job, fakeInput, output,
        reporter, (Class<? extends K2>) job.getOutputKeyClass(),
        (Class<? extends V2>) job.getOutputValueClass());
  } catch (InterruptedException ie) {
    throw new RuntimeException("interrupted", ie);
  }
  DownwardProtocol<K1, V1> downlink = application.getDownlink();
  boolean isJavaInput = Submitter.getIsJavaRecordReader(job);
  downlink.runMap(reporter.getInputSplit(),
      job.getNumReduceTasks(), isJavaInput);
  boolean skipping = job.getBoolean("mapred.skip.on", false);
  try {
    if (isJavaInput) {
      // allocate key & value instances that are re-used for all entries
      K1 key = input.createKey();
      V1 value = input.createValue();
      downlink.setInputTypes(key.getClass().getName(),
          value.getClass().getName());
      while (input.next(key, value)) {
        // map pair to output
        downlink.mapItem(key, value);
        if (skipping) {
          // flush the streams on every record input if running in skip mode
          // so that we don't buffer other records surrounding a bad record.
          downlink.flush();
        }
      }
      downlink.endOfInput();
    }
    application.waitForFinish();
  } catch (Throwable t) {
    // Abort the child on any error; the finally block still cleans up.
    application.abort(t);
  } finally {
    application.cleanup();
  }
}
java
{ "resource": "" }
q160709
DFSck.printUsage
train
/**
 * Prints DFSck command-line usage to stderr.
 *
 * Fixes: added the missing space between "[-service serviceName]" and
 * "[-(zero/one)]" (the concatenation previously rendered as
 * "serviceName][-(zero/one)]"), and removed a doubled space before
 * "tagged CORRUPT" in the trailing note.
 */
static void printUsage() {
  System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
      "[-move | -delete | -openforwrite ] " +
      "[-files [-blocks [-locations | -racks]]]] " +
      "[-limit <limit>] [-service serviceName] " +
      "[-(zero/one)]");
  System.err.println("\t<path>\tstart checking from this path");
  System.err.println("\t-move\tmove corrupted files to /lost+found");
  System.err.println("\t-delete\tdelete corrupted files");
  System.err.println("\t-files\tprint out files being checked");
  System.err.println("\t-openforwrite\tprint out files opened for write");
  System.err.println("\t-list-corruptfileblocks\tprint out list of missing " +
      "blocks and files they belong to");
  System.err.println("\t-blocks\tprint out block report");
  System.err.println("\t-locations\tprint out locations for every block");
  System.err.println("\t-racks\tprint out network topology for data-node locations");
  System.err.println("\t-limit\tlimit output to <limit> corrupt files. " +
      "The default value of the limit is 500.");
  System.err.println("\t\tBy default fsck ignores files opened for write, " +
      "use -openforwrite to report such files. They are usually " +
      "tagged CORRUPT or HEALTHY depending on their block " +
      "allocation status");
  ToolRunner.printGenericCommandUsage(System.err);
}
java
{ "resource": "" }
q160710
DFSck.listCorruptFileBlocks
train
/**
 * Streams the namenode's corrupt-file listing page(s) from baseUrl,
 * printing up to {@code limit} corrupt files under {@code dir} followed by
 * a summary line. Pagination uses the "Cookie:" line when the server sends
 * one, falling back to the last printed block name for older servers.
 *
 * @return 0 when no corrupt files were found, -1 otherwise
 */
private Integer listCorruptFileBlocks(String dir, int limit, String baseUrl)
    throws IOException {
  int errCode = -1;
  int numCorrupt = 0;
  int cookie = 0;
  String lastBlock = null;
  final String noCorruptLine = "has no CORRUPT files";
  final String noMoreCorruptLine = "has no more CORRUPT files";
  final String cookiePrefix = "Cookie:";
  boolean allDone = false;
  while (!allDone) {
    final StringBuffer url = new StringBuffer(baseUrl);
    if (cookie > 0) {
      url.append("&startblockafterIndex=").append(String.valueOf(cookie));
    } else if (lastBlock != null) {
      // for backwards compatibility purpose
      url.append("&startblockafter=").append(lastBlock);
    }
    URL path = new URL(url.toString());
    // SecurityUtil.fetchServiceTicket(path);
    URLConnection connection = path.openConnection();
    InputStream stream = connection.getInputStream();
    BufferedReader input = new BufferedReader(new InputStreamReader(
        stream, "UTF-8"));
    try {
      String line = null;
      while ((line = input.readLine()) != null) {
        // A cookie line carries the pagination index for the next request.
        if (line.startsWith(cookiePrefix)){
          try{
            cookie = Integer.parseInt(line.split("\t")[1]);
          } catch (Exception e){
            allDone = true;
            break;
          }
          continue;
        }
        // Terminal status lines, or hitting the output limit, end the scan.
        if ((line.endsWith(noCorruptLine)) ||
            (line.endsWith(noMoreCorruptLine)) ||
            (line.endsWith(NamenodeFsck.HEALTHY_STATUS)) ||
            (line.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) ||
            numCorrupt >= limit) {
          allDone = true;
          break;
        }
        // Skip headers and boilerplate lines.
        if ((line.isEmpty())
            || (line.startsWith("FSCK started by"))
            || (line.startsWith("Unable to locate any corrupt files under"))
            || (line.startsWith("The filesystem under path")))
          continue;
        numCorrupt++;
        if (numCorrupt == 1) {
          out.println("The list of corrupt files under path '"
              + dir + "' are:");
        }
        out.println(line);
        try {
          // Get the block # that we need to send in next call
          lastBlock = line.split("\t")[0];
        } catch (Exception e) {
          allDone = true;
          break;
        }
      }
    } finally {
      input.close();
    }
  }
  out.println("The filesystem under path '" + dir + "' has "
      + numCorrupt + " CORRUPT files");
  if (numCorrupt == 0)
    errCode = 0;
  return errCode;
}
java
{ "resource": "" }
q160711
DFSck.updateConfKeys
train
/**
 * Copies the per-service HTTP address key (base key + suffix + optional
 * ".nameserviceId" qualifier) into the generic
 * DFS_NAMENODE_HTTP_ADDRESS_KEY slot when a value is configured.
 */
private static void updateConfKeys(Configuration conf, String suffix,
    String nameserviceId) {
  String qualifier = nameserviceId.isEmpty() ? "" : ("." + nameserviceId);
  String specificKey =
      FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY + suffix + qualifier;
  String value = conf.get(specificKey);
  if (value == null) {
    return;
  }
  conf.set(FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, value);
}
java
{ "resource": "" }
q160712
DFSck.optionExist
train
/** Returns true if any element of args equals opt, ignoring case. */
private static boolean optionExist(String args[], String opt) {
  int i = 0;
  while (i < args.length) {
    if (args[i].equalsIgnoreCase(opt)) {
      return true;
    }
    i++;
  }
  return false;
}
java
{ "resource": "" }
q160713
Hash.parseHashType
train
/**
 * Maps a hash algorithm name ("jenkins" or "murmur", case-insensitive)
 * to its type code; any other name yields INVALID_HASH.
 */
public static int parseHashType(String name) {
  if ("jenkins".equalsIgnoreCase(name)) {
    return JENKINS_HASH;
  }
  if ("murmur".equalsIgnoreCase(name)) {
    return MURMUR_HASH;
  }
  return INVALID_HASH;
}
java
{ "resource": "" }
q160714
Hash.getInstance
train
/**
 * Returns the singleton Hash implementation for the given type code,
 * or null for an unknown code.
 */
public static Hash getInstance(int type) {
  if (type == JENKINS_HASH) {
    return JenkinsHash.getInstance();
  }
  if (type == MURMUR_HASH) {
    return MurmurHash.getInstance();
  }
  return null;
}
java
{ "resource": "" }
q160715
NotifierUtils.getVersion
train
/**
 * Reads the on-disk layout version from the VERSION file of a local
 * ("file" scheme) edits directory. Only local URIs are supported.
 *
 * @throws IOException if the URI is not local, the VERSION file is
 *         missing, or it lacks a layout version entry
 */
public static int getVersion(URI editsURI) throws IOException {
  if (editsURI.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
    // Inner-class instantiation: StorageDirectory needs an enclosing
    // NNStorage instance, hence the "new X().new Y()" form.
    StorageDirectory sd =
        new NNStorage(new StorageInfo()).new StorageDirectory(
            new File(editsURI.getPath()));
    File versionFile = sd.getVersionFile();
    if (!versionFile.exists()) {
      throw new IOException("No VERSION file in: " + editsURI +
          "version file: " + versionFile );
    }
    Properties props = Storage.getProps(versionFile);
    String layout = props.getProperty(Storage.LAYOUT_VERSION);
    if (layout == null) {
      throw new IOException("No layout version in: " + editsURI);
    }
    return Integer.valueOf(layout);
  } else {
    throw new IOException("Non file journals not supported yet.");
  }
}
java
{ "resource": "" }
q160716
NotifierUtils.uriToFile
train
/**
 * Converts a local ("file" scheme) URI to a File.
 *
 * @throws IOException if the URI uses any other scheme
 */
public static File uriToFile(URI u) throws IOException {
  boolean isLocal = u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME);
  if (isLocal) {
    return new File(u.getPath());
  }
  throw new IOException("URI does not represent a file");
}
java
{ "resource": "" }
q160717
NotifierUtils.getAllAncestors
train
public static List<String> getAllAncestors(String eventPath) { // check if the path is valid. if (eventPath == null || !eventPath.startsWith(Path.SEPARATOR)) { return null; } if (eventPath.equals(Path.SEPARATOR)) { return Arrays.asList(Path.SEPARATOR); } List<String> ancestors = new ArrayList<String>(); while (eventPath.length() > 0) { ancestors.add(eventPath); eventPath = eventPath.substring(0, eventPath.lastIndexOf(Path.SEPARATOR)); } // add the root directory ancestors.add(Path.SEPARATOR); return ancestors; }
java
{ "resource": "" }
q160718
JobControl.addJob
train
/**
 * Registers a job with the controller: assigns it a fresh id, marks it
 * WAITING, and queues it. Returns the assigned id.
 */
synchronized public String addJob(Job aJob) {
  String jobId = this.getNextJobID();
  aJob.setJobID(jobId);
  aJob.setState(Job.WAITING);
  this.addToQueue(aJob);
  return jobId;
}
java
{ "resource": "" }
q160719
DataBlockScanner.updateBlockInfo
train
/**
 * Applies a verification-log entry to the in-memory scan state: when the
 * entry is newer than the block's recorded last scan, the block info is
 * updated to reflect the verification.
 */
private synchronized void updateBlockInfo(LogEntry e) {
  BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
  if(info != null && e.verificationTime > 0 &&
      info.lastScanTime < e.verificationTime) {
    // Remove before mutating, then re-add: presumably the tracking
    // structure is ordered by lastScanTime, so in-place mutation would
    // corrupt its ordering — confirm against addBlockInfo/delBlockInfo.
    delBlockInfo(info);
    info.lastScanTime = e.verificationTime;
    info.lastScanType = ScanType.VERIFICATION_SCAN;
    addBlockInfo(info);
  }
}
java
{ "resource": "" }
q160720
DataBlockScanner.addBlock
train
/**
 * Registers a block for periodic scanning. If the block is already
 * tracked, the stale entry is logged and removed before the block is
 * re-added with a fresh scan time. No-op until the scanner is initialized.
 */
synchronized void addBlock(Block block) {
  if (!isInitialized()) {
    return;
  }
  BlockScanInfo existing = blockMap.get(block);
  if (existing != null) {
    LOG.warn("Adding an already existing block " + block);
    delBlockInfo(existing);
  }
  BlockScanInfo fresh = new BlockScanInfo(block);
  fresh.lastScanTime = getNewBlockScanTime();
  addBlockInfo(fresh);
  adjustThrottler();
}
java
{ "resource": "" }
q160721
DataBlockScanner.deleteBlock
train
/**
 * Stops tracking a block for scanning, if it is currently tracked.
 * No-op until the scanner is initialized.
 */
synchronized void deleteBlock(Block block) {
  if (!isInitialized()) {
    return;
  }
  BlockScanInfo info = blockMap.get(block);
  if (info == null) {
    return;
  }
  delBlockInfo(info);
}
java
{ "resource": "" }
q160722
DataBlockScanner.verifyFirstBlock
train
/**
 * Verifies the block at the head of blockInfoSet, if any, and records its
 * id in processedBlocks. The lock is held only while picking the block —
 * not during the (potentially slow) verification — so other state updates
 * are not blocked.
 */
private void verifyFirstBlock() {
  BlockScanInfo block = null;
  synchronized (this) {
    if ( blockInfoSet.size() > 0 ) {
      block = blockInfoSet.first();
    }
  }
  if ( block != null ) {
    verifyBlock(block);
    processedBlocks.add(block.block.getBlockId());
  }
}
java
{ "resource": "" }
q160723
DistCp.checkSrcPath
train
/**
 * Expands every source path glob and replaces the contents of srcPaths
 * with the expansion. Globs that match nothing are collected and reported
 * together in a single InvalidInputException.
 */
private static void checkSrcPath(Configuration conf, List<Path> srcPaths
    ) throws IOException {
  List<IOException> errors = new ArrayList<IOException>();
  List<Path> expanded = new LinkedList<Path>();
  for (Path src : srcPaths) {
    FileSystem fs = src.getFileSystem(conf);
    FileStatus[] matches = fs.globStatus(src);
    if (matches == null || matches.length == 0) {
      errors.add(new IOException("Input source " + src + " does not exist."));
      continue;
    }
    for (FileStatus match : matches) {
      expanded.add(match.getPath());
    }
  }
  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  srcPaths.clear();
  srcPaths.addAll(expanded);
}
java
{ "resource": "" }
q160724
DistCp.getCopier
train
/**
 * Builds a DistCopier for the given arguments and prepares its job.
 * Returns null when job setup decided there is nothing to run.
 */
public static DistCopier getCopier(final Configuration conf,
    final Arguments args) throws IOException {
  DistCopier copier = new DistCopier(conf, args);
  copier.setupJob();
  return copier.getJobConf() == null ? null : copier;
}
java
{ "resource": "" }
q160725
DistCp.copy
train
/**
 * Runs the distcp job described by args and waits for completion; no-op
 * when there is nothing to copy. The job directory is always cleaned up.
 *
 * NOTE(review): on InterruptedException the interrupt flag is restored and
 * control falls through to finalizeCopiedFiles() without knowing the job
 * outcome — confirm this best-effort finalization is intended.
 *
 * @throws IOException if the job fails
 */
static void copy(final Configuration conf, final Arguments args
    ) throws IOException {
  DistCopier copier = getCopier(conf, args);
  if (copier != null) {
    try {
      JobClient client = copier.getJobClient();
      RunningJob job = client.submitJob(copier.getJobConf());
      try {
        if (!client.monitorAndPrintJob(copier.getJobConf(), job)) {
          throw new IOException("Job failed!");
        }
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
      copier.finalizeCopiedFiles();
    } finally {
      copier.cleanupJob();
    }
  }
}
java
{ "resource": "" }
q160726
DistCp.run
train
/**
 * Tool entry point: parses the arguments and performs the copy, mapping
 * each failure category to a distinct exit code — 0 on success, -1 for
 * usage/input errors, DuplicationException.ERROR_CODE for duplicate
 * sources, -3 for known remote errors, -999 for anything else.
 */
public int run(String[] args) {
  try {
    copy(conf, Arguments.valueOf(args, conf));
    return 0;
  } catch (IllegalArgumentException e) {
    System.err.println(StringUtils.stringifyException(e) + "\n" + usage);
    ToolRunner.printGenericCommandUsage(System.err);
    return -1;
  } catch (InvalidInputException e) {
    System.err.println(StringUtils.stringifyException(e) + "\n");
    return -1;
  } catch (DuplicationException e) {
    System.err.println(StringUtils.stringifyException(e));
    return DuplicationException.ERROR_CODE;
  } catch (RemoteException e) {
    // Unwrap the RPC wrapper so the real cause is reported.
    final IOException unwrapped = e.unwrapRemoteException(
        FileNotFoundException.class,
        AccessControlException.class,
        QuotaExceededException.class);
    System.err.println(StringUtils.stringifyException(unwrapped));
    return -3;
  } catch (Exception e) {
    System.err.println("With failures, global counters are inaccurate; " +
        "consider running with -i");
    System.err.println("Copy failed: " + StringUtils.stringifyException(e));
    return -999;
  }
}
java
{ "resource": "" }
q160727
DistCp.makeRelative
train
/**
 * Returns absPath expressed relative to root, "." when they are equal,
 * or null when absPath is not under root.
 *
 * Fix: the original called pathTokens.nextToken() without checking
 * hasMoreTokens(), so a root deeper than absPath threw
 * NoSuchElementException instead of returning null.
 *
 * @throws IllegalArgumentException if absPath is not absolute
 */
static String makeRelative(Path root, Path absPath) {
  if (!absPath.isAbsolute()) {
    throw new IllegalArgumentException("!absPath.isAbsolute(), absPath="
        + absPath);
  }
  String p = absPath.toUri().getPath();
  StringTokenizer pathTokens = new StringTokenizer(p, "/");
  for (StringTokenizer rootTokens = new StringTokenizer(
      root.toUri().getPath(), "/"); rootTokens.hasMoreTokens(); ) {
    // absPath ran out of components, or a component differs:
    // absPath is not under root.
    if (!pathTokens.hasMoreTokens()
        || !rootTokens.nextToken().equals(pathTokens.nextToken())) {
      return null;
    }
  }
  StringBuilder sb = new StringBuilder();
  while (pathTokens.hasMoreTokens()) {
    sb.append(pathTokens.nextToken());
    if (pathTokens.hasMoreTokens()) {
      sb.append(Path.SEPARATOR);
    }
  }
  return sb.length() == 0 ? "." : sb.toString();
}
java
{ "resource": "" }
q160728
DistCp.fullyDelete
train
/**
 * Recursively deletes dir, resolved against its own FileSystem.
 * No-op when dir is null.
 */
static void fullyDelete(String dir, Configuration conf) throws IOException {
  if (dir == null) {
    return;
  }
  Path target = new Path(dir);
  target.getFileSystem(conf).delete(target, true);
}
java
{ "resource": "" }
q160729
DistCp.setReducerCount
train
/**
 * Chooses and sets the reduce-task count from the file count and the
 * configured limits, returning the chosen count (always >= 1).
 *
 * NOTE(review): despite the "max" naming, Math.max() here makes
 * MAX_REDUCE_LABEL a floor rather than a ceiling — when
 * fileCount / MAX_FILES_PER_REDUCER_LABEL exceeds it, the larger value
 * wins. Confirm this is the intended semantics.
 */
private static int setReducerCount(long fileCount, JobConf job,
    JobClient client) {
  // calculate the max number of reducers.
  int numReducers =
      Math.max(job.getInt(MAX_REDUCE_LABEL, MAX_REDUCERS_DEFAULT),
          (int) (fileCount / job.getInt(MAX_FILES_PER_REDUCER_LABEL,
              MAX_FILES_PER_REDUCER_DEFAULT)));
  // make sure we at least have 1.
  numReducers = Math.max(numReducers, 1);
  job.setNumReduceTasks(numReducers);
  return numReducers;
}
java
{ "resource": "" }
q160730
DistCp.sameFile
train
/**
 * Decides whether src and dst denote "the same file" for copy-skipping:
 * the destination must exist with the same length, and (unless
 * skipCRCCheck) the checksums must match. When either side cannot supply
 * a checksum, the files are treated as the same.
 */
static private boolean sameFile(FileSystem srcfs, FileStatus srcstatus,
    FileSystem dstfs, Path dstpath, boolean skipCRCCheck) throws IOException {
  FileStatus dststatus;
  try {
    dststatus = dstfs.getFileStatus(dstpath);
  } catch(FileNotFoundException fnfe) {
    // No destination file: definitely not the same.
    return false;
  }
  //same length?
  if (srcstatus.getLen() != dststatus.getLen()) { return false; }
  if (skipCRCCheck) {
    LOG.debug("Skipping CRC Check");
    return true;
  }
  //get src checksum
  final FileChecksum srccs;
  try {
    srccs = srcfs.getFileChecksum(srcstatus.getPath());
  } catch(FileNotFoundException fnfe) {
    /*
     * Two possible cases:
     * (1) src existed once but was deleted between the time period that
     *     srcstatus was obtained and the try block above.
     * (2) srcfs does not support file checksum and (incorrectly) throws
     *     FNFE, e.g. some previous versions of HftpFileSystem.
     * For case (1), it is okay to return true since src was already deleted.
     * For case (2), true should be returned.
     */
    return true;
  }
  //compare checksums
  try {
    final FileChecksum dstcs = dstfs.getFileChecksum(dststatus.getPath());
    //return true if checksum is not supported
    //(i.e. some of the checksums is null)
    return srccs == null || dstcs == null || srccs.equals(dstcs);
  } catch(FileNotFoundException fnfe) {
    return false;
  }
}
java
{ "resource": "" }
q160731
DistCp.isAncestorPath
train
/**
 * Returns true when path x is y itself or an ancestor directory of y,
 * i.e. y starts with x followed immediately by a path separator.
 */
static private boolean isAncestorPath(String x, String y) {
  if (!y.startsWith(x)) {
    return false;
  }
  int prefixLen = x.length();
  if (y.length() == prefixLen) {
    return true;
  }
  return y.charAt(prefixLen) == Path.SEPARATOR_CHAR;
}
java
{ "resource": "" }
q160732
DistCp.checkDuplication
train
/**
 * Sorts the (dst, src) sequence file into {@code sorted} and scans adjacent
 * entries for duplicate destinations.
 *
 * @throws DuplicationException when two sources map to the same destination
 */
static private void checkDuplication(FileSystem fs, Path file, Path sorted,
    Configuration conf) throws IOException {
  SequenceFile.Reader in = null;
  try {
    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
        new Text.Comparator(), Text.class, Text.class, conf);
    sorter.sort(file, sorted);
    in = new SequenceFile.Reader(fs, sorted, conf);
    Text prevdst = null, curdst = new Text();
    Text prevsrc = null, cursrc = new Text();
    for(; in.next(curdst, cursrc); ) {
      if (prevdst != null && curdst.equals(prevdst)) {
        throw new DuplicationException(
            "Invalid input, there are duplicated files in the sources: "
            + prevsrc + ", " + cursrc);
      }
      // Fresh Text objects each round: next() mutates cur*, so the prev*
      // references must not alias the objects being read into.
      prevdst = curdst;
      curdst = new Text();
      prevsrc = cursrc;
      cursrc = new Text();
    }
  } finally {
    checkAndClose(in);
  }
}
java
{ "resource": "" }
q160733
JobMonitor.getRemainingJobs
train
/**
 * Returns a snapshot copy of the jobs still tracked by the monitor.
 * Intended to be called after the monitor thread has stopped; a warning is
 * logged if it is still alive, since the result would then be immediately
 * stale.
 */
List<Job> getRemainingJobs() {
  if (mThread.isAlive()) {
    LOG.warn("Internal error: Polling running monitor for jobs");
  }
  // Copy under the mJobs lock so the iteration is safe.
  synchronized (mJobs) {
    return new ArrayList<Job>(mJobs);
  }
}
java
{ "resource": "" }
q160734
HadoopThriftServer.convertLocatedBlock
train
/**
 * Converts a thrift TLocatedBlock into an HDFS LocatedBlock, translating
 * the block identity and each datanode location.
 */
private static LocatedBlock convertLocatedBlock(TLocatedBlock tblk) {
  TBlock tb = tblk.block;
  Block block =
      new Block(tb.getBlockId(), tb.getNumBytes(), tb.getGenerationStamp());
  List<TDatanodeID> locations = tblk.location;
  DatanodeInfo[] nodes = new DatanodeInfo[locations.size()];
  for (int i = 0; i < nodes.length; i++) {
    String name = locations.get(i).name;
    nodes[i] = new DatanodeInfo(new DatanodeID(name, "", -1, getPort(name)));
  }
  return new LocatedBlock(block, nodes);
}
java
{ "resource": "" }
q160735
HadoopThriftServer.convertBlock
train
/** Converts a thrift TBlock into an HDFS Block. */
private static Block convertBlock(TBlock tblk) {
  long blockId = tblk.getBlockId();
  long numBytes = tblk.getNumBytes();
  long genStamp = tblk.getGenerationStamp();
  return new Block(blockId, numBytes, genStamp);
}
java
{ "resource": "" }
q160736
HadoopThriftServer.createServerSocket
train
private ServerSocket createServerSocket(int port) throws IOException { try { ServerSocket sock = new ServerSocket(); // Prevent 2MSL delay problem on server restarts sock.setReuseAddress(true); // Bind to listening port if (port == 0) { sock.bind(null); serverPort = sock.getLocalPort(); } else { sock.bind(new InetSocketAddress(port)); } return sock; } catch (IOException ioe) { throw new IOException("Could not create ServerSocket on port " + port + "." + ioe); } }
java
{ "resource": "" }
q160737
DfsTask.setArgs
train
/**
 * Splits the comma-separated argument string (trimming surrounding
 * whitespace around each comma) and appends each piece to argv.
 */
public void setArgs(String args) {
  String[] pieces = args.trim().split("\\s*,\\s*");
  for (String piece : pieces) {
    argv.add(piece);
  }
}
java
{ "resource": "" }
q160738
DfsTask.setOut
train
/**
 * Captures command stdout into a buffer to be published under the named
 * property; when stderr targets the same property, both share one buffer.
 */
public void setOut(String outprop) {
  this.outprop = outprop;
  out = new ByteArrayOutputStream();
  if (outprop.equals(errprop)) {
    err = out;
  }
}
java
{ "resource": "" }
q160739
DfsTask.setErr
train
/**
 * Captures command stderr into a buffer to be published under the named
 * property; when it names the same property as stdout, the stdout buffer
 * is shared so the two streams interleave.
 *
 * Fix: dropped the redundant nested assignment ("err = out" inside the
 * ternary) from the original — the outer assignment already does it.
 */
public void setErr(String errprop) {
  this.errprop = errprop;
  err = errprop.equals(outprop) ? out : new ByteArrayOutputStream();
}
java
{ "resource": "" }
q160740
DfsTask.pushContext
train
/**
 * Redirects System.out/err into the captured buffers, saving Ant's
 * original streams so popContext() can restore them.
 */
protected void pushContext() {
  antOut = System.out;
  antErr = System.err;
  System.setOut(new PrintStream(out));
  // When out and err share one buffer, reuse the freshly-set System.out so
  // both streams write through the same PrintStream.
  System.setErr(out == err ? System.out : new PrintStream(err));
}
java
{ "resource": "" }
q160741
DfsTask.popContext
train
/**
 * Publishes the captured output/error buffers as project properties (when
 * configured and no stream error occurred), restores Ant's original
 * streams, and releases the configuration classloader.
 */
protected void popContext() {
  // write output to property, if applicable
  if (outprop != null && !System.out.checkError())
    getProject().setNewProperty(outprop, out.toString());
  // err may alias out; only publish it separately when it does not.
  if (out != err && errprop != null && !System.err.checkError())
    getProject().setNewProperty(errprop, err.toString());
  System.setErr(antErr);
  System.setOut(antOut);
  confloader.cleanup();
  confloader.setParent(null);
}
java
{ "resource": "" }
q160742
ReconfigurationServlet.printConf
train
/**
 * Renders an HTML form listing every property whose value differs between
 * the running configuration and a freshly loaded one. Reconfigurable
 * properties carry hidden inputs so applyChanges() can pick them up;
 * non-reconfigurable ones are shown in red and flagged with a warning.
 */
private void printConf(PrintWriter out, Reconfigurable reconf) {
  Configuration oldConf = reconf.getConf();
  Configuration newConf = new Configuration();
  if (reconf instanceof ReconfigurableBase)
    ((ReconfigurableBase) reconf).preProcessConfiguration(newConf);
  Collection<ReconfigurationUtil.PropertyChange> changes =
      ReconfigurationUtil.getChangedProperties(newConf, oldConf);
  boolean changeOK = true;
  out.println("<form action=\"\" method=\"post\">");
  out.println("<table border=\"1\">");
  out.println("<tr><th>Property</th><th>Old value</th>");
  out.println("<th>New value </th></tr>");
  for (ReconfigurationUtil.PropertyChange c: changes) {
    out.print("<tr><td>");
    if (!reconf.isPropertyReconfigurable(c.prop)) {
      // Not changeable at runtime: display only, and remember to warn.
      out.print("<font color=\"red\">" +
          StringEscapeUtils.escapeHtml(c.prop) + "</font>");
      changeOK = false;
    } else {
      out.print(StringEscapeUtils.escapeHtml(c.prop));
      out.print("<input type=\"hidden\" name=\"" +
          StringEscapeUtils.escapeHtml(c.prop) + "\" value=\"" +
          StringEscapeUtils.escapeHtml(c.newVal) + "\"/>");
    }
    out.print("</td><td>" +
        (c.oldVal == null ? "<it>default</it>"
            : StringEscapeUtils.escapeHtml(c.oldVal)) +
        "</td><td>" +
        (c.newVal == null ? "<it>default</it>"
            : StringEscapeUtils.escapeHtml(c.newVal)) +
        "</td>");
    out.print("</tr>\n");
  }
  out.println("</table>");
  if (!changeOK) {
    out.println("<p><font color=\"red\">WARNING: properties marked red" +
        " will not be changed until the next restart.</font></p>");
  }
  out.println("<input type=\"submit\" value=\"Apply\" />");
  out.println("</form>");
}
java
{ "resource": "" }
q160743
ReconfigurationServlet.applyChanges
train
/**
 * Applies the property changes posted from the form written by
 * printConf(). For each posted parameter: a value of "default"/"null"/""
 * resets the property to its default; a genuinely new value is applied via
 * reconfigureProperty(); a value that no longer matches the freshly loaded
 * configuration is rejected as stale ("since approval"). All work happens
 * while holding the old configuration's lock.
 *
 * @throws ReconfigurationException if a property cannot be reconfigured
 */
private void applyChanges(PrintWriter out, Reconfigurable reconf,
    HttpServletRequest req)
    throws IOException, ReconfigurationException {
  Configuration oldConf = reconf.getConf();
  Configuration newConf = new Configuration();
  if (reconf instanceof ReconfigurableBase)
    ((ReconfigurableBase) reconf).preProcessConfiguration(newConf);
  Enumeration<String> params = getParams(req);
  synchronized(oldConf) {
    while (params.hasMoreElements()) {
      String rawParam = params.nextElement();
      String param = StringEscapeUtils.unescapeHtml(rawParam);
      String value =
          StringEscapeUtils.unescapeHtml(req.getParameter(rawParam));
      if (reconf instanceof ReconfigurableBase)
        param = ((ReconfigurableBase) reconf).preProcessKey(param);
      if (value != null) {
        if (value.equals(newConf.getRaw(param)) || value.equals("default")
            || value.equals("null") || value.equals("")) {
          if ((value.equals("default") || value.equals("null")
              || value.equals("")) && oldConf.getRaw(param) != null) {
            // Reset an explicitly-set property back to its default.
            out.println("<p>Changed \""
                + StringEscapeUtils.escapeHtml(param) + "\" from \""
                + StringEscapeUtils.escapeHtml(oldConf.getRaw(param))
                + "\" to default</p>");
            reconf.reconfigureProperty(param, null);
          } else if (!value.equals("default") && !value.equals("null")
              && !value.equals("")
              && (oldConf.getRaw(param) == null
                  || !oldConf.getRaw(param).equals(value))) {
            // change from default or value to different value
            if (oldConf.getRaw(param) == null) {
              out.println("<p>Changed \""
                  + StringEscapeUtils.escapeHtml(param)
                  + "\" from default to \""
                  + StringEscapeUtils.escapeHtml(value) + "\"</p>");
            } else {
              out.println("<p>Changed \""
                  + StringEscapeUtils.escapeHtml(param) + "\" from \""
                  + StringEscapeUtils.escapeHtml(oldConf.getRaw(param))
                  + "\" to \"" + StringEscapeUtils.escapeHtml(value)
                  + "\"</p>");
            }
            reconf.reconfigureProperty(param, value);
          } else {
            LOG.info("property " + param + " unchanged");
          }
        } else {
          // parameter value != newConf value
          out.println("<p>\"" + StringEscapeUtils.escapeHtml(param)
              + "\" not changed because value has changed from \""
              + StringEscapeUtils.escapeHtml(value) + "\" to \""
              + StringEscapeUtils.escapeHtml(newConf.getRaw(param))
              + "\" since approval</p>");
        }
      }
    }
  }
}
java
{ "resource": "" }
q160744
ProtocolCompatible.isCompatibleClientProtocol
train
/**
 * Two ClientProtocol versions are compatible when they are equal, or when
 * both belong to the known set of mutually wire-compatible releases.
 *
 * Improvement: the duplicated nine-way OR chain (once for the client
 * version, once for the server version) is factored into a single private
 * helper, so a new compatible version needs to be added in one place only.
 */
public static boolean isCompatibleClientProtocol(
    long clientVersion, long serverVersion) {
  return clientVersion == serverVersion
      || (isMutuallyCompatibleClientVersion(clientVersion)
          && isMutuallyCompatibleClientVersion(serverVersion));
}

/** True if this version is in the mutually compatible ClientProtocol set. */
private static boolean isMutuallyCompatibleClientVersion(long version) {
  return version == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION - 1
      || version == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION
      || version == ClientProtocol.ITERATIVE_LISTING_VERSION
      || version == ClientProtocol.BULK_BLOCK_LOCATIONS_VERSION
      || version == ClientProtocol.CONCAT_VERSION
      || version == ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION
      || version == ClientProtocol.SAVENAMESPACE_FORCE
      || version == ClientProtocol.RECOVER_LEASE_VERSION
      || version == ClientProtocol.CLOSE_RECOVER_LEASE_VERSION;
}
java
{ "resource": "" }
q160745
ProtocolCompatible.isCompatibleClientDatanodeProtocol
train
/**
 * Two ClientDatanodeProtocol versions are compatible when they are equal,
 * or when both belong to the known mutually wire-compatible set.
 *
 * Improvement: the duplicated OR chain is factored into a private helper,
 * mirroring the structure used for the client protocol check.
 */
public static boolean isCompatibleClientDatanodeProtocol(
    long clientVersion, long serverVersion) {
  return clientVersion == serverVersion
      || (isMutuallyCompatibleClientDatanodeVersion(clientVersion)
          && isMutuallyCompatibleClientDatanodeVersion(serverVersion));
}

/** True if this version is in the compatible ClientDatanodeProtocol set. */
private static boolean isMutuallyCompatibleClientDatanodeVersion(long version) {
  return version == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION - 1
      || version == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION
      || version == ClientDatanodeProtocol.COPY_BLOCK_VERSION;
}
java
{ "resource": "" }
q160746
HighTideNode.stop
train
/**
 * Initiates shutdown of all HighTideNode services (RPC server, trigger
 * thread, file fixer, metrics). Idempotent: a second call returns
 * immediately. Every component is null-checked, so this is safe to call
 * on a partially started node.
 */
public void stop() {
  if (stopRequested) {
    return;
  }
  stopRequested = true;
  running = false;
  if (server != null) server.stop();
  if (triggerThread != null) triggerThread.interrupt();
  if (fileFixer != null) fileFixer.shutdown();
  if (fileFixerThread != null) fileFixerThread.interrupt();
  if (myMetrics != null) {
    myMetrics.shutdown();
  }
}
java
{ "resource": "" }
q160747
HighTideNode.shutdown
train
/**
 * Stops the config reloader, the file fixer (and its thread), and the
 * HTTP server. Unlike stop(), this assumes all components were started
 * and are non-null.
 */
void shutdown() throws IOException, InterruptedException {
  configMgr.stopReload();      // stop config reloads
  fileFixer.shutdown();        // stop block fixer
  fileFixerThread.interrupt();
  server.stop();               // stop http server
}
java
{ "resource": "" }
q160748
HighTideNode.createHighTideNode
train
/**
 * Parses the startup arguments and instantiates a HighTideNode with them
 * recorded in conf. Prints usage and returns null when the arguments are
 * invalid. A null conf is replaced by a fresh Configuration.
 */
public static HighTideNode createHighTideNode(String argv[],
    Configuration conf) throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  StartupOption startOpt = parseArguments(argv);
  if (startOpt == null) {
    printUsage();
    return null;
  }
  setStartupOption(conf, startOpt);
  return new HighTideNode(conf);
}
java
{ "resource": "" }
q160749
FileUtil.stat2Paths
train
/**
 * Extracts the Path from each FileStatus.
 * Returns null when stats is null.
 */
public static Path[] stat2Paths(FileStatus[] stats) {
  if (stats == null) {
    return null;
  }
  Path[] paths = new Path[stats.length];
  int i = 0;
  for (FileStatus stat : stats) {
    paths[i++] = stat.getPath();
  }
  return paths;
}
java
{ "resource": "" }
q160750
FileUtil.stat2Paths
train
/**
 * Like {@code stat2Paths(stats)}, but substitutes a single-element array
 * containing {@code path} when stats is null.
 */
public static Path[] stat2Paths(FileStatus[] stats, Path path) {
  return stats == null ? new Path[]{path} : stat2Paths(stats);
}
java
{ "resource": "" }
q160751
FileUtil.fullyDelete
train
public static boolean fullyDelete(File dir) throws IOException { boolean deleted = true; File contents[] = dir.listFiles(); if (contents != null) { for (int i = 0; i < contents.length; i++) { if (contents[i].isFile()) { if (!contents[i].delete()) { deleted = false; } } else { //try deleting the directory // this might be a symlink boolean b = false; b = contents[i].delete(); if (b){ //this was indeed a symlink or an empty directory continue; } // if not an empty directory or symlink let // fullydelete handle it. if (!fullyDelete(contents[i])) { deleted = false; } } } } return dir.delete() && deleted; }
java
{ "resource": "" }
q160752
FileUtil.fullyDelete
train
/**
 * Recursively deletes {@code dir} on the given FileSystem.
 *
 * @deprecated call {@code fs.delete(dir, true)} directly instead.
 */
@Deprecated
public static void fullyDelete(FileSystem fs, Path dir) throws IOException {
  fs.delete(dir, true);
}
java
{ "resource": "" }
q160753
FileUtil.copy
train
/**
 * Copies src on srcFS to dst on dstFS, overwriting any existing
 * destination (delegates to the full overload with overwrite = true).
 */
public static boolean copy(FileSystem srcFS, Path src, FileSystem dstFS,
    Path dst, boolean deleteSource, Configuration conf) throws IOException {
  return copy(srcFS, src, dstFS, dst, deleteSource, true, conf);
}
java
{ "resource": "" }
q160754
FileUtil.copy
train
/**
 * Recursively copies src on srcFS to dst on dstFS, optionally deleting the
 * source afterwards, optionally validating copied file lengths, and
 * passing an IOThrottler through to the byte copier. On an I/O failure the
 * partially written destination file is deleted and the exception is
 * rethrown.
 *
 * @return false when a destination directory could not be created,
 *         otherwise the result of the source deletion (or true)
 */
public static boolean copy(FileSystem srcFS, Path src,
    FileSystem dstFS, Path dst,
    boolean deleteSource,
    boolean overwrite, boolean validate,
    Configuration conf, IOThrottler throttler) throws IOException {
  dst = checkDest(src.getName(), dstFS, dst, overwrite);
  FileStatus srcFileStatus = srcFS.getFileStatus(src);
  if (srcFileStatus == null) {
    throw new FileNotFoundException("File not found: " + src);
  }
  if (srcFileStatus.isDir()) {
    checkDependencies(srcFS, src, dstFS, dst);
    if (!dstFS.mkdirs(dst)) {
      return false;
    }
    FileStatus contents[] = srcFS.listStatus(src);
    for (int i = 0; i < contents.length; i++) {
      copy(srcFS, contents[i].getPath(), dstFS,
          new Path(dst, contents[i].getPath().getName()),
          deleteSource, overwrite, validate, conf, throttler);
    }
  } else {
    InputStream in=null;
    OutputStream out = null;
    try {
      in = srcFS.open(src);
      out = dstFS.create(dst, overwrite);
      // copyBytes(close=true) closes both streams on success.
      IOUtils.copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096),
          true, throttler);
      // validate file size
      if (validate) {
        InjectionHandler.processEventIO(
            InjectionEventCore.FILE_TRUNCATION, dstFS, dst);
        FileStatus dstFileStatus = dstFS.getFileStatus(dst);
        if (dstFileStatus == null
            || dstFileStatus.getLen() != srcFileStatus.getLen()) {
          throw new IOException("Mismatched file length: src=" + src
              + " dst=" + dst);
        }
      }
    } catch (IOException e) {
      // Clean up streams and the partial destination before rethrowing.
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      dstFS.delete(dst, true);
      throw e;
    }
  }
  if (deleteSource) {
    return srcFS.delete(src, true);
  } else {
    return true;
  }
}
java
{ "resource": "" }
q160755
FileUtil.copy
train
/**
 * Copy a local file or directory tree {@code src} to {@code dst} on
 * {@code dstFS}.
 *
 * @param deleteSource delete src after a successful copy
 * @return false if a destination directory could not be created; true otherwise
 * @throws IOException if src cannot be listed, is neither a file nor a
 *         directory, or the copy fails
 */
public static boolean copy(File src, FileSystem dstFS, Path dst,
                           boolean deleteSource,
                           Configuration conf) throws IOException {
  dst = checkDest(src.getName(), dstFS, dst, false);
  if (src.isDirectory()) {
    if (!dstFS.mkdirs(dst)) {
      return false;
    }
    File contents[] = src.listFiles();
    if (contents == null) {
      // listFiles() returns null on an I/O error or permission failure;
      // surface that instead of the NullPointerException the old code threw.
      throw new IOException("Could not list directory " + src);
    }
    for (int i = 0; i < contents.length; i++) {
      copy(contents[i], dstFS, new Path(dst, contents[i].getName()),
           deleteSource, conf);
    }
  } else if (src.isFile()) {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = new FileInputStream(src);
      out = dstFS.create(dst);
      IOUtils.copyBytes(in, out, conf);
    } catch (IOException e) {
      // Close both streams before propagating the failure.
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      throw e;
    }
  } else {
    throw new IOException(src.toString() + ": No such file or directory");
  }
  if (deleteSource) {
    return FileUtil.fullyDelete(src);
  } else {
    return true;
  }
}
java
{ "resource": "" }
q160756
FileUtil.copy
train
/**
 * Copy a file from {@code srcFS} down to the local file {@code dst}.
 *
 * @param deleteSource whether to remove the source after a successful copy
 * @return the result of the delegated copy overload
 */
public static boolean copy(FileSystem srcFS, Path src, File dst,
                           boolean deleteSource,
                           Configuration conf) throws IOException {
  // Forward to the extended overload with its default trailing arguments.
  return copy(srcFS, src, dst, deleteSource, conf, false, 0L);
}
java
{ "resource": "" }
q160757
FileUtil.getDU
train
/**
 * Takes an input dir and returns its disk usage, recursively, in bytes.
 * A missing path contributes 0; a plain file contributes its length;
 * a directory contributes its own entry size plus everything beneath it.
 *
 * @param dir the file or directory whose usage is computed
 * @return total size in bytes
 */
public static long getDU(File dir) {
  if (!dir.exists()) {
    return 0;
  }
  if (!dir.isDirectory()) {
    return dir.length();
  }
  long size = dir.length();
  File[] allFiles = dir.listFiles();
  if (allFiles == null) {
    // listFiles() returns null on an I/O error or unreadable directory;
    // count just the directory entry instead of throwing NPE.
    return size;
  }
  for (int i = 0; i < allFiles.length; i++) {
    size += getDU(allFiles[i]);
  }
  return size;
}
java
{ "resource": "" }
q160758
FileUtil.unZip
train
/**
 * Unzip {@code inFile} into the directory {@code unzipDir}.
 *
 * @param inFile the zip file as input
 * @param unzipDir the directory to extract into
 * @throws IOException if extraction fails, a parent directory cannot be
 *         created, or an entry would escape {@code unzipDir} (zip-slip)
 */
public static void unZip(File inFile, File unzipDir) throws IOException {
  Enumeration<? extends ZipEntry> entries;
  ZipFile zipFile = new ZipFile(inFile);
  try {
    // Canonical root used to reject entries whose names escape the target.
    String root = unzipDir.getCanonicalPath() + File.separator;
    entries = zipFile.entries();
    while (entries.hasMoreElements()) {
      ZipEntry entry = entries.nextElement();
      if (!entry.isDirectory()) {
        InputStream in = zipFile.getInputStream(entry);
        try {
          File file = new File(unzipDir, entry.getName());
          // Zip-slip defense: an entry like "../../x" from an untrusted
          // archive must not write outside unzipDir.
          if (!file.getCanonicalPath().startsWith(root)) {
            throw new IOException(
                "Zip entry outside target dir: " + entry.getName());
          }
          if (!file.getParentFile().mkdirs()) {
            if (!file.getParentFile().isDirectory()) {
              throw new IOException("Mkdirs failed to create " +
                                    file.getParentFile().toString());
            }
          }
          OutputStream out = new FileOutputStream(file);
          try {
            byte[] buffer = new byte[8192];
            int i;
            while ((i = in.read(buffer)) != -1) {
              out.write(buffer, 0, i);
            }
          } finally {
            out.close();
          }
        } finally {
          in.close();
        }
      }
    }
  } finally {
    zipFile.close();
  }
}
java
{ "resource": "" }
q160759
FileUtil.unTar
train
/**
 * Untar {@code inFile} into {@code untarDir} by shelling out to "tar"
 * (piped through "gzip -dc" when the file name ends in "gz") via bash.
 *
 * @param inFile the tar (or gzipped tar) file as input
 * @param untarDir the directory to extract into
 * @throws IOException if the target dir cannot be created or tar exits
 *         with a non-zero code
 */
public static void unTar(File inFile, File untarDir) throws IOException {
  if (!untarDir.mkdirs()) {
    if (!untarDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + untarDir);
    }
  }
  // StringBuilder: local, single-threaded use — no need for the
  // synchronized StringBuffer the old code used.
  StringBuilder untarCommand = new StringBuilder();
  boolean gzipped = inFile.toString().endsWith("gz");
  if (gzipped) {
    // Decompress to stdout and pipe into tar running inside untarDir.
    untarCommand.append(" gzip -dc '");
    untarCommand.append(FileUtil.makeShellPath(inFile));
    untarCommand.append("' | (");
  }
  untarCommand.append("cd '");
  untarCommand.append(FileUtil.makeShellPath(untarDir));
  untarCommand.append("' ; ");
  untarCommand.append("tar -xf ");
  if (gzipped) {
    untarCommand.append(" -)");
  } else {
    untarCommand.append(FileUtil.makeShellPath(inFile));
  }
  String[] shellCmd = { "bash", "-c", untarCommand.toString() };
  ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd);
  shexec.execute();
  int exitcode = shexec.getExitCode();
  if (exitcode != 0) {
    throw new IOException("Error untarring file " + inFile +
        ". Tar process exited with exit code " + exitcode);
  }
}
java
{ "resource": "" }
q160760
FileUtil.symLink
train
public static int symLink(String target, String linkname) throws IOException{ String cmd = "ln -s " + target + " " + linkname; Process p = Runtime.getRuntime().exec(cmd, null); int returnVal = -1; try{ returnVal = p.waitFor(); } catch(InterruptedException e){ //do nothing as of yet } return returnVal; }
java
{ "resource": "" }
q160761
FileUtil.chmod
train
/**
 * Change the permissions of a single file (non-recursively).
 *
 * @param filename path of the file to change
 * @param perm the permission string passed to chmod
 * @return the exit code of the chmod invocation
 */
public static int chmod(String filename, String perm)
    throws IOException, InterruptedException {
  final boolean recursive = false;
  return chmod(filename, perm, recursive);
}
java
{ "resource": "" }
q160762
FileUtil.createLocalTempFile
train
/**
 * Create a temp file next to {@code basefile}, named with the given
 * prefix followed by the base file's name.
 *
 * @param basefile the file whose directory and name seed the temp file
 * @param prefix prepended to the base file's name
 * @param isDeleteOnExit if true, delete the temp file when the VM exits
 * @return the newly created temp file
 * @throws IOException if the file cannot be created
 */
public static final File createLocalTempFile(final File basefile,
                                             final String prefix,
                                             final boolean isDeleteOnExit)
    throws IOException {
  final File parent = basefile.getParentFile();
  final File tmp =
      File.createTempFile(prefix + basefile.getName(), "", parent);
  if (isDeleteOnExit) {
    tmp.deleteOnExit();
  }
  return tmp;
}
java
{ "resource": "" }
q160763
FileUtil.listStatusHelper
train
public static List<FileStatus> listStatusHelper(FileSystem fs, Path path, int depth, List<FileStatus> acc) throws IOException { FileStatus[] fileStatusResults = fs.listStatus(path); if (fileStatusResults == null) { throw new IOException("Path does not exist: " + path); } for (FileStatus f : fileStatusResults) { Path subPath = f.getPath(); if (!f.isDir()) { acc.add(f); // Accumulate all files } else { if (depth > 1) { listStatusHelper(fs, subPath, depth - 1, acc); } else { acc.add(f); // Accumulate all leaves } } } return acc; }
java
{ "resource": "" }
q160764
FileUtil.listStatusForLeafDir
train
public static void listStatusForLeafDir(FileSystem fs, FileStatus pathStatus, List<FileStatus> acc) throws IOException { if (!pathStatus.isDir()) return; FileStatus[] fileStatusResults = fs.listStatus(pathStatus.getPath()); if (fileStatusResults == null) { throw new IOException("Path does not exist: " + pathStatus.getPath()); } boolean leafDir = true; for (FileStatus f : fileStatusResults) { if (f.isDir()) { leafDir = false; listStatusForLeafDir(fs, f, acc); } } if (leafDir) { acc.add(pathStatus); // Accumulate leaf dir } }
java
{ "resource": "" }
q160765
FileUtil.replaceFile
train
/**
 * Move {@code src} over {@code target}, retrying the delete of an
 * existing target a few times (with 1s sleeps) before giving up.
 *
 * @throws IOException if the rename ultimately fails or the wait is
 *         interrupted
 */
public static void replaceFile(File src, File target) throws IOException {
  /* renameTo() has two limitations on Windows platform.
   * src.renameTo(target) fails if
   * 1) If target already exists OR
   * 2) If target is already open for reading/writing.
   */
  if (!src.renameTo(target)) {
    int retries = 5;
    while (target.exists() && !target.delete() && retries-- >= 0) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Restore the interrupt status before converting to IOException
        // so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IOException("replaceFile interrupted.");
      }
    }
    if (!src.renameTo(target)) {
      throw new IOException("Unable to rename " + src + " to " + target);
    }
  }
}
java
{ "resource": "" }
q160766
TupleWritable.iterator
train
/**
 * Return an iterator over the values present in this tuple.
 *
 * The {@code written} bitmask records which positions hold a value;
 * iteration walks the mask from the lowest set bit upward. remove()
 * clears the bit of the element most recently returned by next().
 */
public Iterator<Writable> iterator() {
  final TupleWritable t = this;
  return new Iterator<Writable>() {
    long i = written; // bits still to be visited
    long last = 0L;   // bit of the element most recently returned
    public boolean hasNext() {
      return 0L != i;
    }
    public Writable next() {
      last = Long.lowestOneBit(i);
      if (0 == last)
        throw new NoSuchElementException();
      i ^= last; // clear the bit we are about to return
      // numberOfTrailingZeros rtn 64 if lsb set
      return t.get(Long.numberOfTrailingZeros(last) % 64);
    }
    public void remove() {
      // Toggle the bit for the last-returned position; if the position
      // still reads as present afterwards, the value was never set.
      t.written ^= last;
      if (t.has(Long.numberOfTrailingZeros(last))) {
        throw new IllegalStateException("Attempt to remove non-existent val");
      }
    }
  };
}
java
{ "resource": "" }
q160767
IFileInputStream.close
train
/**
 * Close the stream. Any unread data bytes are drained first so that the
 * checksum validation performed inside read()/doRead still runs even if
 * the caller stopped reading early.
 */
@Override
public void close() throws IOException {
  if (currentOffset < dataLength) {
    // Drain in chunks of at most 32KB (or whatever remains).
    final int chunk =
        Math.min((int) (Integer.MAX_VALUE & (dataLength - currentOffset)),
                 32 * 1024);
    final byte[] drain = new byte[chunk];
    while (currentOffset < dataLength) {
      final int n = read(drain, 0, drain.length);
      if (n == 0) {
        throw new EOFException("Could not validate checksum");
      }
    }
  }
  in.close();
}
java
{ "resource": "" }
q160768
IFileInputStream.read
train
/**
 * Read up to {@code len} bytes into {@code b} starting at {@code off}.
 * Returns -1 once all data bytes (currentOffset >= dataLength) have been
 * consumed; otherwise delegates to doRead.
 */
public int read(byte[] b, int off, int len) throws IOException {
  final boolean exhausted = currentOffset >= dataLength;
  return exhausted ? -1 : doRead(b, off, len);
}
java
{ "resource": "" }
q160769
IFileInputStream.readWithChecksum
train
/**
 * Read bytes from the stream, appending the raw checksum bytes after the
 * data once the data runs out — so the caller receives data followed by
 * its checksum.
 *
 * State: currentOffset is the logical position; dataLength is where data
 * ends and the checksum begins; presumably length = dataLength +
 * checksumSize — confirm against the field initialization.
 */
public int readWithChecksum(byte[] b, int off, int len) throws IOException {
  if (currentOffset == length) {
    return -1;
  } else if (currentOffset >= dataLength) {
    // If the previous read drained off all the data, then just return
    // the checksum now. Note that checksum validation would have
    // happened in the earlier read
    int lenToCopy = (int) (checksumSize - (currentOffset - dataLength));
    if (len < lenToCopy) {
      lenToCopy = len;
    }
    System.arraycopy(csum, (int) (currentOffset - dataLength), b, off,
        lenToCopy);
    currentOffset += lenToCopy;
    return lenToCopy;
  }
  int bytesRead = doRead(b, off, len);
  if (currentOffset == dataLength) {
    // Data ran out exactly here; piggyback the whole checksum if it fits
    // in the caller's remaining buffer space.
    if (len >= bytesRead + checksumSize) {
      System.arraycopy(csum, 0, b, off + bytesRead, checksumSize);
      bytesRead += checksumSize;
      currentOffset += checksumSize;
    }
  }
  return bytesRead;
}
java
{ "resource": "" }
q160770
SchedulerForType.scheduleTasks
train
private Map<String, List<ResourceGrant>> scheduleTasks() { fullyScheduled = false; long nodeWait = configManager.getLocalityWait(type, LocalityLevel.NODE); long rackWait = configManager.getLocalityWait(type, LocalityLevel.RACK); int tasksToSchedule = configManager.getGrantsPerIteration(); Map<String, List<ResourceGrant>> sessionIdToGranted = new HashMap<String, List<ResourceGrant>>(); for (int i = 0; i < tasksToSchedule; i++) { ScheduledPair scheduled = scheduleOneTask(nodeWait, rackWait); if (scheduled == null) { // Cannot find matched request-node anymore. We are done. fullyScheduled = true; break; } List<ResourceGrant> granted = sessionIdToGranted.get(scheduled.sessionId.toString()); if (granted == null) { granted = new LinkedList<ResourceGrant>(); sessionIdToGranted.put(scheduled.sessionId.toString(), granted); } granted.add(scheduled.grant); } return sessionIdToGranted; }
java
{ "resource": "" }
q160771
SchedulerForType.scheduleOneTask
train
/**
 * Try to schedule exactly one task.
 *
 * Walks pool groups, pools, and sessions in scheduling-priority order
 * (each level's getScheduleQueue()), skipping entities at their maximum.
 * On a successful grant the polled group/pool/session are re-queued so
 * they can compete for the next grant.
 *
 * @param nodeWait locality wait time for node-local placement
 * @param rackWait locality wait time for rack-local placement
 * @return the scheduled (session id, grant) pair, or null when nothing
 *         can be matched any more
 */
private ScheduledPair scheduleOneTask(long nodeWait, long rackWait) {
  if (!nodeManager.existRunnableNodes(type)) {
    return null;
  }
  Queue<PoolGroupSchedulable> poolGroupQueue =
      poolGroupManager.getScheduleQueue();
  while (!poolGroupQueue.isEmpty()) {
    PoolGroupSchedulable poolGroup = poolGroupQueue.poll();
    if (poolGroup.reachedMaximum()) {
      continue;
    }
    // Get the appropriate pool from the pool group to schedule, then
    // schedule the best session
    Queue<PoolSchedulable> poolQueue = poolGroup.getScheduleQueue();
    while (!poolQueue.isEmpty()) {
      PoolSchedulable pool = poolQueue.poll();
      if (pool.reachedMaximum()) {
        continue;
      }
      Queue<SessionSchedulable> sessionQueue = pool.getScheduleQueue();
      while (!sessionQueue.isEmpty()) {
        SessionSchedulable schedulable = sessionQueue.poll();
        Session session = schedulable.getSession();
        long now = ClusterManager.clock.getTime();
        MatchedPair pair = doMatch(schedulable, now, nodeWait, rackWait);
        synchronized (session) {
          // Sessions may be deleted concurrently; skip those.
          if (session.isDeleted()) {
            continue;
          }
          if (pair != null) {
            ResourceGrant grant = commitMatchedResource(session, pair);
            if (grant != null) {
              poolGroup.incGranted(1);
              pool.incGranted(1);
              schedulable.incGranted(1);
              // Put back to the queue only if we scheduled successfully
              poolGroupQueue.add(poolGroup);
              poolQueue.add(pool);
              sessionQueue.add(schedulable);
              return new ScheduledPair(
                  session.getSessionId().toString(), grant);
            }
          }
        }
      }
    }
  }
  return null;
}
java
{ "resource": "" }
q160772
SchedulerForType.doMatch
train
/**
 * Try to match one of the session's pending requests to a runnable node,
 * honoring locality: better locality levels are tried first, subject to
 * the configured node/rack locality waits.
 *
 * Picks the cheaper iteration direction: if the session has fewer pending
 * requests than there are runnable hosts, iterate requests
 * (matchNodeForSession); otherwise iterate nodes (matchSessionForNode).
 *
 * @return a matched pair, or null (after starting the locality-wait
 *         clock) if nothing can be placed right now
 */
private MatchedPair doMatch(
    SessionSchedulable schedulable, long now, long nodeWait, long rackWait) {
  schedulable.adjustLocalityRequirement(now, nodeWait, rackWait);
  for (LocalityLevel level : neededLocalityLevels) {
    if (level.isBetterThan(schedulable.getLastLocality())) {
      /**
       * This means that the last time we tried to schedule this session
       * we could not achieve the current LocalityLevel level.
       * Since this is the same iteration of the scheduler we do not need
       * to try this locality level.
       * The last locality level of the schedulable is reset on every
       * iteration of the scheduler, so we will retry the better
       * localities in the next run of the scheduler.
       */
      continue;
    }
    if (needLocalityCheck(level, nodeWait, rackWait) &&
        !schedulable.isLocalityGoodEnough(level)) {
      break;
    }
    Session session = schedulable.getSession();
    synchronized (session) {
      if (session.isDeleted()) {
        return null;
      }
      int pendingRequestCount = session.getPendingRequestCountForType(type);
      MatchedPair matchedPair = null;
      if (nodeSnapshot == null ||
          pendingRequestCount < nodeSnapshot.getRunnableHostCount()) {
        matchedPair = matchNodeForSession(session, level);
      } else {
        matchedPair = matchSessionForNode(session, level);
      }
      if (matchedPair != null) {
        schedulable.setLocalityLevel(level);
        return matchedPair;
      }
    }
  }
  // Nothing placed: start the locality-wait clock for this schedulable.
  schedulable.startLocalityWait(now);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Could not find a node for " +
        schedulable.getSession().getHandle());
  }
  return null;
}
java
{ "resource": "" }
q160773
SchedulerForType.matchNodeForSession
train
private MatchedPair matchNodeForSession( Session session, LocalityLevel level) { Iterator<ResourceRequestInfo> pendingRequestIterator = session.getPendingRequestIteratorForType(type); while (pendingRequestIterator.hasNext()) { ResourceRequestInfo req = pendingRequestIterator.next(); Set<String> excluded = req.getExcludeHosts(); if (req.getHosts() == null || req.getHosts().size() == 0) { // No locality requirement String host = null; ClusterNode node = nodeManager.getRunnableNode( host, LocalityLevel.ANY, type, excluded); if (node != null) { return new MatchedPair(node, req); } continue; } for (RequestedNode requestedNode : req.getRequestedNodes()) { ClusterNode node = nodeManager.getRunnableNode( requestedNode, level, type, excluded); if (node != null) { return new MatchedPair(node, req); } } } return null; }
java
{ "resource": "" }
q160774
SchedulerForType.matchSessionForNode
train
/**
 * Find a pending request of this session that can be served by one of the
 * currently runnable nodes, iterating nodes (hosts for NODE/ANY, racks
 * for RACK) instead of requests. Counterpart of matchNodeForSession,
 * chosen by doMatch when the session has at least as many pending
 * requests as there are runnable hosts.
 *
 * @return a matched (node, request) pair, or null if none fits
 */
private MatchedPair matchSessionForNode(
    Session session, LocalityLevel level) {
  if (level == LocalityLevel.NODE || level == LocalityLevel.ANY) {
    Set<Map.Entry<String, NodeContainer>> hostNodesSet =
        nodeSnapshot.runnableHosts();
    for (Map.Entry<String, NodeContainer> hostNodes : hostNodesSet) {
      Iterator<ClusterNode> clusterNodeIt = hostNodes.getValue().iterator();
      while (clusterNodeIt.hasNext()) {
        ClusterNode node = clusterNodeIt.next();
        // Skip nodes that cannot fit another grant.
        if (!nodeManager.hasEnoughResource(node)) {
          continue;
        }
        ResourceRequestInfo req = null;
        if (level == LocalityLevel.NODE) {
          req = session.getPendingRequestOnHost(node.getHost(), type);
        } else {
          req = session.getPendingRequestForAny(node.getHost(), type);
        }
        if (req != null) {
          return new MatchedPair(node, req);
        }
      }
    }
  } else if (level == LocalityLevel.RACK) {
    Set<Map.Entry<Node, NodeContainer>> rackNodesSet =
        nodeSnapshot.runnableRacks();
    for (Map.Entry<Node, NodeContainer> rackNodes : rackNodesSet) {
      Node rack = rackNodes.getKey();
      NodeContainer nodes = rackNodes.getValue();
      Iterator<ClusterNode> clusterNodeIt = nodes.iterator();
      while (clusterNodeIt.hasNext()) {
        ClusterNode node = clusterNodeIt.next();
        if (!nodeManager.hasEnoughResource(node)) {
          continue;
        }
        ResourceRequestInfo req = session.getPendingRequestOnRack(
            node.getHost(), rack, type);
        if (req != null) {
          return new MatchedPair(node, req);
        }
      }
    }
  }
  return null;
}
java
{ "resource": "" }
q160775
SchedulerForType.needLocalityCheck
train
/**
 * Whether the given locality level requires a locality-wait check.
 * A zero wait disables the check for that level; other levels never
 * need one.
 */
private boolean needLocalityCheck(
    LocalityLevel level, long nodeWait, long rackWait) {
  switch (level) {
  case NODE:
    return nodeWait != 0;
  case RACK:
    return rackWait != 0;
  default:
    return false;
  }
}
java
{ "resource": "" }
q160776
SchedulerForType.commitMatchedResource
train
/**
 * Commit a matched (node, request) pair: record the grant on the node via
 * the node manager and, on success, register it with the session manager.
 * Afterwards, prune the node from the snapshot if it was deleted or can
 * no longer fit a unit request of this type.
 *
 * @return the committed grant, or null if the node could not accept it
 *         (e.g. the node was deleted in the meantime)
 */
private ResourceGrant commitMatchedResource(
    Session session, MatchedPair pair) {
  ResourceGrant grant = null;
  ResourceRequestInfo req = pair.req;
  ClusterNode node = pair.node;
  String appInfo = nodeManager.getAppInfo(node, type);
  if (appInfo != null) {
    if (nodeManager.addGrant(node, session.getSessionId(), req)) {
      // if the nodemanager can commit this grant - we are done
      // the commit can fail if the node has been deleted
      grant = new ResourceGrant(req.getId(), node.getName(),
          node.getAddress(), ClusterManager.clock.getTime(), req.getType());
      grant.setAppInfo(appInfo);
      sessionManager.grantResource(session, req, grant);
    }
  }
  if (nodeSnapshot != null) {
    // Keep the snapshot consistent: drop nodes that are gone or full.
    synchronized (node) {
      if (node.deleted) {
        nodeSnapshot.removeNode(node);
      } else if (!node.checkForGrant(Utilities.getUnitResourceRequest(type),
          nodeManager.getResourceLimit())) {
        nodeSnapshot.removeNode(node);
      }
    }
  }
  return grant;
}
java
{ "resource": "" }
q160777
SchedulerForType.doPreemption
train
/**
 * Run a preemption pass, but at most once per PREEMPTION_PERIOD.
 */
private void doPreemption() {
  final long now = ClusterManager.clock.getTime();
  if (now - lastPreemptionTime <= PREEMPTION_PERIOD) {
    return; // Too soon since the last pass.
  }
  lastPreemptionTime = now;
  doPreemptionNow();
}
java
{ "resource": "" }
q160778
SchedulerForType.doPreemptionNow
train
/**
 * Redistribute shares across pool groups and pools, then preempt tasks
 * if any pool is below its share.
 */
private void doPreemptionNow() {
  final int totalShare = nodeManager.getAllocatedCpuForType(type);
  poolGroupManager.distributeShare(totalShare);
  for (PoolGroupSchedulable group : poolGroupManager.getPoolGroups()) {
    group.distributeShare();
  }
  final int deficit = countTasksShouldPreempt();
  if (deficit <= 0) {
    return;
  }
  LOG.info("Found " + deficit + " " + type + " tasks to preempt");
  preemptTasks(deficit);
}
java
{ "resource": "" }
q160779
SchedulerForType.preemptTasks
train
/**
 * Preempt the given number of tasks. Whenever a round preempts nothing,
 * the max-running-time cutoff is doubled and another round is attempted,
 * for at most the configured number of rounds (or until the doubled
 * cutoff overflows to a non-positive value).
 *
 * @param tasksToPreempt total number of tasks to preempt
 */
private void preemptTasks(int tasksToPreempt) {
  LOG.info("Start preempt " + tasksToPreempt + " for type " + type);
  long maxRunningTime = configManager.getPreemptedTaskMaxRunningTime();
  int rounds = configManager.getPreemptionRounds();
  while (tasksToPreempt > 0) {
    int preempted = preemptOneSession(tasksToPreempt, maxRunningTime);
    if (preempted == 0) {
      // Nothing eligible at this cutoff; relax it and try again.
      maxRunningTime *= 2;
      // Check for enough rounds or an overflow
      if (--rounds <= 0 || maxRunningTime <= 0) {
        LOG.warn("Cannot preempt enough " + type + " tasks " +
            " rounds " + configManager.getPreemptionRounds() +
            " maxRunningTime " + maxRunningTime +
            " tasks not preempted:" + tasksToPreempt);
        return;
      }
    }
    tasksToPreempt -= preempted;
  }
}
java
{ "resource": "" }
q160780
SchedulerForType.preemptOneSession
train
/**
 * Preempt grants from the first over-scheduled session found while
 * walking pool groups and pools in preemption-priority order.
 *
 * NOTE(review): the finally block re-adds the polled poolGroup/pool on
 * every session iteration, so a session skipped via "continue" still
 * pushes duplicate entries onto the outer queues — this looks
 * unintended; confirm before changing.
 *
 * @param maxToPreemt upper bound on the number of grants to preempt
 * @param maxRunningTime only preempt tasks running at most this long
 * @return the number of grants preempted (0 if nothing was eligible)
 */
private int preemptOneSession(int maxToPreemt, long maxRunningTime) {
  Queue<PoolGroupSchedulable> poolGroupQueue =
      poolGroupManager.getPreemptQueue();
  while (!poolGroupQueue.isEmpty()) {
    PoolGroupSchedulable poolGroup = poolGroupQueue.poll();
    poolGroup.distributeShare();
    Queue<PoolSchedulable> poolQueue = poolGroup.getPreemptQueue();
    while (!poolQueue.isEmpty()) {
      PoolSchedulable pool = poolQueue.poll();
      pool.distributeShare();
      if (!pool.isPreemptable()) {
        continue;
      }
      Queue<SessionSchedulable> sessionQueue = pool.getPreemptQueue();
      while (!sessionQueue.isEmpty()) {
        SessionSchedulable schedulable = sessionQueue.poll();
        try {
          // Only sessions granted more than their share are candidates.
          int overScheduled =
              (int) (schedulable.getGranted() - schedulable.getShare());
          if (overScheduled <= 0) {
            continue;
          }
          maxToPreemt = Math.min(maxToPreemt, overScheduled);
          LOG.info("Trying to preempt " + maxToPreemt + " " + type +
              " from " + schedulable.getSession().getHandle());
          int preempted = preemptSession(
              schedulable, maxToPreemt, maxRunningTime);
          poolGroup.incGranted(-1 * preempted);
          pool.incGranted(-1 * preempted);
          schedulable.incGranted(-1 * preempted);
          return preempted;
        } catch (InvalidSessionHandle e) {
          LOG.warn("Invalid session handle:" +
              schedulable.getSession().getHandle() +
              " Session may be removed");
        } finally {
          // Add back the queue so it can be further preempt for other
          // sessions.
          poolGroupQueue.add(poolGroup);
          poolQueue.add(pool);
        }
      }
    }
  }
  return 0;
}
java
{ "resource": "" }
q160781
SchedulerForType.preemptSession
train
/**
 * Preempt up to {@code maxToPreemt} grants from one session: ask the
 * session which grants to give up, revoke them, cancel each on its node,
 * and notify the session.
 *
 * @return the number of grants actually revoked
 * @throws InvalidSessionHandle if the session disappeared meanwhile
 */
private int preemptSession(SessionSchedulable schedulable, int maxToPreemt,
    long maxRunningTime) throws InvalidSessionHandle {
  final Session session = schedulable.getSession();
  final List<Integer> toRevoke;
  synchronized (session) {
    toRevoke = session.getGrantsToPreempt(maxToPreemt, maxRunningTime, type);
  }
  final List<ResourceGrant> revoked =
      sessionManager.revokeResource(session.getHandle(), toRevoke);
  for (ResourceGrant g : revoked) {
    nodeManager.cancelGrant(g.nodeName, session.getSessionId(), g.getId());
  }
  sessionNotifier.notifyRevokeResource(session.getHandle(), revoked, true);
  final int count = revoked.size();
  LOG.info("Preempt " + count + " " + type +
      " tasks for Session:" + session.getHandle());
  return count;
}
java
{ "resource": "" }
q160782
SchedulerForType.countTasksShouldPreempt
train
/**
 * Count how many tasks should be preempted so every starving pool can
 * reach its share, bounded by each pool's pending demand.
 */
private int countTasksShouldPreempt() {
  int total = 0;
  final long now = ClusterManager.clock.getTime();
  for (PoolGroupSchedulable group : poolGroupManager.getPoolGroups()) {
    for (PoolSchedulable pool : group.getPools()) {
      if (!pool.isStarving(now)) {
        continue;
      }
      total += Math.min(pool.getPending(),
                        pool.getShare() - pool.getGranted());
    }
  }
  return total;
}
java
{ "resource": "" }
q160783
SchedulerForType.addSession
train
/**
 * Register a session with this scheduler's pool-group manager.
 *
 * @param id the session id
 * @param session the session being added
 */
public void addSession(String id, Session session) {
  poolGroupManager.addSession(id, session);
  LOG.info("Session " + id + " has been added to " + type + " scheduler");
}
java
{ "resource": "" }
q160784
MapFileOutputFormat.getEntry
train
/**
 * Get an entry from output generated by this class. The partition owning
 * the key is computed with the same partitioner that produced the output.
 *
 * @return the value found for key, or null if absent
 */
public static <K extends WritableComparable, V extends Writable>
    Writable getEntry(MapFile.Reader[] readers,
                      Partitioner<K, V> partitioner,
                      K key, V value) throws IOException {
  final int partition =
      partitioner.getPartition(key, value, readers.length);
  return readers[partition].get(key, value);
}
java
{ "resource": "" }
q160785
LogParser.monitor
train
public void monitor(LocalStore ls) { int in = 0; EventRecord er = null; Environment.logInfo("Started processing log..."); while ((er = getNext()) != null) { // Environment.logInfo("Processing log line:\t" + in++); if (er.isValid()) { ls.insert(er); } } PersistentState.updateState(file.getAbsolutePath(), firstLine, offset); PersistentState.writeState("conf/parsing.state"); }
java
{ "resource": "" }
q160786
LogParser.monitor
train
/**
 * Read and parse every remaining record in the log.
 *
 * @return all parsed records, in order, as an array
 */
public EventRecord[] monitor() {
  ArrayList<EventRecord> records = new ArrayList<EventRecord>();
  for (EventRecord rec = getNext(); rec != null; rec = getNext()) {
    records.add(rec);
  }
  return records.toArray(new EventRecord[records.size()]);
}
java
{ "resource": "" }
q160787
LogParser.getNext
train
/**
 * Parse the next line of the log and return it as an EventRecord.
 *
 * Side effects: remembers the first line seen (used to detect rotation)
 * and advances the byte offset, assuming a 1-character line separator.
 *
 * @return the parsed record, or null at EOF or on read error
 */
public EventRecord getNext() {
  try {
    String line = reader.readLine();
    if (line != null) {
      if (firstLine == null) {
        firstLine = line; // Strings are immutable; no defensive copy needed
      }
      offset += line.length() + 1;
      return parseLine(line);
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  return null;
}
java
{ "resource": "" }
q160788
LogParser.checkForRotation
train
public void checkForRotation() { try { BufferedReader probe = new BufferedReader(new FileReader(file.getAbsoluteFile())); if (firstLine == null || (!firstLine.equals(probe.readLine()))) { probe.close(); // start reading the file from the beginning reader.close(); reader = new BufferedReader(new FileReader(file.getAbsoluteFile())); firstLine = null; offset = 0; } } catch (IOException e) { e.printStackTrace(); } }
java
{ "resource": "" }
q160789
MapOutputCorrectness.getPartitionStatic
train
/**
 * Partition a key by its absolute value modulo the number of partitions.
 *
 * @return a partition index in [0, numPartitions)
 */
public static int getPartitionStatic(LongWritable key, LongWritable value,
    int numPartitions) {
  long k = key.get();
  // Math.abs(Long.MIN_VALUE) is still negative, which made the old code
  // return a negative (invalid) partition; map that single value to 0.
  long magnitude = (k == Long.MIN_VALUE) ? 0 : Math.abs(k);
  return (int) (magnitude % numPartitions);
}
java
{ "resource": "" }
q160790
MapOutputCorrectness.getMapperId
train
/**
 * Recover the mapper id that produced the given sum key: normalize by
 * the first sum key, then divide by the reducer count.
 */
private static int getMapperId(long key, int numReducers, int maxKeySpace) {
  final long normalized = key - getFirstSumKey(numReducers, maxKeySpace);
  return (int) (normalized / numReducers);
}
java
{ "resource": "" }
q160791
MapOutputCorrectness.getAttemptId
train
/**
 * Extract the numeric attempt id from the "mapred.task.id" property.
 * Expected format: attempt_&lt;jt&gt;_&lt;job&gt;_&lt;m|r&gt;_&lt;task&gt;_&lt;attempt&gt;.
 *
 * @throws NullPointerException if conf is null
 * @throws IllegalArgumentException if the property is missing or malformed
 */
public static int getAttemptId(Configuration conf)
    throws IllegalArgumentException {
  if (conf == null) {
    throw new NullPointerException("Conf is null");
  }
  String taskId = conf.get("mapred.task.id");
  if (taskId == null) {
    throw new IllegalArgumentException(
        "Configuration does not contain the property mapred.task.id");
  }
  String[] parts = taskId.split("_");
  final boolean wellFormed = parts.length == 6
      && parts[0].equals("attempt")
      && ("m".equals(parts[3]) || "r".equals(parts[3]));
  if (!wellFormed) {
    throw new IllegalArgumentException(
        "TaskAttemptId string : " + taskId + " is not properly formed");
  }
  return Integer.parseInt(parts[5]);
}
java
{ "resource": "" }
q160792
DatanodeID.updateRegInfo
train
/**
 * Refresh this node's transient identity fields from an incoming
 * registration: name, HTTP info port, and IPC port.
 *
 * @param nodeReg the freshly registered datanode to copy fields from
 */
public void updateRegInfo(DatanodeID nodeReg) {
  name = nodeReg.getName();
  infoPort = nodeReg.getInfoPort();
  ipcPort = nodeReg.getIpcPort();
  // update any more fields added in future.
}
java
{ "resource": "" }
q160793
ErasureCode.decodeBulk
train
/**
 * Decode many symbols at once by invoking the single-symbol decode()
 * for every byte position across the input buffers.
 *
 * @param readBufs one buffer per code position (all the same length)
 * @param writeBufs output buffers, one per erased location
 * @param erasedLocations indices of the erased positions to reconstruct
 */
public void decodeBulk(byte[][] readBufs, byte[][] writeBufs,
    int[] erasedLocations) throws IOException {
  final int numBytes = readBufs[0].length;
  final int[] inputSymbols = new int[readBufs.length];
  final int[] outputSymbols = new int[erasedLocations.length];
  for (int pos = 0; pos < numBytes; pos++) {
    for (int j = 0; j < outputSymbols.length; j++) {
      outputSymbols[j] = 0;
    }
    for (int j = 0; j < inputSymbols.length; j++) {
      inputSymbols[j] = readBufs[j][pos] & 0x000000FF;
    }
    decode(inputSymbols, erasedLocations, outputSymbols);
    for (int j = 0; j < outputSymbols.length; j++) {
      writeBufs[j][pos] = (byte) outputSymbols[j];
    }
  }
}
java
{ "resource": "" }
q160794
LexicographicalComparerHolder.getBestComparer
train
static Comparer<byte[]> getBestComparer() { try { Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME); // yes, UnsafeComparer does implement Comparer<byte[]> @SuppressWarnings("unchecked") Comparer<byte[]> comparer = (Comparer<byte[]>) theClass .getEnumConstants()[0]; return comparer; } catch (Throwable t) { // ensure we really catch *everything* LOG.error("Loading lexicographicalComparerJavaImpl..."); return lexicographicalComparerJavaImpl(); } }
java
{ "resource": "" }
q160795
ContextFactory.getAttributeNames
train
public String[] getAttributeNames() { String[] result = new String[attributeMap.size()]; int i = 0; // for (String attributeName : attributeMap.keySet()) { Iterator it = attributeMap.keySet().iterator(); while (it.hasNext()) { result[i++] = (String) it.next(); } return result; }
java
{ "resource": "" }
q160796
ContextFactory.getNullContext
train
/**
 * Returns a "null" context — one which does nothing. One instance is
 * created lazily and cached per context name.
 */
public static synchronized MetricsContext getNullContext(String contextName) {
  MetricsContext cached = nullContextMap.get(contextName);
  if (cached != null) {
    return cached;
  }
  MetricsContext created = new NullContext();
  nullContextMap.put(contextName, created);
  return created;
}
java
{ "resource": "" }
q160797
CommandFormat.parse
train
/**
 * Parse the arguments starting at the given position.
 *
 * A token starting with "-" is treated as an option when registered in
 * the options map (its flag is set to TRUE); otherwise it must parse as
 * a number — allowing negative numeric parameters — or it is rejected.
 * All other tokens are positional parameters.
 *
 * @param args the args to parse
 * @param pos the index in args to start from
 * @return the positional parameters
 * @throws IllegalArgumentException on an unknown option, or when the
 *         parameter count falls outside [minPar, maxPar]
 */
public List<String> parse(String[] args, int pos) {
  List<String> parameters = new ArrayList<String>();
  for (; pos < args.length; pos++) {
    if (args[pos].charAt(0) == '-' && args[pos].length() > 1) {
      String opt = args[pos].substring(1);
      if (options.containsKey(opt))
        options.put(opt, Boolean.TRUE);
      else {
        try {
          // Not a known option: accept only if it is a (negative) number.
          Long.parseLong(args[pos]);
          parameters.add(args[pos]);
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException("Illegal option " + args[pos]);
        }
      }
    } else
      parameters.add(args[pos]);
  }
  int psize = parameters.size();
  if (psize < minPar || psize > maxPar)
    throw new IllegalArgumentException("Illegal number of arguments");
  return parameters;
}
java
{ "resource": "" }
q160798
FileContext.emitRecord
train
/**
 * Emit one metrics record as a text line: timestamp, context.record, then
 * comma-separated "tag=value" and "metric=value" pairs.
 *
 * Rotation: when a file name is configured, a new file is opened whenever
 * the day-of-month differs from the previous record's.
 * NOTE(review): only DAY_OF_MONTH is compared, so a month boundary that
 * lands on the same day-of-month would not trigger rotation — confirm
 * whether that is acceptable.
 *
 * @param contextName name of the metrics context
 * @param recordName name of the record within the context
 * @param outRec the tags and metrics to emit
 */
public void emitRecord(String contextName, String recordName,
    OutputRecord outRec) throws IOException {
  Calendar currentDate = Calendar.getInstance();
  if (fileName != null) {
    if (currentDate.get(Calendar.DAY_OF_MONTH) !=
        lastRecordDate.get(Calendar.DAY_OF_MONTH)) {
      // rotate to a new context file
      file = new File(getFullFileName(currentDate));
      if (writer != null)
        writer.close();
      writer = new PrintWriter(new FileWriter(file, true));
    }
  }
  writer.print(recordDateFormat.format(currentDate.getTime()));
  writer.print(" ");
  writer.print(contextName);
  writer.print(".");
  writer.print(recordName);
  // First separator introduces the pair list; later pairs are comma-joined.
  String separator = ": ";
  for (String tagName : outRec.getTagNames()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tagName);
    writer.print("=");
    writer.print(outRec.getTag(tagName));
  }
  for (String metricName : outRec.getMetricNames()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metricName);
    writer.print("=");
    writer.print(outRec.getMetric(metricName));
  }
  writer.println();
  lastRecordDate = currentDate;
}
java
{ "resource": "" }
q160799
BlockReader.adjustChecksumBytes
train
/**
 * Make sure checksumBytes has enough capacity for dataLen bytes' worth
 * of checksum chunks, then set its limit to exactly the required size.
 */
private void adjustChecksumBytes(int dataLen) {
  final int chunks = (dataLen + bytesPerChecksum - 1) / bytesPerChecksum;
  final int requiredSize = chunks * checksumSize;
  if (checksumBytes == null || requiredSize > checksumBytes.capacity()) {
    checksumBytes = ByteBuffer.wrap(new byte[requiredSize]);
  } else {
    checksumBytes.clear();
  }
  checksumBytes.limit(requiredSize);
}
java
{ "resource": "" }