_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q161700
CompositeInputSplit.add
train
/**
 * Appends one child split to this composite and accumulates its length.
 *
 * @param s the split to add
 * @throws IOException if the composite was never initialized or is full
 */
public void add(InputSplit s) throws IOException {
  // Guard: the backing array must exist before anything can be added.
  if (splits == null) {
    throw new IOException("Uninitialized InputSplit");
  }
  // Guard: capacity is fixed at construction time.
  if (fill == splits.length) {
    throw new IOException("Too many splits");
  }
  splits[fill++] = s;
  totsize += s.getLength();
}
java
{ "resource": "" }
q161701
CompositeInputSplit.getLocations
train
/**
 * Collects the union of location hints from all child splits.
 *
 * @return the de-duplicated host names across every child split
 * @throws IOException if a child split fails to report its locations
 */
public String[] getLocations() throws IOException {
  Set<String> uniqueHosts = new HashSet<String>();
  for (InputSplit child : splits) {
    String[] hints = child.getLocations();
    if (hints == null || hints.length == 0) {
      continue;  // child has no locality information
    }
    for (String hint : hints) {
      uniqueHosts.add(hint);
    }
  }
  return uniqueHosts.toArray(new String[uniqueHosts.size()]);
}
java
{ "resource": "" }
q161702
ConnectionWatcher.await
train
/**
 * Blocks until the connection latch is released or the timeout elapses.
 *
 * @param timeoutMillis maximum time to wait, in milliseconds
 * @return true if the latch was released within the timeout, false otherwise
 * @throws IOException propagated from the interruption handler
 */
public boolean await(long timeoutMillis) throws IOException {
  try {
    return connectLatch.await(timeoutMillis, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    String message =
        "Interrupted waiting for connection (timeout = " + timeoutMillis + "ms.)";
    // Delegate interrupt bookkeeping to the shared helper.
    interruptedException(message, e);
    return false;
  }
}
java
{ "resource": "" }
q161703
Session.readIdToRequest
train
private void readIdToRequest(CoronaSerializer coronaSerializer) throws IOException { coronaSerializer.readField("idToRequest"); // Expecting the START_OBJECT token for idToRequest coronaSerializer.readStartObjectToken("idToRequest"); JsonToken current = coronaSerializer.nextToken(); while (current != JsonToken.END_OBJECT) { Integer id = Integer.parseInt(coronaSerializer.getFieldName()); idToRequest.put(id, new ResourceRequestInfo(coronaSerializer)); current = coronaSerializer.nextToken(); } // Done with reading the END_OBJECT token for idToRequest }
java
{ "resource": "" }
q161704
Session.readIdToPendingRequests
train
private void readIdToPendingRequests(CoronaSerializer coronaSerializer) throws IOException { coronaSerializer.readField("idToPendingRequests"); // Expecting the START_ARRAY token for idToPendingRequests coronaSerializer.readStartArrayToken("idToPendingRequests"); JsonToken current = coronaSerializer.nextToken(); while (current != JsonToken.END_ARRAY) { pendingRequestsList.add(coronaSerializer.jsonParser.getIntValue()); current = coronaSerializer.nextToken(); } // Done with reading the END_ARRAY token for idToPendingRequests }
java
{ "resource": "" }
q161705
Session.readIdToGrant
train
private void readIdToGrant(CoronaSerializer coronaSerializer) throws IOException { coronaSerializer.readField("idToGrant"); // Expecting the START_OBJECT token for idToGrant coronaSerializer.readStartObjectToken("idToGrant"); JsonToken current = coronaSerializer.nextToken(); while (current != JsonToken.END_OBJECT) { Integer id = Integer.parseInt(coronaSerializer.getFieldName()); ResourceGrant resourceGrant = coronaSerializer.readValueAs(ResourceGrant.class); idToGrant.put(id, new ResourceGrant(resourceGrant)); current = coronaSerializer.nextToken(); } // Done with reading the END_OBJECT token for idToGrant }
java
{ "resource": "" }
q161706
Session.readTypeToFirstWait
train
private void readTypeToFirstWait(CoronaSerializer coronaSerializer) throws IOException { coronaSerializer.readField("typeToFirstWait"); // Expecting the START_OBJECT token for typeToFirstWait coronaSerializer.readStartObjectToken("typeToFirstWait"); JsonToken current = coronaSerializer.nextToken(); while (current != JsonToken.END_OBJECT) { String resourceTypeStr = coronaSerializer.getFieldName(); Long wait = coronaSerializer.readValueAs(Long.class); current = coronaSerializer.nextToken(); if (wait == -1) { wait = null; } typeToFirstWait.put(ResourceType.valueOf(resourceTypeStr), wait); } // Done with reading the END_OBJECT token for typeToFirstWait }
java
{ "resource": "" }
q161707
Session.write
train
public void write(JsonGenerator jsonGenerator) throws IOException { jsonGenerator.writeStartObject(); jsonGenerator.writeFieldName("idToRequest"); jsonGenerator.writeStartObject(); for (Integer id : idToRequest.keySet()) { jsonGenerator.writeFieldName(id.toString()); idToRequest.get(id).write(jsonGenerator); } jsonGenerator.writeEndObject(); // idToPendingRequests is the same, and we only need to persist the // array of pending request ids jsonGenerator.writeFieldName("idToPendingRequests"); jsonGenerator.writeStartArray(); for (Integer id : idToPendingRequests.keySet()) { jsonGenerator.writeNumber(id); } jsonGenerator.writeEndArray(); jsonGenerator.writeFieldName("idToGrant"); jsonGenerator.writeStartObject(); for (Integer id : idToGrant.keySet()) { jsonGenerator.writeObjectField(id.toString(), idToGrant.get(id)); } jsonGenerator.writeEndObject(); jsonGenerator.writeObjectField("status", status); jsonGenerator.writeStringField("sessionId", sessionId); jsonGenerator.writeBooleanField("deleted", deleted); jsonGenerator.writeNumberField("deletedTime", deletedTime); jsonGenerator.writeObjectField("info", info); jsonGenerator.writeNumberField("startTime", startTime); jsonGenerator.writeFieldName("poolInfo"); poolInfo.write(jsonGenerator); jsonGenerator.writeFieldName("typeToFirstWait"); jsonGenerator.writeStartObject(); for (ResourceType resourceType : typeToFirstWait.keySet()) { Long wait = typeToFirstWait.get(resourceType); if (wait == null) { wait = new Long(-1); } jsonGenerator.writeNumberField(resourceType.toString(), wait); } jsonGenerator.writeEndObject(); jsonGenerator.writeObjectField("expectedInfo", expectedInfo); jsonGenerator.writeObjectField("lastHeartbeat", lastHeartbeat); jsonGenerator.writeNumberField("lastSyncTime", lastSyncTime); jsonGenerator.writeEndObject(); // No need to serialize lastHeartbeatTime, it will be reset. // typeToContext can be rebuilt }
java
{ "resource": "" }
q161708
Session.restoreAfterSafeModeRestart
train
/**
 * Rebuilds the derived pending/granted bookkeeping after a safe-mode
 * restart, using the persisted pending id list and grant map.
 */
public void restoreAfterSafeModeRestart() {
  // Re-register every request that was still pending.
  for (Integer pendingId : pendingRequestsList) {
    ResourceRequestInfo pending = idToRequest.get(pendingId);
    incrementRequestCount(pending.getType(), 1);
    addPendingRequest(pending);
  }
  // Re-register every request that already had a grant.
  for (Integer grantedId : idToGrant.keySet()) {
    ResourceRequestInfo granted = idToRequest.get(grantedId);
    incrementRequestCount(granted.getType(), 1);
    addGrantedRequest(granted);
  }
}
java
{ "resource": "" }
q161709
Session.updateInfoUrlAndName
train
/**
 * Updates the mutable display fields (url and name) on this session's info.
 *
 * @param url the new info url
 * @param name the new info name
 */
public void updateInfoUrlAndName(String url, String name) {
  // The two assignments are independent; order does not matter.
  this.info.name = name;
  this.info.url = url;
}
java
{ "resource": "" }
q161710
Session.getPendingRequestOnHost
train
public ResourceRequestInfo getPendingRequestOnHost( String host, ResourceType type) { Context c = getContext(type); List<ResourceRequestInfo> hostReqs = c.hostToPendingRequests.get(host); if (hostReqs != null) { // Returning the first element in the list makes the subsequent // call to Utilities.removeReference() very fast. return hostReqs.get(0); } return null; }
java
{ "resource": "" }
q161711
Session.getPendingRequestOnRack
train
/**
 * Finds a pending request of the given type on the given rack whose
 * exclude-host set does not contain the candidate host.
 *
 * @param host the candidate host (checked against each request's excludes)
 * @param rack the rack bucket to search
 * @param type the resource type bucket to search
 * @return a matching pending request, or null if none
 */
public ResourceRequestInfo getPendingRequestOnRack(
    String host, Node rack, ResourceType type) {
  List<ResourceRequestInfo> pendingOnRack =
      getContext(type).rackToPendingRequests.get(rack);
  if (pendingOnRack == null) {
    return null;
  }
  for (ResourceRequestInfo candidate : pendingOnRack) {
    // Skip requests that explicitly exclude this host.
    if (!candidate.getExcludeHosts().contains(host)) {
      return candidate;
    }
  }
  return null;
}
java
{ "resource": "" }
q161712
Session.getPendingRequestForAny
train
/**
 * Finds an any-host pending request of the given type whose exclude-host
 * set does not contain the candidate host.
 *
 * @param host the candidate host
 * @param type the resource type bucket to search
 * @return a matching pending request, or null if none
 */
public ResourceRequestInfo getPendingRequestForAny(
    String host, ResourceType type) {
  for (ResourceRequestInfo candidate : getContext(type).anyHostRequests) {
    // Skip requests that explicitly exclude this host.
    if (!candidate.getExcludeHosts().contains(host)) {
      return candidate;
    }
  }
  return null;
}
java
{ "resource": "" }
q161713
Session.getContext
train
/**
 * Returns the per-type Context, lazily creating and caching it on first use.
 *
 * @param type the resource type
 * @return the (possibly freshly created) context for that type
 */
protected Context getContext(ResourceType type) {
  Context existing = typeToContext.get(type);
  if (existing != null) {
    return existing;
  }
  Context created = new Context();
  typeToContext.put(type, created);
  return created;
}
java
{ "resource": "" }
q161714
Session.incrementRequestCount
train
/**
 * Adjusts the per-type request count by delta and tracks the high-water
 * mark of concurrent requests.
 *
 * @param type the resource type
 * @param delta the change to apply (may be negative)
 */
protected void incrementRequestCount(ResourceType type, int delta) {
  Context ctx = getContext(type);
  ctx.requestCount += delta;
  // Keep the running maximum up to date.
  if (ctx.requestCount > ctx.maxConcurrentRequestCount) {
    ctx.maxConcurrentRequestCount = ctx.requestCount;
  }
}
java
{ "resource": "" }
q161715
Session.addPendingRequestForType
train
/**
 * Adds the request to its type's pending list and bumps the pending count.
 *
 * @param req the request to track as pending
 */
protected void addPendingRequestForType(ResourceRequestInfo req) {
  Context ctx = getContext(req.getType());
  ctx.pendingRequests.add(req);
  ctx.pendingRequestCount++;
}
java
{ "resource": "" }
q161716
Session.removePendingRequestForType
train
/**
 * Removes the request from its type's pending list, decrementing the
 * pending count only if the reference was actually present.
 *
 * @param req the request to stop tracking as pending
 */
protected void removePendingRequestForType(ResourceRequestInfo req) {
  Context ctx = getContext(req.getType());
  // Utilities.removeReference returns null when nothing was removed.
  if (Utilities.removeReference(ctx.pendingRequests, req) != null) {
    ctx.pendingRequestCount--;
  }
}
java
{ "resource": "" }
q161717
Session.addGrantedRequest
train
protected void addGrantedRequest(ResourceRequestInfo req) { Context c = getContext(req.getType()); c.grantedRequests.add(req); // assume a granted resource is going to be fully used c.fulfilledRequestCount++; }
java
{ "resource": "" }
q161718
Session.removeGrantedRequest
train
protected void removeGrantedRequest( ResourceRequestInfo req, boolean isRevoked) { Context c = getContext(req.getType()); Utilities.removeReference(c.grantedRequests, req); // if revoked - we didn't fulfill this request if (isRevoked) { c.fulfilledRequestCount--; c.revokedRequestCount++; } }
java
{ "resource": "" }
q161719
Session.requestResource
train
/**
 * Registers a batch of new resource requests with this session.
 *
 * Duplicate request ids are logged and skipped. Each new request bumps the
 * per-type counters, is indexed as pending, and marks its type requested.
 *
 * @param requestList the requests to register
 * @throws RuntimeException if the session has been deleted
 */
public void requestResource(List<ResourceRequestInfo> requestList) {
  if (deleted) {
    throw new RuntimeException("Session: " + sessionId + " has been deleted");
  }
  for (ResourceRequestInfo req : requestList) {
    // put() returns the previous mapping; non-null means a duplicate id.
    boolean newRequest = idToRequest.put(req.getId(), req) == null;
    if (!newRequest) {
      // Fixed: dropped the pointless `+ ""` concatenation; message unchanged.
      LOG.warn("Duplicate request from Session: " + sessionId +
          " request: " + req.getId());
      continue;
    }
    incrementRequestCount(req.getType(), 1);
    addPendingRequest(req);
    setTypeRequested(req.getType());
  }
}
java
{ "resource": "" }
q161720
Session.setTypeRequested
train
/**
 * Records that a resource type has been requested. The null value marks
 * "requested but no first grant yet"; the first grant later replaces it
 * with the measured wait.
 *
 * @param type the resource type that was requested
 */
private void setTypeRequested(ResourceType type) {
  synchronized (typeToFirstWait) {
    if (typeToFirstWait.containsKey(type)) {
      return;  // already recorded
    }
    typeToFirstWait.put(type, null);
  }
}
java
{ "resource": "" }
q161721
Session.addPendingRequest
train
private void addPendingRequest(ResourceRequestInfo req) { idToPendingRequests.put(req.getId(), req); if (req.getHosts() != null && req.getHosts().size() > 0) { Context c = getContext(req.getType()); for (RequestedNode node: req.getRequestedNodes()) { String host = node.getHost(); List<ResourceRequestInfo> hostReqs = c.hostToPendingRequests.get(host); if (hostReqs == null) { hostReqs = new LinkedList<ResourceRequestInfo>(); c.hostToPendingRequests.put(host, hostReqs); } hostReqs.add(req); Node rack = node.getRack(); List<ResourceRequestInfo> rackReqs = c.rackToPendingRequests.get(rack); if (rackReqs == null) { rackReqs = new LinkedList<ResourceRequestInfo>(); c.rackToPendingRequests.put(rack, rackReqs); } rackReqs.add(req); } } // Always add to the "any" list. Context c = getContext(req.getType()); c.anyHostRequests.add(req); addPendingRequestForType(req); }
java
{ "resource": "" }
q161722
Session.removePendingRequest
train
/**
 * Removes a request from every pending index (id, host, rack, any-host),
 * pruning host/rack buckets that become empty.
 *
 * @param req the request to de-index
 */
private void removePendingRequest(ResourceRequestInfo req) {
  ResourceRequestInfo removed = idToPendingRequests.remove(req.getId());
  if (removed != null) {
    // Hoisted: the original resolved the same per-type Context twice.
    Context c = getContext(req.getType());
    if (req.getHosts() != null && req.getHosts().size() > 0) {
      for (RequestedNode node : req.getRequestedNodes()) {
        String host = node.getHost();
        List<ResourceRequestInfo> hostReqs = c.hostToPendingRequests.get(host);
        Utilities.removeReference(hostReqs, req);
        if (hostReqs.isEmpty()) {
          c.hostToPendingRequests.remove(host);
        }
        Node rack = node.getRack();
        List<ResourceRequestInfo> rackReqs = c.rackToPendingRequests.get(rack);
        Utilities.removeReference(rackReqs, req);
        if (rackReqs.isEmpty()) {
          c.rackToPendingRequests.remove(rack);
        }
      }
    }
    Utilities.removeReference(c.anyHostRequests, req);
  }
  // Runs unconditionally, as in the original; safe because
  // removePendingRequestForType only decrements on an actual removal.
  removePendingRequestForType(req);
}
java
{ "resource": "" }
q161723
Session.releaseResource
train
public List<ResourceGrant> releaseResource(List<Integer> idList) { if (deleted) { throw new RuntimeException("Session: " + sessionId + " has been deleted"); } List<ResourceGrant> canceledGrants = new ArrayList<ResourceGrant>(); for (Integer id : idList) { ResourceRequestInfo req = idToRequest.get(id); if (req != null) { idToRequest.remove(id); ResourceGrant grant = idToGrant.remove(id); if (grant != null) { // we have previously granted this resource, return to caller canceledGrants.add(grant); removeGrantedRequest(req, false); } else { removePendingRequest(req); } incrementRequestCount(req.getType(), -1); } } return canceledGrants; }
java
{ "resource": "" }
q161724
Session.grantResource
train
public void grantResource(ResourceRequestInfo req, ResourceGrant grant) { if (deleted) { throw new RuntimeException("Session: " + sessionId + " has been deleted"); } removePendingRequest(req); idToGrant.put(req.getId(), grant); addGrantedRequest(req); // Handle the first wait metrics synchronized (typeToFirstWait) { if (!typeToFirstWait.containsKey(req.getType())) { throw new IllegalStateException( "Impossible to get a grant prior to requesting a resource."); } Long firstWait = typeToFirstWait.get(req.getType()); if (firstWait == null) { firstWait = new Long(ClusterManager.clock.getTime() - startTime); typeToFirstWait.put(req.getType(), firstWait); } } }
java
{ "resource": "" }
q161725
Session.revokeResource
train
public List<ResourceGrant> revokeResource(List<Integer> idList) { if (deleted) { throw new RuntimeException("Session: " + sessionId + " has been deleted"); } List<ResourceGrant> canceledGrants = new ArrayList<ResourceGrant>(); for (Integer id : idList) { ResourceRequestInfo req = idToRequest.get(id); ResourceGrant grant = idToGrant.remove(id); if (grant != null) { if (req == null) { throw new RuntimeException("Session: " + sessionId + ", requestId: " + id + " grant exists but request doesn't"); } removeGrantedRequest(req, true); // we have previously granted this resource, return to caller canceledGrants.add(grant); } } return canceledGrants; }
java
{ "resource": "" }
q161726
Session.getGrantReportList
train
/**
 * Builds a report for every outstanding grant, ordered by grant id
 * (the TreeMap provides the ordering).
 *
 * @return grant reports sorted by id
 */
public List<GrantReport> getGrantReportList() {
  Map<Integer, GrantReport> sortedReports =
      new TreeMap<Integer, GrantReport>();
  for (Map.Entry<Integer, ResourceGrant> entry : idToGrant.entrySet()) {
    Integer id = entry.getKey();
    ResourceGrant grant = entry.getValue();
    sortedReports.put(id,
        new GrantReport(id.intValue(), grant.getAddress().toString(),
            grant.getType(), grant.getGrantedTime()));
  }
  return new ArrayList<GrantReport>(sortedReports.values());
}
java
{ "resource": "" }
q161727
Session.checkHeartbeatInfo
train
/**
 * Compares the JobTracker's reported heartbeat state against what the
 * ClusterManager expects.
 *
 * Outcomes (in order of checks):
 *  1. requestId mismatch        -> fatal log, return false (out of sync).
 *  2. grantId matches exactly   -> in sync; reset lastHeartbeat, return true.
 *  3. grantId differs from the last reported one -> mismatch but progressing;
 *     remember the new values and refresh lastSyncTime, return true.
 *  4. no progress for longer than maxDelay -> error log, return true.
 *  5. otherwise mismatch with no progress yet -> info log, return true.
 *
 * NOTE(review): only case 1 returns false; case 4 logs "out-of-sync" but
 * still returns true — confirm this is the intended contract with callers.
 *
 * @param jtInfo the JobTracker's view of request/grant progress
 * @return false only when the request ids disagree
 */
public boolean checkHeartbeatInfo(HeartbeatArgs jtInfo) {
  // Case 1: request ids disagree — irrecoverably out of sync.
  if (expectedInfo.requestId != jtInfo.requestId) {
    LOG.fatal("heartbeat out-of-sync:" + sessionId +
        " CM:" + expectedInfo.requestId + " " + expectedInfo.grantId +
        " JT:" + jtInfo.requestId + " " + jtInfo.grantId);
    return false;
  }
  // Case 2: perfect match — reset the delay-tracking state.
  if (expectedInfo.grantId == jtInfo.grantId) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("heartbeat match:" + sessionId);
    }
    lastSyncTime = System.currentTimeMillis();
    lastHeartbeat.requestId = 0;
    lastHeartbeat.grantId = 0;
    return true;
  }
  // Case 3: grant id moved since the last heartbeat — delayed but progressing.
  if (jtInfo.grantId != lastHeartbeat.grantId) {
    LOG.info("heartbeat mismatch with progress:" + sessionId +
        " CM:" + expectedInfo.requestId + " " + expectedInfo.grantId +
        " JT:" + jtInfo.requestId + " " + jtInfo.grantId);
    lastSyncTime = System.currentTimeMillis();
    lastHeartbeat.requestId = jtInfo.requestId;
    lastHeartbeat.grantId = jtInfo.grantId;
    return true;
  }
  // Case 4: no progress for longer than maxDelay.
  if (System.currentTimeMillis() - lastSyncTime > maxDelay) {
    LOG.error("heartbeat out-of-sync:" + sessionId +
        " CM:" + expectedInfo.requestId + " " + expectedInfo.grantId +
        " JT:" + jtInfo.requestId + " " + jtInfo.grantId);
    return true;
  }
  // Case 5: mismatch with no progress, but still within the allowed delay.
  LOG.info("heartbeat mismatch with no progress:" + sessionId +
      " CM:" + expectedInfo.requestId + " " + expectedInfo.grantId +
      " JT:" + jtInfo.requestId + " " + jtInfo.grantId);
  return true;
}
java
{ "resource": "" }
q161728
Session.getGrantsToPreempt
train
public List<Integer> getGrantsToPreempt( int maxGrantsToPreempt, long maxRunningTime, ResourceType type) { if (deleted) { LOG.warn("Attempt to preempt from deleted session " + getSessionId()); return Collections.emptyList(); } List<ResourceGrant> candidates = getGrantsYoungerThan(maxRunningTime, type); List<Integer> grantIds = new ArrayList<Integer>(); if (candidates.size() <= maxGrantsToPreempt) { // In this case, we can return the whole list without sorting for (ResourceGrant grant : candidates) { grantIds.add(grant.id); } } else { sortGrantsByStartTime(candidates); for (ResourceGrant grant : candidates) { grantIds.add(grant.id); if (grantIds.size() == maxGrantsToPreempt) { break; } } } LOG.info("Found " + grantIds.size() + " " + type + " grants younger than " + maxRunningTime + " ms to preempt in " + getSessionId()); return grantIds; }
java
{ "resource": "" }
q161729
Session.getGrantsYoungerThan
train
/**
 * Filters the session's grants to those of the given type whose age is
 * strictly less than maxRunningTime.
 *
 * @param maxRunningTime age cutoff in milliseconds
 * @param type the resource type to match
 * @return the matching grants
 */
private List<ResourceGrant> getGrantsYoungerThan(
    long maxRunningTime, ResourceType type) {
  long now = ClusterManager.clock.getTime();
  List<ResourceGrant> matching = new ArrayList<ResourceGrant>();
  for (ResourceGrant grant : getGrants()) {
    boolean youngEnough = now - grant.getGrantedTime() < maxRunningTime;
    if (youngEnough && type.equals(grant.getType())) {
      matching.add(grant);
    }
  }
  return matching;
}
java
{ "resource": "" }
q161730
Session.sortGrantsByStartTime
train
/**
 * Sorts grants newest first (descending grantedTime), breaking ties by
 * descending id.
 *
 * @param grants the list to sort in place
 */
private void sortGrantsByStartTime(List<ResourceGrant> grants) {
  Collections.sort(grants, new Comparator<ResourceGrant>() {
    @Override
    public int compare(ResourceGrant g1, ResourceGrant g2) {
      if (g1.grantedTime < g2.grantedTime) {
        return 1;
      }
      if (g1.grantedTime > g2.grantedTime) {
        return -1;
      }
      // Fixed: the original tie-break returned g2.id - g1.id, which can
      // overflow for ids of opposite sign; compare explicitly instead.
      if (g2.id > g1.id) {
        return 1;
      }
      if (g2.id < g1.id) {
        return -1;
      }
      return 0;
    }
  });
}
java
{ "resource": "" }
q161731
SocketIOWithTimeout.checkChannelValidity
train
/**
 * Validates that the given object is a usable selectable channel.
 *
 * @param channel the candidate channel
 * @throws IOException if the channel is null or not a SelectableChannel
 */
static void checkChannelValidity(Object channel) throws IOException {
  if (channel == null) {
    /* Most common reason is that the original socket does not have a
     * channel, so this is an IOException rather than a RuntimeException. */
    throw new IOException(
        "Channel is null. Check " + "how the channel or socket is created.");
  }
  if (!(channel instanceof SelectableChannel)) {
    throw new IOException("Channel should be a SelectableChannel");
  }
}
java
{ "resource": "" }
q161732
SocketIOWithTimeout.doIO
train
/**
 * Performs channel I/O on buf, retrying through the selector until some
 * bytes move, the stream closes, or the timeout expires.
 *
 * @param buf buffer to read into / write from; must have remaining space
 * @param ops the selector interest ops to wait on between attempts
 * @return bytes transferred (or a negative error code from performIO),
 *         or -1 if the stream is closed
 * @throws IllegalArgumentException if buf has nothing remaining
 * @throws SocketTimeoutException if the selector wait times out
 * @throws IOException on channel failure (the stream is marked closed if
 *         the underlying channel is no longer open)
 */
int doIO(ByteBuffer buf, int ops) throws IOException {
  /* For now only one thread is allowed. If user want to read or write
   * from multiple threads, multiple streams could be created. In that
   * case multiple threads work as well as underlying channel supports it. */
  if (!buf.hasRemaining()) {
    throw new IllegalArgumentException("Buffer has no data left.");
    //or should we just return 0?
  }
  while (buf.hasRemaining()) {
    if (closed) {
      return -1;
    }
    try {
      // Attempt the non-blocking I/O first; only wait if nothing moved.
      int n = performIO(buf);
      if (n != 0) {
        // successful io or an error.
        return n;
      }
    } catch (IOException e) {
      // Mark the stream closed if the channel itself died.
      if (!channel.isOpen()) {
        closed = true;
      }
      throw e;
    }
    //now wait for socket to be ready.
    int count = 0;
    try {
      count = selector.select(channel, ops, timeout);
    } catch (IOException e) {
      //unexpected IOException.
      closed = true;
      throw e;
    }
    if (count == 0) {
      // Selector returned without readiness: the timeout expired.
      throw new SocketTimeoutException(
          timeoutExceptionString(channel, timeout, ops));
    }
    // otherwise the socket should be ready for io.
  }
  return 0; // does not reach here.
}
java
{ "resource": "" }
q161733
JNStorage.findFinalizedEditsFile
train
/**
 * Locates the finalized edits file for a transaction range, failing if it
 * does not exist on disk.
 *
 * @param startTxId first transaction id of the segment
 * @param endTxId last transaction id of the segment
 * @return the existing finalized edits file
 * @throws IOException if no file exists for the range
 */
File findFinalizedEditsFile(long startTxId, long endTxId) throws IOException {
  String fileName = NNStorage.getFinalizedEditsFileName(startTxId, endTxId);
  File editsFile = new File(sd.getCurrentDir(), fileName);
  if (!editsFile.exists()) {
    throw new IOException(
        "No edits file for range " + startTxId + "-" + endTxId);
  }
  return editsFile;
}
java
{ "resource": "" }
q161734
JNStorage.getSyncLogTemporaryFile
train
/**
 * Builds the temporary file path used while syncing a log segment; the
 * name is the finalized name plus a ".tmp=&lt;stamp&gt;" suffix.
 *
 * @param segmentTxId first transaction id of the segment
 * @param endTxId last transaction id of the segment
 * @param stamp distinguishing stamp appended to the temp name
 * @return the temporary file under the current directory
 */
File getSyncLogTemporaryFile(long segmentTxId, long endTxId, long stamp) {
  String tmpName =
      NNStorage.getFinalizedEditsFileName(segmentTxId, endTxId) +
      ".tmp=" + stamp;
  return new File(sd.getCurrentDir(), tmpName);
}
java
{ "resource": "" }
q161735
JNStorage.getSyncLogDestFile
train
/**
 * Builds the final destination path for a synced log segment.
 *
 * @param segmentTxId first transaction id of the segment
 * @param endTxId last transaction id of the segment
 * @return the finalized edits file path under the current directory
 */
File getSyncLogDestFile(long segmentTxId, long endTxId) {
  String finalName = NNStorage.getFinalizedEditsFileName(segmentTxId, endTxId);
  return new File(sd.getCurrentDir(), finalName);
}
java
{ "resource": "" }
q161736
JNStorage.purgeDataOlderThan
train
/**
 * Purges stored data older than the given transaction id. Image
 * directories use the image regex set; edits directories purge both the
 * current dir and the paxos dir with their own regex sets.
 *
 * @param minTxIdToKeep the oldest transaction id to retain
 * @throws IOException on purge failure
 */
void purgeDataOlderThan(long minTxIdToKeep) throws IOException {
  if (isImageDir) {
    purgeMatching(sd.getCurrentDir(), IMAGE_CURRENT_DIR_PURGE_REGEXES,
        minTxIdToKeep);
    return;
  }
  purgeMatching(sd.getCurrentDir(), EDITS_CURRENT_DIR_PURGE_REGEXES,
      minTxIdToKeep);
  purgeMatching(getPaxosDir(), PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep);
}
java
{ "resource": "" }
q161737
Gridmix.writeInputData
train
protected void writeInputData(long genbytes, Path ioPath) throws IOException, InterruptedException { final Configuration conf = getConf(); final GridmixJob genData = new GenerateData(conf, ioPath, genbytes); submitter.add(genData); LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) + " of test data..."); // TODO add listeners, use for job dependencies TimeUnit.SECONDS.sleep(10); try { genData.getJob().waitForCompletion(false); } catch (ClassNotFoundException e) { throw new IOException("Internal error", e); } if (!genData.getJob().isSuccessful()) { throw new IOException("Data generation failed!"); } LOG.info("Done."); }
java
{ "resource": "" }
q161738
Gridmix.startThreads
train
/**
 * Wires up and starts the monitor, submitter, and job-factory threads.
 *
 * @param conf configuration supplying thread-pool and queue sizing
 * @param traceIn path of the input trace
 * @param ioPath path backing the file pool
 * @param scratchDir scratch directory for the job factory
 * @param startFlag latch released when the pipeline may begin
 * @throws IOException on setup failure
 */
private void startThreads(Configuration conf, String traceIn, Path ioPath,
    Path scratchDir, CountDownLatch startFlag) throws IOException {
  monitor = createJobMonitor();
  int submitterThreads =
      conf.getInt(GRIDMIX_SUB_THR,
          Runtime.getRuntime().availableProcessors() + 1);
  int queueDepth = conf.getInt(GRIDMIX_QUE_DEP, 5);
  submitter = createJobSubmitter(monitor, submitterThreads, queueDepth,
      new FilePool(conf, ioPath));
  factory = createJobFactory(submitter, traceIn, scratchDir, conf, startFlag);
  monitor.start();
  submitter.start();
  factory.start();
}
java
{ "resource": "" }
q161739
ClusterJspHelper.generateClusterHealthReport
train
/**
 * Builds a cluster-wide health report by querying every namenode RPC
 * address in parallel (one fetcher thread per namenode).
 *
 * @return the populated ClusterStatus; on address-resolution failure the
 *         status carries the error and no namenode data
 */
ClusterStatus generateClusterHealthReport() {
  ClusterStatus cs = new ClusterStatus();
  List<InetSocketAddress> isas = null;
  ArrayList<String> suffixes = null;
  // Avatar deployments expose two namenode instances, suffixed "0"/"1".
  if (isAvatar) {
    suffixes = new ArrayList<String>();
    suffixes.add("0");
    suffixes.add("1");
  }
  try {
    cs.nnAddrs = isas = DFSUtil.getClientRpcAddresses(conf, suffixes);
  } catch (Exception e) {
    // Could not build cluster status
    cs.setError(e);
    LOG.error(e);
    return cs;
  }
  sort(isas);
  // Process each namenode and add it to ClusterStatus in parallel
  NameNodeStatusFetcher[] threads = new NameNodeStatusFetcher[isas.size()];
  for (int i = 0; i < isas.size(); i++) {
    threads[i] = new NameNodeStatusFetcher(isas.get(i));
    threads[i].start();
  }
  for (NameNodeStatusFetcher thread : threads) {
    try {
      thread.join();
      if (thread.e != null) {
        cs.addException(thread.isa.toString(), thread.e);
      }
      // NOTE(review): the status is added even when the fetcher recorded an
      // exception; presumably addNamenodeStatus tolerates a null/partial
      // thread.nn — confirm.
      cs.addNamenodeStatus(thread.nn);
    } catch (InterruptedException ex) {
      LOG.warn(ex);
    }
  }
  return cs;
}
java
{ "resource": "" }
q161740
ClusterJspHelper.generateDecommissioningReport
train
/**
 * Builds a cluster-wide decommissioning report by querying every namenode
 * in parallel. Datanode decommission status is collected per namenode;
 * namenodes that could not be reached are recorded as exceptions and their
 * entries marked UNKNOWN.
 *
 * @return the populated DecommissionStatus; on address-resolution failure a
 *         status wrapping the error is returned
 */
DecommissionStatus generateDecommissioningReport() {
  List<InetSocketAddress> isas = null;
  ArrayList<String> suffixes = null;
  // Avatar deployments expose two namenode instances, suffixed "0"/"1".
  if (isAvatar) {
    suffixes = new ArrayList<String>();
    suffixes.add("0");
    suffixes.add("1");
  }
  try {
    isas = DFSUtil.getClientRpcAddresses(conf, suffixes);
    sort(isas);
  } catch (Exception e) {
    // catch any exception encountered other than connecting to namenodes
    DecommissionStatus dInfo = new DecommissionStatus(e);
    LOG.error(e);
    return dInfo;
  }
  // Outer map key is datanode. Inner map key is namenode and the value is
  // decom status of the datanode for the corresponding namenode
  Map<String, Map<String, String>> statusMap =
      new HashMap<String, Map<String, String>>();
  // Map of exceptions encountered when connecting to namenode
  // key is namenode and value is exception
  Map<String, Exception> decommissionExceptions =
      new HashMap<String, Exception>();
  List<String> unreportedNamenode = new ArrayList<String>();
  // One fetcher thread per namenode; each fills statusMap for its node.
  DecommissionStatusFetcher[] threads =
      new DecommissionStatusFetcher[isas.size()];
  for (int i = 0; i < isas.size(); i++) {
    threads[i] = new DecommissionStatusFetcher(isas.get(i), statusMap);
    threads[i].start();
  }
  for (DecommissionStatusFetcher thread : threads) {
    try {
      thread.join();
      if (thread.e != null) {
        // catch exceptions encountered while connecting to namenodes
        decommissionExceptions.put(thread.isa.toString(), thread.e);
        unreportedNamenode.add(thread.isa.toString());
      }
    } catch (InterruptedException ex) {
      LOG.warn(ex);
    }
  }
  // Mark every datanode's entry for unreachable namenodes as UNKNOWN.
  updateUnknownStatus(statusMap, unreportedNamenode);
  getDecommissionNodeClusterState(statusMap);
  return new DecommissionStatus(statusMap, isas, getDatanodeHttpPort(conf),
      decommissionExceptions);
}
java
{ "resource": "" }
q161741
ClusterJspHelper.updateUnknownStatus
train
private void updateUnknownStatus(Map<String, Map<String, String>> statusMap, List<String> unreportedNn) { if (unreportedNn == null || unreportedNn.isEmpty()) { // no unreported namenodes return; } for (Map.Entry<String, Map<String,String>> entry : statusMap.entrySet()) { String dn = entry.getKey(); Map<String, String> nnStatus = entry.getValue(); for (String nn : unreportedNn) { nnStatus.put(nn, DecommissionStates.UNKNOWN.toString()); } statusMap.put(dn, nnStatus); } }
java
{ "resource": "" }
q161742
ClusterJspHelper.getDatanodeHttpPort
train
/**
 * Extracts the port from the configured datanode HTTP address
 * ("host:port" in dfs.datanode.http.address).
 *
 * @param conf configuration to read the address from
 * @return the port, or -1 when the address is unset or has no port
 * @throws NumberFormatException if the port portion is not numeric
 */
private int getDatanodeHttpPort(Configuration conf) {
  String address = conf.get("dfs.datanode.http.address", "");
  if (address.equals("")) {
    return -1;
  }
  // Fixed: the original indexed split(":")[1] and threw
  // ArrayIndexOutOfBoundsException for a host-only address.
  int sep = address.indexOf(':');
  if (sep < 0) {
    return -1;
  }
  return Integer.parseInt(address.substring(sep + 1));
}
java
{ "resource": "" }
q161743
PostExPerformanceDiagnoser.readJobInformation
train
/**
 * Loads the job configuration and job history referenced by this
 * diagnoser's URLs. The history file may live on HDFS or the local
 * filesystem; any other URL scheme is rejected.
 *
 * NOTE(review): throws the raw Exception type — callers cannot distinguish
 * failure modes; consider narrowing in a future change.
 *
 * @param jobConf configuration object to populate from the job-conf URL
 * @param jobInfo job-info object populated from the parsed history
 * @throws Exception on malformed URLs, unsupported protocols, or parse
 *         failures
 */
private void readJobInformation(JobConf jobConf, JobInfo jobInfo)
    throws Exception {
  /*
   * Convert the input strings to URL
   */
  URL jobConfFileUrl = new URL(this._jobConfFile);
  URL jobHistoryFileUrl = new URL(this._jobHistoryFile);
  /*
   * Read the Job Configuration from the jobConfFile url
   */
  jobConf.addResource(jobConfFileUrl);
  /*
   * Read JobHistoryFile and build job counters to evaluate diagnostic rules
   */
  if (jobHistoryFileUrl.getProtocol().equals("hdfs")) {
    // History stored on HDFS: parse via the distributed filesystem.
    DefaultJobHistoryParser.parseJobTasks(jobHistoryFileUrl.getPath(),
        jobInfo, FileSystem.get(jobConf));
  } else if (jobHistoryFileUrl.getProtocol().equals("file")) {
    // History stored locally: parse via the local filesystem.
    DefaultJobHistoryParser.parseJobTasks(jobHistoryFileUrl.getPath(),
        jobInfo, FileSystem.getLocal(jobConf));
  } else {
    throw new Exception(
        "Malformed URL. Protocol: " + jobHistoryFileUrl.getProtocol());
  }
}
java
{ "resource": "" }
q161744
TaskGraphServlet.getMapAvarageProgress
train
/**
 * Averages the progress of up to tasksPerBar map tasks starting at index.
 *
 * @param tasksPerBar maximum number of tasks folded into one bar
 * @param index first report index to include
 * @param reports the task reports
 * @return the average progress, or 0 when no reports fall in the window
 */
private float getMapAvarageProgress(int tasksPerBar, int index,
    TaskReport[] reports) {
  float total = 0f;
  int counted = 0;
  for (; counted < tasksPerBar && index + counted < reports.length;
       counted++) {
    total += reports[index + counted].getProgress();
  }
  // Fixed: when index is past the end, the original divided 0 by 0 -> NaN.
  return counted == 0 ? 0f : total / counted;
}
java
{ "resource": "" }
q161745
TaskGraphServlet.getReduceAvarageProgresses
train
/**
 * Averages up to tasksPerBar reduce-task progresses starting at index,
 * split into three per-third buckets (each overall progress value is
 * decomposed into thirds against the oneThird field).
 *
 * @param tasksPerBar maximum number of tasks folded into one bar
 * @param index first report index to include
 * @param reports the task reports
 * @return three averaged bucket values (all 0 when no reports in window)
 */
private float[] getReduceAvarageProgresses(int tasksPerBar, int index,
    TaskReport[] reports) {
  float[] progresses = new float[] {0, 0, 0};
  int k = 0;
  for (; k < tasksPerBar && index + k < reports.length; k++) {
    float progress = reports[index + k].getProgress();
    // Decompose overall progress into up-to-three thirds; a full third
    // contributes 1, a partial third contributes its fraction (x3).
    for (int j = 0; progress > 0; j++, progress -= oneThird) {
      if (progress > oneThird) {
        progresses[j] += 1f;
      } else {
        progresses[j] += progress * 3;
      }
    }
  }
  // Fixed: when index is past the end, the original divided 0 by 0 -> NaN.
  if (k > 0) {
    for (int j = 0; j < 3; j++) {
      progresses[j] /= k;
    }
  }
  return progresses;
}
java
{ "resource": "" }
q161746
NNLatencyBenchmark.setUp
train
/**
 * Builds every client connection the benchmark exercises: a direct
 * filesystem handle, direct RPC proxies to the namenode, and both Thrift
 * and RPC proxies through the proxy host, then creates the benchmark
 * root directory. On any failure, tearDown() is invoked before the
 * exception is rethrown so partially created resources are released.
 *
 * @throws Exception if any connection or the mkdirs fails
 */
private void setUp() throws Exception {
  try {
    fileSystem = (DistributedFileSystem) FileSystem.get(
        StorageServiceConfigKeys.translateToOldSchema(conf, nameserviceId),
        conf);
    InetSocketAddress nameNodeAddr =
        fileSystem.getClient().getNameNodeAddr();
    metaInfo = new RequestMetaInfo(clusterId, nameserviceId,
        RequestMetaInfo.NO_NAMESPACE_ID, RequestMetaInfo.NO_APPLICATION_ID,
        (UnixUserGroupInformation) UserGroupInformation.getUGI(this.conf));
    // Direct RPC proxies straight to the namenode.
    directClientProtocol = RPC.getProxy(ClientProtocol.class,
        ClientProtocol.versionID, nameNodeAddr, conf);
    directClientProxyProtocol = RPC.getProxy(ClientProxyProtocol.class,
        ClientProxyProtocol.versionID, nameNodeAddr, conf);
    // Thrift client routed through the proxy host.
    clientManager = new ThriftClientManager();
    FramedClientConnector connector = new FramedClientConnector(
        HostAndPort.fromParts(proxyHostname, proxyPortThrift));
    proxyTClientProxyProtocol =
        clientManager.createClient(connector, TClientProxyProtocol.class)
            .get();
    // RPC client routed through the proxy host.
    proxyClientProxyProtocol = RPC.getProxy(ClientProxyProtocol.class,
        ClientProxyProtocol.versionID,
        new InetSocketAddress(proxyHostname, proxyPortRPC), conf);
    fileSystem.mkdirs(new Path(ROOT));
  } catch (Exception e) {
    // Release whatever was created before rethrowing.
    tearDown();
    throw e;
  }
}
java
{ "resource": "" }
q161747
NNLatencyBenchmark.tearDown
train
/**
 * Deletes the benchmark root directory and releases all client resources.
 * The proxies and clients are cleaned up in a finally block so they are
 * released even when the delete fails.
 *
 * @throws Exception if the recursive delete fails
 */
private void tearDown() throws Exception {
  try {
    if (fileSystem != null) {
      fileSystem.delete(new Path(ROOT), true, true);
    }
  } finally {
    RPC.stopProxy(proxyClientProxyProtocol);
    IOUtils.cleanup(LOG, proxyTClientProxyProtocol, clientManager,
        fileSystem);
  }
}
java
{ "resource": "" }
q161748
Shard.normalizePath
train
public static String normalizePath(String path) { // remove double slashes & backslashes path = path.replace("//", "/"); path = path.replace("\\", "/"); // trim trailing slash from non-root path (ignoring windows drive) if (path.length() > 1 && path.endsWith("/")) { path = path.substring(0, path.length() - 1); } return path; }
java
{ "resource": "" }
q161749
Shard.createShardFromString
train
/**
 * Parses a shard from its "version@dir@gen" string form.
 *
 * @param str the encoded shard
 * @return the parsed Shard
 */
private static Shard createShardFromString(String str) {
  int firstAt = str.indexOf("@");
  int secondAt = str.indexOf("@", firstAt + 1);
  long version = Long.parseLong(str.substring(0, firstAt));
  String dir = str.substring(firstAt + 1, secondAt);
  long gen = Long.parseLong(str.substring(secondAt + 1));
  return new Shard(version, dir, gen);
}
java
{ "resource": "" }
q161750
Shard.compareTo
train
public int compareTo(Shard other) { // compare version if (version < other.version) { return -1; } else if (version > other.version) { return 1; } // compare dir int result = dir.compareTo(other.dir); if (result != 0) { return result; } // compare gen if (gen < other.gen) { return -1; } else if (gen == other.gen) { return 0; } else { return 1; } }
java
{ "resource": "" }
q161751
Shell.getUlimitMemoryCommand
train
public static String[] getUlimitMemoryCommand(int memoryLimit) { // ulimit isn't supported on Windows if (WINDOWS) { return null; } return new String[] {ULIMIT_COMMAND, "-v", String.valueOf(memoryLimit)}; }
java
{ "resource": "" }
q161752
TaskID.appendTo
train
/**
 * Appends this task id's textual form ("&lt;jobId&gt;_m|r_&lt;id&gt;") to
 * the builder.
 *
 * @param builder the builder to append to
 * @return the same builder, for chaining
 */
protected StringBuilder appendTo(StringBuilder builder) {
  StringBuilder out = jobId.appendTo(builder);
  out.append(SEPARATOR);
  out.append(isMap ? 'm' : 'r');  // 'm' for map tasks, 'r' for reduce
  out.append(SEPARATOR);
  return out.append(idFormat.format(id));
}
java
{ "resource": "" }
q161753
TaskID.forName
train
public static TaskID forName(String str) throws IllegalArgumentException { if(str == null) return null; try { String[] parts = str.split("_"); if(parts.length == 5) { if(parts[0].equals(TASK)) { boolean isMap = false; if(parts[3].equals("m")) isMap = true; else if(parts[3].equals("r")) isMap = false; else throw new Exception(); return new org.apache.hadoop.mapred.TaskID(parts[1], Integer.parseInt(parts[2]), isMap, Integer.parseInt(parts[4])); } } }catch (Exception ex) {//fall below } throw new IllegalArgumentException("TaskId string : " + str + " is not properly formed"); }
java
{ "resource": "" }
q161754
FSImageSerialization.readString
train
/**
 * Reads a UTF8-encoded string from the stream.
 *
 * Reuses the thread-local UTF8 scratch instance (TL_DATA) so no per-call
 * allocation is needed; the deprecation suppression covers the legacy
 * UTF8 class itself.
 *
 * @param in the stream to read from
 * @return the decoded string
 * @throws IOException on read failure
 */
@SuppressWarnings("deprecation")
public static String readString(DataInputStream in) throws IOException {
  UTF8 ustr = TL_DATA.get().U_STR;
  ustr.readFields(in);
  return ustr.toString();
}
java
{ "resource": "" }
q161755
JobMonitor.doMonitor
train
/**
 * Periodic monitoring loop; runs until {@code running} is cleared.
 * Each pass polls every tracked DistRaid job, kills jobs that exceed the
 * maximum running time, moves finished jobs from {@code jobs} to
 * {@code history} (recording counters), and then sleeps for
 * {@code jobMonitorInterval}.
 */
public void doMonitor() {
  while (running) {
    String[] keys = null;
    // Make a copy of the names of the current jobs.
    synchronized(jobs) {
      keys = jobs.keySet().toArray(new String[0]);
    }
    // Check all the jobs. We do not want to block access to `jobs`
    // because that will prevent new jobs from being added.
    // This is safe because JobMonitor.run is the only code that can
    // remove a job from `jobs`. Thus all elements in `keys` will have
    // valid values.
    Map<String, List<DistRaid>> finishedJobs =
        new HashMap<String, List<DistRaid>>();
    for (String key: keys) {
      // For each policy being monitored, get the list of jobs running.
      DistRaid[] jobListCopy = null;
      synchronized(jobs) {
        List<DistRaid> jobList = jobs.get(key);
        synchronized(jobList) {
          jobListCopy = jobList.toArray(new DistRaid[jobList.size()]);
        }
      }
      // The code that actually contacts the JobTracker is not synchronized,
      // it uses copies of the list of jobs.
      for (DistRaid job: jobListCopy) {
        // Check each running job.
        try {
          boolean complete = job.checkComplete();
          if (complete) {
            addJob(finishedJobs, key, job);
            if (job.successful()) {
              jobsSucceeded++;
            }
          } else if (System.currentTimeMillis() - job.getStartTime() >
                     maximumRunningTime){
            // If the job is running for more than one day
            throw new Exception("Job " + job.getJobID() +
                " is hanging more than " + maximumRunningTime/1000 +
                " seconds. Kill it");
          }
        } catch (Exception e) {
          // If there was an error, consider the job finished.
          addJob(finishedJobs, key, job);
          try {
            job.killJob();
          } catch (Exception ee) {
            // Best effort: the job is treated as finished either way.
            LOG.error(ee);
          }
        }
      }
    }
    if (finishedJobs.size() > 0) {
      for (String key: finishedJobs.keySet()) {
        List<DistRaid> finishedJobList = finishedJobs.get(key);
        // Iterate through finished jobs and remove from jobs.
        // removeJob takes care of locking.
        for (DistRaid job: finishedJobList) {
          addCounter(raidProgress, job, INT_CTRS);
          removeJob(jobs, key, job);
          addJob(history, key, job);
          // delete the temp directory
          job.cleanUp();
        }
      }
    }
    try {
      Thread.sleep(jobMonitorInterval);
    } catch (InterruptedException ie) {
      // NOTE(review): interrupt status is swallowed so the monitor keeps
      // polling — presumably intentional; confirm.
    }
  }
}
java
{ "resource": "" }
q161756
JobMonitor.runningJobsCount
train
/**
 * @return the total number of jobs currently tracked across all policies.
 */
int runningJobsCount() {
  int total = 0;
  synchronized (jobs) {
    // Iterate the value lists directly instead of doing a second lookup
    // per key via jobs.get(key).
    for (List<DistRaid> jobList : jobs.values()) {
      total += jobList.size();
    }
  }
  return total;
}
java
{ "resource": "" }
q161757
TFile.main
train
/**
 * Command-line entry point of the TFile dumper: prints the TFile/BCFile
 * API versions, then dumps meta information for each TFile path given on
 * the command line. Exits with status 0 (after printing usage) when no
 * arguments are supplied.
 */
public static void main(String[] args) {
  System.out.printf("TFile Dumper (TFile %s, BCFile %s)\n", TFile.API_VERSION
      .toString(), BCFile.API_VERSION.toString());
  if (args.length == 0) {
    System.out
        .println("Usage: java ... org.apache.hadoop.io.file.tfile.TFile tfile-path [tfile-path ...]");
    System.exit(0);
  }
  Configuration conf = new Configuration();
  for (String file : args) {
    System.out.println("===" + file + "===");
    try {
      TFileDumper.dumpInfo(file, System.out, conf);
    } catch (IOException e) {
      // Report the failure for this file and continue with the rest.
      e.printStackTrace(System.err);
    }
  }
}
java
{ "resource": "" }
q161758
TaskLogsMonitor.getAllLogsFileDetails
train
/**
 * Looks up, for every task attempt, the per-log-file details recorded in
 * the task-log index files.
 *
 * @param allAttempts attempts (including cleanup attempts) to inspect
 * @return a map from each task to its {log name -> log file detail} map
 * @throws IOException if reading a log index fails
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> details =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    details.put(task, TaskLog.getAllLogsFileDetails(task.getTaskID(),
        task.isTaskCleanupTask()));
  }
  return details;
}
java
{ "resource": "" }
q161759
TaskLogsMonitor.isTruncationNeeded
train
/**
 * Decides whether any attempt in this JVM has a log of the given kind
 * whose length exceeds its retain size and therefore needs truncation.
 *
 * @param lInfo per-JVM bookkeeping holding the attempts to check
 * @param taskLogFileDetails per-task {log name -> detail} maps
 * @param logName which log file kind to check
 * @return true if at least one attempt's log must be truncated
 */
private boolean isTruncationNeeded(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  for (Task task : lInfo.allAttempts) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    // Keep the detail lookup scoped to this iteration; the old code kept
    // a stale copy of the last inspected detail alive after the loop.
    LogFileDetail logFileDetail =
        taskLogFileDetails.get(task).get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      return true;
    }
  }
  return false;
}
java
{ "resource": "" }
q161760
TaskLogsMonitor.truncateALogFileOfAnAttempt
train
private LogFileDetail truncateALogFileOfAnAttempt( final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail, final long taskRetainSize, final FileWriter tmpFileWriter, final FileReader logFileReader) throws IOException { LogFileDetail newLogFileDetail = new LogFileDetail(); // ///////////// Truncate log file /////////////////////// // New location of log file is same as the old newLogFileDetail.location = oldLogFileDetail.location; if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION && oldLogFileDetail.length > taskRetainSize) { LOG.info("Truncating logs for " + taskID + " from " + oldLogFileDetail.length + "bytes to " + taskRetainSize + "bytes."); newLogFileDetail.length = taskRetainSize; } else { LOG.info("No truncation needed for " + taskID + " length is " + oldLogFileDetail.length + " retain size " + taskRetainSize + "bytes."); newLogFileDetail.length = oldLogFileDetail.length; } long charsSkipped = logFileReader.skip(oldLogFileDetail.length - newLogFileDetail.length); if (charsSkipped != oldLogFileDetail.length - newLogFileDetail.length) { throw new IOException("Erroneously skipped " + charsSkipped + " instead of the expected " + (oldLogFileDetail.length - newLogFileDetail.length)); } long alreadyRead = 0; while (alreadyRead < newLogFileDetail.length) { char tmpBuf[]; // Temporary buffer to read logs if (newLogFileDetail.length - alreadyRead >= DEFAULT_BUFFER_SIZE) { tmpBuf = new char[DEFAULT_BUFFER_SIZE]; } else { tmpBuf = new char[(int) (newLogFileDetail.length - alreadyRead)]; } int bytesRead = logFileReader.read(tmpBuf); if (bytesRead < 0) { break; } else { alreadyRead += bytesRead; } tmpFileWriter.write(tmpBuf); } // ////// End of truncating log file /////////////////////// return newLogFileDetail; }
java
{ "resource": "" }
q161761
TaskLogsMonitor.updateIndicesAfterLogTruncation
train
/**
 * Rewrites each task's log-index file so the recorded [start, end) offsets
 * match the truncated log lengths. Failures for one task are logged and
 * skipped so the remaining tasks are still updated.
 *
 * @param firstAttempt attempt that owns the physical log files (all tasks
 *        in the JVM share them)
 * @param updatedTaskLogFileDetails post-truncation details per task
 */
private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry :
      updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      // Default to [0, 0] for logs that have no recorded detail.
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set previous lengths
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set current lengths
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception in updateIndicesAfterLogTruncation : "
          + StringUtils.stringifyException(ioe));
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.");
    }
  }
}
java
{ "resource": "" }
q161762
CombineFileInputFormat.getStatsString
train
/**
 * Serializes per-split-type statistics to a JSON string. Every split type
 * is reported both in absolute terms and as a percentage of the ALL
 * aggregate, plus per-split averages.
 *
 * NOTE(review): if the ALL aggregate has zero splits/size/blocks, the
 * float divisions below produce NaN/Infinity — confirm ALL is always
 * populated before this is called.
 */
private String getStatsString() {
  SplitTypeStats allTypeStats = splitTypeStatsMap.get(SplitType.ALL);
  Map<String, Map<String, Number>> statsMapMap =
      new HashMap<String, Map<String, Number>>();
  for (Map.Entry<SplitType, SplitTypeStats> entry :
      splitTypeStatsMap.entrySet()) {
    Map<String, Number> statsMap = new HashMap<String, Number>();
    statsMapMap.put(entry.getKey().toString(), statsMap);
    // Percentages are relative to the ALL aggregate.
    float percentTotalSplitCount =
        (100f * entry.getValue().getTotalSplitCount()) /
        allTypeStats.getTotalSplitCount();
    float percentTotalSize =
        (100f * entry.getValue().getTotalSize()) /
        allTypeStats.getTotalSize();
    float percentTotalBlockCount =
        (100f * entry.getValue().getTotalBlockCount()) /
        allTypeStats.getTotalBlockCount();
    // Averages are relative to this type's own split count.
    float averageSizePerSplit =
        ((float) entry.getValue().getTotalSize()) /
        entry.getValue().getTotalSplitCount();
    float averageHostCountPerSplit =
        ((float) entry.getValue().getTotalHostCount()) /
        entry.getValue().getTotalSplitCount();
    float averageBlockCountPerSplit =
        ((float) entry.getValue().getTotalBlockCount()) /
        entry.getValue().getTotalSplitCount();
    statsMap.put("totalSplitCount", entry.getValue().getTotalSplitCount());
    statsMap.put("percentTotalSplitCount", percentTotalSplitCount);
    statsMap.put("totalSize", entry.getValue().getTotalSize());
    statsMap.put("percentTotalSize", percentTotalSize);
    statsMap.put("averageSizePerSplit", averageSizePerSplit);
    statsMap.put("totalHostCount", entry.getValue().getTotalHostCount());
    statsMap.put("averageHostCountPerSplit", averageHostCountPerSplit);
    statsMap.put("totalBlockCount", entry.getValue().getTotalBlockCount());
    statsMap.put("percentTotalBlockCount", percentTotalBlockCount);
    statsMap.put("averageBlockCountPerSplit", averageBlockCountPerSplit);
  }
  return JSON.toString(statsMapMap);
}
java
{ "resource": "" }
q161763
CombineFileInputFormat.createPool
train
/**
 * Registers a new pool of paths: files accepted by all of the given
 * filters are grouped and combined separately from other pools.
 *
 * @param conf job configuration (unused here, kept for API compatibility)
 * @param filters filters that together define membership in this pool
 */
protected void createPool(JobConf conf, List<PathFilter> filters) {
  MultiPathFilter poolFilter = new MultiPathFilter(filters);
  pools.add(poolFilter);
}
java
{ "resource": "" }
q161764
CombineFileInputFormat.sortBlocksBySize
train
/**
 * Sorts, in place, each node's block list with the size-based comparator.
 *
 * @param nodeToBlocks map of node name to that node's blocks; every list
 *        is reordered in place
 */
private void sortBlocksBySize(Map<String, List<OneBlockInfo>> nodeToBlocks) {
  OneBlockInfoSizeComparator bySize = new OneBlockInfoSizeComparator();
  for (List<OneBlockInfo> blocks : nodeToBlocks.values()) {
    Collections.sort(blocks, bySize);
  }
}
java
{ "resource": "" }
q161765
JobInProgress.updateMetrics
train
/**
 * Pushes every counter of this job into the job metrics record, tagging
 * each update with its counter group and display name.
 */
public void updateMetrics() {
  Counters counters = getCounters();
  for (Counters.Group group : counters) {
    jobMetrics.setTag("group", group.getDisplayName());
    for (Counters.Counter counter : group) {
      jobMetrics.setTag("counter", counter.getDisplayName());
      // Metric values are floats; very large long counters lose precision.
      jobMetrics.setMetric("value", (float) counter.getCounter());
      jobMetrics.update();
    }
  }
}
java
{ "resource": "" }
q161766
JobInProgress.getTasks
train
/**
 * Returns the TIP array for the requested task type, or an empty array
 * for any unrecognized type.
 */
TaskInProgress[] getTasks(TaskType type) {
  switch (type) {
  case MAP:
    return maps;
  case REDUCE:
    return reduces;
  case JOB_SETUP:
    return setup;
  case JOB_CLEANUP:
    return cleanup;
  default:
    return new TaskInProgress[0];
  }
}
java
{ "resource": "" }
q161767
JobInProgress.canLaunchJobCleanupTask
train
/**
 * Decides whether the job-cleanup task may be launched now: the job must
 * still be RUNNING/PREP, setup must have finished, cleanup must not have
 * been launched yet, and either the job was killed/failed or all of its
 * tasks (per the job's completion policy) are done.
 *
 * @return true if the cleanup task can be scheduled
 */
private synchronized boolean canLaunchJobCleanupTask() {
  // check if the job is running
  if (status.getRunState() != JobStatus.RUNNING &&
      status.getRunState() != JobStatus.PREP) {
    return false;
  }
  // check if cleanup task has been launched already or if setup isn't
  // launched already. The later check is useful when number of maps is
  // zero.
  if (launchedCleanup || !isSetupFinished()) {
    return false;
  }
  // check if job has failed or killed
  if (jobKilled || jobFailed) {
    return true;
  }
  boolean mapsDone = ((finishedMapTasks + failedMapTIPs) == (numMapTasks));
  boolean reducesDone =
      ((finishedReduceTasks + failedReduceTIPs) == numReduceTasks);
  boolean mapOnlyJob = (numReduceTasks == 0);
  if (mapOnlyJob) {
    return mapsDone;
  }
  // Some jobs are configured to finish as soon as the reduces are done,
  // without waiting for straggler maps.
  if (jobFinishWhenReducesDone) {
    return reducesDone;
  }
  return mapsDone && reducesDone;
}
java
{ "resource": "" }
q161768
JobInProgress.canLaunchSetupTask
train
/**
 * A setup task may be launched only once, on an initialized job that is
 * still in PREP and has been neither killed nor failed.
 */
private synchronized boolean canLaunchSetupTask() {
  if (!tasksInited.get() || launchedSetup) {
    return false;
  }
  if (jobKilled || jobFailed) {
    return false;
  }
  return status.getRunState() == JobStatus.PREP;
}
java
{ "resource": "" }
q161769
JobInProgress.obtainNewReduceTask
train
/**
 * Hands out a new reduce task for the given tracker, if the job is
 * running, enough map output is available to start shuffling, and a
 * suitable TIP is found.
 *
 * @param tts status of the requesting tracker
 * @param clusterSize number of trackers in the cluster
 * @param numUniqueHosts number of distinct hosts in the cluster
 * @return the task to run, or null if nothing can be scheduled now
 * @throws IOException declared for API compatibility
 */
public synchronized Task obtainNewReduceTask(TaskTrackerStatus tts,
                                             int clusterSize,
                                             int numUniqueHosts
                                            ) throws IOException {
  if (status.getRunState() != JobStatus.RUNNING) {
    LOG.info("Cannot create task split for " + profile.getJobID());
    return null;
  }
  // Ensure we have sufficient map outputs ready to shuffle before
  // scheduling reduces
  if (!scheduleReduces()) {
    return null;
  }
  int target = findNewReduceTask(tts, clusterSize, numUniqueHosts);
  if (target == -1) {
    return null;
  }
  Task result = reduces[target].getTaskToRun(tts.getTrackerName());
  if (result != null) {
    // Record the attempt as running on this tracker.
    addRunningTaskToTIP(reduces[target], result.getTaskID(), tts, true);
  }
  return result;
}
java
{ "resource": "" }
q161770
JobInProgress.getBlackListedTrackers
train
/**
 * @return trackers whose failure count for this job has reached the
 *         per-tracker limit, mapped to their recorded failure reasons
 */
Map<String, List<String>> getBlackListedTrackers() {
  Map<String, List<String>> blackListed =
      new HashMap<String, List<String>>();
  for (Map.Entry<String, List<String>> entry :
      trackerToFailuresMap.entrySet()) {
    List<String> failures = entry.getValue();
    if (failures.size() >= maxTaskFailuresPerTracker) {
      blackListed.put(entry.getKey(), failures);
    }
  }
  return blackListed;
}
java
{ "resource": "" }
q161771
JobInProgress.getTaskTrackerErrors
train
synchronized Map<String, List<String>> getTaskTrackerErrors() { // Clone the 'trackerToFailuresMap' and return the copy Map<String, List<String>> trackerErrors = new TreeMap<String, List<String>>(trackerToFailuresMap); return trackerErrors; }
java
{ "resource": "" }
q161772
JobInProgress.scheduleMap
train
/**
 * Marks a map TIP as running: seeds its progress statistics and registers
 * it in the running-map cache at every topology level of each of its
 * split locations (or in the non-local list when it has none).
 *
 * @param tip the map TIP being scheduled
 */
protected synchronized void scheduleMap(TaskInProgress tip) {
  runningMapTaskStats.add(0.0f);
  runningTaskMapByteProcessingRateStats.add(0.0f);
  if (runningMapCache == null) {
    LOG.warn("Running cache for maps is missing!! "
             + "Job details are missing.");
    return;
  }
  String[] splitLocations = tip.getSplitLocations();

  // Add the TIP to the list of non-local running TIPs
  if (splitLocations.length == 0) {
    nonLocalRunningMaps.add(tip);
    return;
  }

  for(String host: splitLocations) {
    Node node = jobtracker.getNode(host);
    // Register the TIP at each ancestor level up to maxLevel so
    // rack/datacenter-local lookups can find it.
    for (int j = 0; j < maxLevel; ++j) {
      Set<TaskInProgress> hostMaps = runningMapCache.get(node);
      if (hostMaps == null) {
        // create a cache if needed
        hostMaps = new LinkedHashSet<TaskInProgress>();
        runningMapCache.put(node, hostMaps);
      }
      hostMaps.add(tip);
      node = node.getParent();
    }
  }
}
java
{ "resource": "" }
q161773
JobInProgress.scheduleReduce
train
/**
 * Marks a reduce TIP as running: seeds its copy/sort/reduce progress-rate
 * statistics with zero samples and adds it to the running-reduce set.
 *
 * @param tip the reduce TIP being scheduled
 */
protected synchronized void scheduleReduce(TaskInProgress tip) {
  runningReduceTaskStats.add(0.0f);
  runningTaskCopyProcessingRateStats.add(0.0f);
  runningTaskSortProcessingRateStats.add(0.0f);
  runningTaskReduceProcessingRateStats.add(0.0f);
  if (runningReduces == null) {
    // Defensive: can only happen if job bookkeeping was torn down.
    LOG.warn("Running cache for reducers missing!! "
             + "Job details are missing.");
    return;
  }
  runningReduces.add(tip);
}
java
{ "resource": "" }
q161774
JobInProgress.findNewReduceTask
train
/**
 * Picks the index of a reduce TIP to run on the given tracker, or -1 when
 * none can be scheduled. Checks, in order: that the job has reduces, that
 * the tracker is not blacklisted for this job, that the tracker has disk
 * headroom for the estimated reduce input, then a never-run reduce, then
 * a speculative re-execution candidate.
 *
 * @param tts status of the requesting tracker
 * @param clusterSize number of trackers (cached for later heuristics)
 * @param numUniqueHosts number of distinct hosts in the cluster
 * @return index of the chosen TIP within the job, or -1
 */
private synchronized int findNewReduceTask(TaskTrackerStatus tts,
                                           int clusterSize,
                                           int numUniqueHosts) {
  if (numReduceTasks == 0) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("No reduces to schedule for " + profile.getJobID());
    }
    return -1;
  }
  String taskTracker = tts.getTrackerName();
  TaskInProgress tip = null;

  // Update the last-known clusterSize
  this.clusterSize = clusterSize;

  if (!shouldRunOnTaskTracker(taskTracker)) {
    return -1;
  }

  long outSize = resourceEstimator.getEstimatedReduceInputSize();
  long availSpace = tts.getResourceStatus().getAvailableSpace();
  // Keep a configurable cushion of free disk beyond the estimated input.
  final long SAVETY_BUFFER =
      conf.getLong("mapred.reduce.reserved.disk.mb", 300) * 1024 * 1024;
  if (availSpace < outSize + SAVETY_BUFFER) {
    LOG.warn("No room for reduce task. Node " + taskTracker + " has " +
             availSpace + " bytes free; The safty buffer is " +
             SAVETY_BUFFER + " bytes; but we expect map to take " + outSize);
    return -1; //see if a different TIP might work better.
  }

  // 1. check for a never-executed reduce tip
  // reducers don't have a cache and so pass -1 to explicitly call that out
  tip = findTaskFromList(nonRunningReduces, tts, numUniqueHosts, false);
  if (tip != null) {
    scheduleReduce(tip);
    return tip.getIdWithinJob();
  }

  // 2. check for a reduce tip to be speculated
  if (hasSpeculativeReduces) {
    tip = getSpeculativeReduce(tts.getTrackerName(), tts.getHost());
    if (tip != null) {
      scheduleReduce(tip);
      return tip.getIdWithinJob();
    }
  }

  return -1;
}
java
{ "resource": "" }
q161775
JobInProgress.kill
train
/**
 * Requests that this job be killed. If job initialization is not in
 * progress the job is terminated immediately; otherwise only the kill
 * flag is set and the initializing thread performs the termination when
 * it finishes, which avoids nested locking between the init path and
 * terminate().
 */
public void kill() {
  boolean killNow = false;
  synchronized(jobInitKillStatus) {
    jobInitKillStatus.killed = true;
    //if not in middle of init, terminate it now
    if(!jobInitKillStatus.initStarted || jobInitKillStatus.initDone) {
      //avoiding nested locking by setting flag
      killNow = true;
    }
  }
  if(killNow) {
    // Terminate outside the jobInitKillStatus lock.
    terminate(JobStatus.KILLED);
  }
}
java
{ "resource": "" }
q161776
JobInProgress.garbageCollect
train
/**
 * Releases everything this completed job holds: cancels tracker
 * reservations, rolls back speculative-task counters, notifies the
 * JobTracker, deletes local/system-dir files, and drops the large
 * scheduling caches so they can be garbage collected.
 */
synchronized void garbageCollect() {
  // Cancel task tracker reservation
  cancelReservedSlots();

  // Remove the remaining speculative tasks counts
  totalSpeculativeReduceTasks.addAndGet(-speculativeReduceTasks);
  totalSpeculativeMapTasks.addAndGet(-speculativeMapTasks);

  garbageCollected = true;

  // Let the JobTracker know that a job is complete
  jobtracker.getInstrumentation().decWaitingMaps(getJobID(), pendingMaps());
  jobtracker.getInstrumentation().decWaitingReduces(getJobID(),
      pendingReduces());
  jobtracker.storeCompletedJob(this);
  jobtracker.finalizeJob(this);

  try {
    // Definitely remove the local-disk copy of the job file
    if (localJobFile != null) {
      localFs.delete(localJobFile, true);
      localJobFile = null;
    }

    // clean up splits
    for (int i = 0; i < maps.length; i++) {
      maps[i].clearSplit();
    }

    // JobClient always creates a new directory with job files
    // so we remove that directory to cleanup
    // Delete temp dfs dirs created if any, like in case of
    // speculative exn of reduces.
    Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
    // Deletion happens asynchronously on the cleanup queue.
    new CleanupQueue().addToQueue(new PathDeletionContext(
        FileSystem.get(conf), tempDir.toUri().getPath()));
  } catch (IOException e) {
    LOG.warn("Error cleaning up "+profile.getJobID()+": "+e);
  }

  cleanUpMetrics();
  // free up the memory used by the data structures
  this.nonRunningMapCache = null;
  this.runningMapCache = null;
  this.nonRunningReduces = null;
  this.runningReduces = null;
  this.trackerMapStats = null;
  this.trackerReduceStats = null;
}
java
{ "resource": "" }
q161777
JobInProgress.findFinishedMap
train
/**
 * Returns the status of the successful attempt of the given map, or null
 * if the map has not completed successfully.
 *
 * @param mapId index of the map TIP within this job
 */
public synchronized TaskStatus findFinishedMap(int mapId) {
  TaskInProgress tip = maps[mapId];
  if (!tip.isComplete()) {
    return null;
  }
  for (TaskStatus attemptStatus : tip.getTaskStatuses()) {
    if (attemptStatus.getRunState() == TaskStatus.State.SUCCEEDED) {
      return attemptStatus;
    }
  }
  return null;
}
java
{ "resource": "" }
q161778
JobInProgress.atSpeculativeCap
train
/**
 * Decides whether this job has reached its speculative-execution cap for
 * the given task type. The job may speculate as long as its speculative
 * count stays below max(MIN_SPEC_CAP, MIN_SLOTS_CAP * slots,
 * speculativeCap * non-speculative-running), and the cluster-wide total
 * stays below TOTAL_SPECULATIVECAP * slots.
 *
 * @param type MAP or REDUCE
 * @return true if no more speculative tasks of this type may launch
 */
private boolean atSpeculativeCap(TaskType type) {
  float numTasks = (type == TaskType.MAP) ?
    (float)(runningMapTasks - speculativeMapTasks) :
    (float)(runningReduceTasks - speculativeReduceTasks);

  if (numTasks == 0){
    return true; // avoid divide by zero
  }
  int speculativeTaskCount = type == TaskType.MAP ? speculativeMapTasks
      : speculativeReduceTasks;
  int totalSpeculativeTaskCount = type == TaskType.MAP ?
      totalSpeculativeMapTasks.get() : totalSpeculativeReduceTasks.get();
  //return true if totalSpecTask < max(10, 0.01 * total-slots,
  //                                   0.1 * total-running-tasks)

  if (speculativeTaskCount < MIN_SPEC_CAP) {
    return false; // at least one slow tracker's worth of slots(default=10)
  }
  ClusterStatus c = jobtracker.getClusterStatus(false);
  int numSlots = (type == TaskType.MAP ? c.getMaxMapTasks()
      : c.getMaxReduceTasks());
  if (speculativeTaskCount < numSlots * MIN_SLOTS_CAP) {
    return false;
  }

  // Check if the total CAP has been reached
  if (totalSpeculativeTaskCount >= numSlots * TOTAL_SPECULATIVECAP) {
    return true;
  }

  boolean atCap = (((speculativeTaskCount)/numTasks) >= speculativeCap);
  if (LOG.isDebugEnabled()) {
    LOG.debug("SpeculativeCap is "+speculativeCap+", specTasks/numTasks is " +
        ((speculativeTaskCount)/numTasks)+
        ", so atSpecCap() is returning "+atCap);
  }
  return atCap;
}
java
{ "resource": "" }
q161779
JobInProgress.isSlowTracker
train
/**
 * Declares a tracker "slow" for this job when its mean map or reduce task
 * time exceeds the job-wide mean by more than slowNodeThreshold standard
 * deviations.
 *
 * @param taskTracker name of the tracker to evaluate
 * @return true if the tracker is statistically slower than the job mean
 */
protected boolean isSlowTracker(String taskTracker) {
  if (trackerMapStats.get(taskTracker) != null &&
      trackerMapStats.get(taskTracker).mean() -
      mapTaskStats.mean() > mapTaskStats.std()*slowNodeThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Tracker " + taskTracker +
          " declared slow. trackerMapStats.get(taskTracker).mean() :" +
          trackerMapStats.get(taskTracker).mean() + " mapTaskStats :" +
          mapTaskStats);
    }
    return true;
  }
  if (trackerReduceStats.get(taskTracker) != null &&
      trackerReduceStats.get(taskTracker).mean() -
      reduceTaskStats.mean() > reduceTaskStats.std()*slowNodeThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Tracker " + taskTracker +
          " declared slow. trackerReduceStats.get(taskTracker).mean() :" +
          trackerReduceStats.get(taskTracker).mean() +
          " reduceTaskStats :" + reduceTaskStats);
    }
    return true;
  }
  return false;
}
java
{ "resource": "" }
q161780
JobInProgress.refresh
train
/**
 * Recomputes cached scheduling state as of the given timestamp:
 * speculative candidates for maps and reduces, then task counts and
 * aggregate wait times for both task types.
 *
 * @param now current time in milliseconds
 */
void refresh(long now) {
  refreshCandidateSpeculativeMaps(now);
  refreshCandidateSpeculativeReduces(now);
  refreshTaskCountsAndWaitTime(TaskType.MAP, now);
  refreshTaskCountsAndWaitTime(TaskType.REDUCE, now);
}
java
{ "resource": "" }
q161781
JobInProgress.refreshTaskCountsAndWaitTime
train
/**
 * Walks every TIP of the given type and refreshes the cached aggregates:
 * running/needed/pending task counts and the total time tasks have waited
 * since the job started.
 *
 * @param type MAP or REDUCE
 * @param now current time in milliseconds, used for TIPs not yet started
 */
protected void refreshTaskCountsAndWaitTime(TaskType type, long now) {
  TaskInProgress[] allTips = getTasks(type);
  int finishedTips = 0;
  int runningTips = 0;
  int runningTaskAttempts = 0;
  long totalWaitTime = 0;
  long jobStartTime = this.getStartTime();
  for (TaskInProgress tip : allTips) {
    if (tip.isComplete()) {
      finishedTips += 1;
    } else if(tip.isRunning()) {
      runningTaskAttempts += tip.getActiveTasks().size();
      runningTips += 1;
    }
    if (tip.getExecStartTime() > 0) {
      // Started: waited from job start until its first execution.
      totalWaitTime += tip.getExecStartTime() - jobStartTime;
    } else {
      // Not started yet: still waiting as of `now`.
      totalWaitTime += now - jobStartTime;
    }
  }
  if (TaskType.MAP == type) {
    totalMapWaitTime = totalWaitTime;
    runningMapTasks = runningTaskAttempts;
    neededMapTasks = numMapTasks - runningTips - finishedTips +
        neededSpeculativeMaps();
    pendingMapTasks = numMapTasks - runningTaskAttempts - failedMapTIPs -
        finishedMapTasks + speculativeMapTasks;
  } else {
    totalReduceWaitTime = totalWaitTime;
    runningReduceTasks = runningTaskAttempts;
    neededReduceTasks = numReduceTasks - runningTips - finishedTips +
        neededSpeculativeReduces();
    pendingReduceTasks = numReduceTasks - runningTaskAttempts -
        failedReduceTIPs - finishedReduceTasks + speculativeReduceTasks;
  }
}
java
{ "resource": "" }
q161782
JobTrackerJspHelper.generateSummaryTable
train
/**
 * Writes the cluster summary as a flat XML fragment (maps, reduces,
 * submissions, nodes, capacities, avg tasks per node) to the JSP output.
 *
 * @param out JSP writer receiving the XML
 * @param tracker the JobTracker whose cluster status is summarized
 * @throws IOException if writing to the JSP output fails
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) /
        (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    // No live trackers: show a dash rather than dividing by zero.
    tasksPerNodeStr = "-";
  }
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() +
          "</total_submissions>\n" +
          "<nodes>" + status.getTaskTrackers() + "</nodes>\n" +
          "<map_task_capacity>" + status.getMaxMapTasks() +
          "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + status.getMaxReduceTasks() +
          "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr +
          "</avg_tasks_per_node>\n");
}
java
{ "resource": "" }
q161783
TaskAttemptID.appendTo
train
/**
 * Appends "&lt;taskid&gt;_&lt;attempt number&gt;" to the builder and returns the
 * same builder.
 */
protected StringBuilder appendTo(StringBuilder builder) {
  taskId.appendTo(builder);
  return builder.append(SEPARATOR).append(id);
}
java
{ "resource": "" }
q161784
TaskAttemptID.compareTo
train
/**
 * Orders attempt ids by their task id first, then by attempt number.
 *
 * @param o the other id; must be a TaskAttemptID
 * @throws ClassCastException if o is not a TaskAttemptID
 */
@Override
public int compareTo(ID o) {
  TaskAttemptID that = (TaskAttemptID) o;
  int tipComp = this.taskId.compareTo(that.taskId);
  if (tipComp != 0) {
    return tipComp;
  }
  // Compare explicitly rather than via "this.id - that.id": the
  // subtraction can overflow and report the wrong ordering.
  if (this.id < that.id) {
    return -1;
  }
  return (this.id == that.id) ? 0 : 1;
}
java
{ "resource": "" }
q161785
Schedulable.distributeShare
train
/**
 * Distributes the total share among the schedulables using the strategy
 * implied by the comparator: sorted order for FIFO/DEADLINE, weighted
 * fair sharing for FAIR, and priority-group trickling for PRIORITY.
 *
 * @throws IllegalArgumentException for an unrecognized comparator
 */
public static void distributeShare(
    double total,
    final Collection<? extends Schedulable> schedulables,
    ScheduleComparator comparator) {
  if (comparator == ScheduleComparator.FIFO
      || comparator == ScheduleComparator.DEADLINE) {
    Schedulable.distributeShareSorted(total, schedulables, comparator);
  } else if (comparator == ScheduleComparator.FAIR) {
    Schedulable.distributeShareFair(total, schedulables);
  } else if (comparator == ScheduleComparator.PRIORITY) {
    Schedulable.distributeSharePriority(total, schedulables);
  } else {
    throw new IllegalArgumentException("Unknown comparator");
  }
}
java
{ "resource": "" }
q161786
Schedulable.distributeShareSorted
train
/**
 * Greedy distribution in comparator order: each schedulable, in turn,
 * gets min(requested, maximum) until the total runs out; the schedulable
 * at the boundary receives whatever remains.
 *
 * NOTE(review): schedulables after the boundary keep whatever share value
 * they previously had — presumably callers reset shares beforehand;
 * confirm.
 */
private static void distributeShareSorted(
    double total,
    final Collection<? extends Schedulable> schedulables,
    ScheduleComparator comparator) {
  List<Schedulable> sches = new ArrayList<Schedulable>(schedulables);
  Collections.sort(sches, comparator);
  for (Schedulable schedulable : sches) {
    // A schedulable never gets more than it asked for or is allowed.
    int max = Math.min(schedulable.getRequested(), schedulable.getMaximum());
    if (total > max) {
      schedulable.share = max;
      total -= max;
    } else {
      schedulable.share = total;
      return;
    }
  }
}
java
{ "resource": "" }
q161787
Schedulable.distributeShareFair
train
/**
 * Weighted fair sharing: binary-searches for the weight-to-share ratio at
 * which the clamped shares sum to the given total, then assigns each
 * schedulable its share at that ratio.
 */
private static void distributeShareFair(
    double total, final Collection<? extends Schedulable> schedulables) {
  // targetFunction(x) is monotonic in x, which is what the binary search
  // relies on.
  BinarySearcher searcher = new BinarySearcher() {
    @Override
    protected double targetFunction(double x) {
      return totalShareWithRatio(schedulables, x);
    }
  };
  double ratio = searcher.getSolution(total);
  for (Schedulable schedulable : schedulables) {
    schedulable.share = shareWithRatio(schedulable, ratio);
  }
}
java
{ "resource": "" }
q161788
Schedulable.totalShareWithRatio
train
/**
 * Sums the share every schedulable would receive at the given
 * weight-to-share ratio.
 */
private static double totalShareWithRatio(
    Collection<? extends Schedulable> schedulables,
    double weightToShareRatio) {
  double sum = 0;
  for (Schedulable s : schedulables) {
    sum += shareWithRatio(s, weightToShareRatio);
  }
  return sum;
}
java
{ "resource": "" }
q161789
Schedulable.shareWithRatio
train
/**
 * Computes one schedulable's share at the given weight-to-share ratio:
 * weight * ratio, floored at its minimum, then capped by its maximum and
 * by its demand.
 */
private static double shareWithRatio(
    Schedulable schedulable, double weightToShareRatio) {
  double raw = schedulable.getWeight() * weightToShareRatio;
  double floored = Math.max(schedulable.getMinimum(), raw);
  double capped = Math.min(schedulable.getMaximum(), floored);
  return Math.min(schedulable.getRequested(), capped);
}
java
{ "resource": "" }
q161790
Schedulable.assignShareIfUnderAllocated
train
/**
 * Zeroes every share, then, if the total share cannot even cover the sum
 * of min(demand, minimum) across schedulables, grants each its minimum.
 *
 * @return the share remaining after the minimum demands (may be negative
 *         or zero, which callers treat as "fully allocated")
 */
private static double assignShareIfUnderAllocated(
    double totalShare, final Collection<? extends Schedulable> schedulables) {
  double totalMinDemand = 0;
  for (Schedulable schedulable : schedulables) {
    schedulable.share = 0;
    totalMinDemand += Math.min(schedulable.getRequested(),
                               schedulable.getMinimum());
  }
  if ((totalMinDemand > 0) && (totalMinDemand >= totalShare)) {
    // Not enough share to go around: everyone just gets their minimum.
    distributeShareMin(schedulables);
  }
  return totalShare - totalMinDemand;
}
java
{ "resource": "" }
q161791
Schedulable.generatePriorityGroupedSchedulables
train
/**
 * Buckets the schedulables by priority. The TreeMap's natural key order
 * means iteration visits priority groups in ascending priority value.
 */
private static TreeMap<Integer, Vector<Schedulable>> generatePriorityGroupedSchedulables(
    final Collection<? extends Schedulable> schedulables) {
  TreeMap<Integer, Vector<Schedulable>> grouped =
      new TreeMap<Integer, Vector<Schedulable>>();
  for (Schedulable schedulable : schedulables) {
    Vector<Schedulable> bucket = grouped.get(schedulable.getPriority());
    if (bucket == null) {
      bucket = new Vector<Schedulable>();
      grouped.put(schedulable.getPriority(), bucket);
    }
    bucket.add(schedulable);
  }
  return grouped;
}
java
{ "resource": "" }
q161792
Schedulable.distributeSharePriority
train
/**
 * Priority-based distribution: first satisfies minimum demands (stopping
 * there if the total is exhausted), then hands the remainder to priority
 * groups from highest to lowest.
 */
private static void distributeSharePriority(
    double total, final Collection<? extends Schedulable> schedulables) {
  // 1) If share < sum(min(demand, min_alloc)) then do fair share and quit.
  double residualShare = assignShareIfUnderAllocated(total, schedulables);
  if (residualShare <= 0.0) {
    return;
  }

  // 2) Group schedulables according to priorities.
  TreeMap<Integer, Vector<Schedulable>> prioritizedSchedulableMap =
    generatePriorityGroupedSchedulables(schedulables);

  // 3) Trickle the share across priority groups.
  trickleShareDownPriorityGroups(residualShare, prioritizedSchedulableMap);
}
java
{ "resource": "" }
q161793
Schedulable.distributeShareMin
train
/**
 * Adds each schedulable's configured minimum on top of its current share.
 */
private static void distributeShareMin(
    Collection<? extends Schedulable> schedulableVector) {
  for (Schedulable s : schedulableVector) {
    s.share = s.share + s.getMinimum();
  }
}
java
{ "resource": "" }
q161794
Schedulable.distributeShareMax
train
/**
 * Adds, on top of each schedulable's current share, the larger of its
 * minimum and its demand, capped by its maximum.
 */
private static void distributeShareMax(
    Collection<? extends Schedulable> schedulableVector) {
  for (Schedulable s : schedulableVector) {
    double floorOrDemand = Math.max(s.getMinimum(), s.getRequested());
    s.share += Math.min(s.getMaximum(), floorOrDemand);
  }
}
java
{ "resource": "" }
q161795
SystemLogParser.parseMonth
train
/**
 * Maps a (possibly abbreviated) month name to its zero-based index in the
 * months table, or -1 if no table entry starts with the given string.
 */
private int parseMonth(String month) {
  int index = 0;
  for (String candidate : months) {
    if (candidate.startsWith(month)) {
      return index;
    }
    index++;
  }
  return -1;
}
java
{ "resource": "" }
q161796
ReedSolomonDecoder.writeFixedBlock
train
/**
 * Reconstructs one erased block: repeatedly reads a buffer's worth from
 * the surviving inputs, runs the Reed-Solomon decode in `parallelism`
 * partitions, and writes the slice corresponding to
 * {@code erasedLocationToFix} to the output (updating the CRC if given).
 *
 * @param limit total number of bytes to reconstruct
 * @return number of bytes written (== limit on success)
 * @throws IOException on read failure or if the decode is interrupted
 */
long writeFixedBlock(
    FSDataInputStream[] inputs,
    int[] erasedLocations,
    int erasedLocationToFix,
    long limit,
    OutputStream out,
    Progressable reporter,
    ParallelStreamReader parallelReader,
    CRC32 crc) throws IOException {

  LOG.info("Need to write " + limit +
           " bytes for erased location index " + erasedLocationToFix);
  if (crc != null) {
    crc.reset();
  }
  // NOTE(review): `tmp` is never used below — looks like dead code.
  int[] tmp = new int[inputs.length];
  int[] decoded = new int[erasedLocations.length];
  // Loop while the number of written bytes is less than the max.
  long written;
  for (written = 0; written < limit; ) {
    // readFromInputs may discover additional erasures and return an
    // updated erased-location list.
    erasedLocations = readFromInputs(
        inputs, erasedLocations, limit, reporter, parallelReader);
    if (decoded.length != erasedLocations.length) {
      decoded = new int[erasedLocations.length];
    }

    int toWrite = (int)Math.min((long)bufSize, limit - written);

    // Split the buffer into `parallelism` chunks and decode them
    // concurrently.
    int partSize = (int) Math.ceil(bufSize * 1.0 / parallelism);
    try {
      long startTime = System.currentTimeMillis();
      for (int i = 0; i < parallelism; i++) {
        decodeOps.acquire(1);
        int start = i * partSize;
        int count = Math.min(bufSize - start, partSize);
        parallelDecoder.execute(new DecodeOp(
          readBufs, writeBufs, start, count,
          erasedLocations, reedSolomonCode[i]));
      }
      // Barrier: wait until all decode ops have released their permits.
      decodeOps.acquire(parallelism);
      decodeOps.release(parallelism);
      decodeTime += (System.currentTimeMillis() - startTime);
    } catch (InterruptedException e) {
      // NOTE(review): interrupt status and cause are dropped here.
      throw new IOException("Interrupted while waiting for read result");
    }

    // Emit only the slice that corresponds to the block being fixed.
    for (int i = 0; i < erasedLocations.length; i++) {
      if (erasedLocations[i] == erasedLocationToFix) {
        out.write(writeBufs[i], 0, toWrite);
        if (crc != null) {
          crc.update(writeBufs[i], 0, toWrite);
        }
        written += toWrite;
        break;
      }
    }
  }
  return written;
}
java
{ "resource": "" }
q161797
ClientConfigurationUtil.mergeConfiguration
train
/**
 * Overlays client-side configuration fetched for the URI's host onto the
 * given configuration. Hosts that recently failed a lookup are skipped
 * for BAD_URI_EXPIRY; any failure falls back to the unmodified
 * configuration (and records the host as bad).
 *
 * @param uri the filesystem URI whose host is looked up
 * @param conf the base configuration
 * @return a merged copy, or {@code conf} itself when the lookup is
 *         disabled, cached-bad, or fails
 * @throws IOException declared for API compatibility; failures are
 *         handled internally
 */
public static Configuration mergeConfiguration(URI uri, Configuration conf)
    throws IOException {
  try {
    // Skip hosts that failed recently to avoid hammering a bad endpoint.
    Long lastBadAccess = badURIs.get(uri.getHost());
    if (lastBadAccess != null) {
      if (System.currentTimeMillis() - lastBadAccess < BAD_URI_EXPIRY) {
        return conf;
      } else {
        badURIs.remove(uri.getHost());
      }
    }

    boolean lookupLogical = conf.getBoolean(
        "dfs.client.configerator.logical.lookup.enabled", false);
    Properties props = new Properties(System.getProperties());
    props.setProperty("dfs.client.configerator.logical.lookup.enabled",
        lookupLogical + "");
    String configDir = conf.get("dfs.client.configerator.dir");
    if (configDir != null) {
      props.setProperty("dfs.client.configerator.dir", configDir);
    }

    String json = getInstance(uri, conf).getConfiguration(
        uri.getHost(),
        conf.getInt("hdfs.retrieve.client_configuration_timeout", 3000),
        props);
    if (json == null) {
      LOG.info("Client configuration lookup disabled/failed. " +
          "Using default configuration");
      return conf;
    }
    // Copy the base conf, then overwrite with every key from the fetched
    // JSON payload.
    Configuration newConf = new Configuration(conf);
    JSONObject jsonObj = new JSONObject(json);
    Configuration clientConf = new Configuration(jsonObj);
    Iterator<Map.Entry<String, String>> it = clientConf.iterator();
    while (it.hasNext()) {
      Map.Entry<String, String> entry = it.next();
      String key = entry.getKey();
      String val = entry.getValue();
      newConf.set(key, val);
    }
    // Mark the merge as done so downstream code does not repeat it.
    newConf.setBoolean("client.configuration.lookup.done", true);
    return newConf;
  } catch (Throwable t) {
    badURIs.put(uri.getHost(), System.currentTimeMillis());
    // In case of any error, fallback to the default configuration.
    LOG.info("Problem retreiving client side configuration " +
        ". Using default configuration instead", t);
    return conf;
  }
}
java
{ "resource": "" }
q161798
JVMId.forName
train
public static JVMId forName(String str) throws IllegalArgumentException { if(str == null) return null; try { String[] parts = str.split("_"); if(parts.length == 5) { if(parts[0].equals(JVM)) { boolean isMap = false; if(parts[3].equals("m")) isMap = true; else if(parts[3].equals("r")) isMap = false; else throw new Exception(); return new JVMId(parts[1], Integer.parseInt(parts[2]), isMap, Integer.parseInt(parts[4])); } } }catch (Exception ex) {//fall below } throw new IllegalArgumentException("TaskId string : " + str + " is not properly formed"); }
java
{ "resource": "" }
q161799
LinuxUtilizationGauger.readFile
train
/**
 * Reads a text file fully and returns its lines.
 *
 * @param fileName path of the file to read
 * @return one array element per line, without line terminators
 * @throws IOException if the file cannot be opened or read
 */
private String[] readFile(String fileName) throws IOException {
  ArrayList<String> result = new ArrayList<String>();
  BufferedReader bReader = new BufferedReader(new FileReader(fileName));
  try {
    String line;
    while ((line = bReader.readLine()) != null) {
      result.add(line);
    }
  } finally {
    // Close in a finally block so the descriptor is released even when
    // readLine throws; closing the BufferedReader also closes the
    // underlying FileReader.
    bReader.close();
  }
  return result.toArray(new String[result.size()]);
}
java
{ "resource": "" }