_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q161500
FSNamesystem.clearReplicationQueues
train
/**
 * Empties every replication-related queue and resets the matching counters.
 * Called under circumstances where queued replication work must be discarded
 * wholesale (e.g. re-initialization). Takes the FSNamesystem write lock for
 * the whole operation.
 */
void clearReplicationQueues() {
  writeLock();
  try {
    // neededReplications has its own monitor in addition to the fs lock.
    synchronized (neededReplications) {
      neededReplications.clear();
    }
    underReplicatedBlocksCount = 0;
    corruptReplicas.clear();
    corruptReplicaBlocksCount = 0;
    overReplicatedBlocks.clear();
    raidEncodingTasks.clear();
    // Replace rather than clear the map, dropping all per-node excess sets at once.
    excessReplicateMap = new HashMap<String, LightWeightHashSet<Block>>();
    excessBlocksCount = 0;
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161501
FSNamesystem.wipeDatanode
train
/**
 * Removes a datanode from the storage-id map and the host-level index.
 *
 * @param nodeID identifies the datanode via its storage ID
 * @throws IOException declared for interface consistency; not thrown here
 */
void wipeDatanode(DatanodeID nodeID) throws IOException {
  String key = nodeID.getStorageID();
  // datanodeMap.remove returns the descriptor (possibly null); that same
  // descriptor is then removed from the host index in one expression.
  host2DataNodeMap.remove(datanodeMap.remove(key));
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug(
        "BLOCK* NameSystem.wipeDatanode: " + nodeID.getName() + " storage " + key
        + " is removed from datanodeMap.");
  }
}
java
{ "resource": "" }
q161502
FSNamesystem.heartbeatCheck
train
/**
 * Scans for datanodes whose heartbeat has expired and removes them.
 * Dead nodes are located under the heartbeats monitor only, then removed
 * under the full fsnamesystem write lock — this two-phase approach keeps the
 * scan cheap while preserving the lock ordering (fs lock before heartbeats).
 * Loops until a full scan finds no dead node.
 */
void heartbeatCheck() {
  if (!getNameNode().shouldCheckHeartbeat()) {
    // not to check dead nodes.
    return;
  }
  boolean allAlive = false;
  while (!allAlive) {
    boolean foundDead = false;
    DatanodeID nodeID = null;
    // locate the first dead node.
    synchronized (heartbeats) {
      for (Iterator<DatanodeDescriptor> it = heartbeats.iterator(); it.hasNext();) {
        DatanodeDescriptor nodeInfo = it.next();
        if (isDatanodeDead(nodeInfo)) {
          foundDead = true;
          nodeID = nodeInfo;
          break;
        }
      }
    }
    // acquire the fsnamesystem lock, and then remove the dead node.
    if (foundDead) {
      writeLock();
      try {
        synchronized (heartbeats) {
          synchronized (datanodeMap) {
            DatanodeDescriptor nodeInfo = null;
            try {
              nodeInfo = getDatanode(nodeID);
            } catch (IOException e) {
              // Descriptor lookup can fail if the node re-registered under a
              // different name; treat it as already gone.
              nodeInfo = null;
            }
            // Re-check deadness under the locks: the node may have heartbeated
            // between the scan phase and here.
            if (nodeInfo != null && isDatanodeDead(nodeInfo)) {
              NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
                  + "lost heartbeat from " + nodeInfo.getName());
              removeDatanode(nodeInfo);
              nodeInfo.setStartTime(now());
            }
          }
        }
      } finally {
        writeUnlock();
      }
    }
    allAlive = !foundDead;
  }
}
java
{ "resource": "" }
q161503
FSNamesystem.checkBlockSize
train
/**
 * Validates a reported block length against its file's preferred block size.
 * Rejects negative lengths and files that hold no blocks at all.
 *
 * @return true iff the block length is in [0, preferredBlockSize]
 */
private boolean checkBlockSize(Block block, INodeFile inode) {
  final long numBytes = block.getNumBytes();
  if (numBytes < 0) {
    return false;
  }
  if (inode.getBlocks().length == 0) {
    return false;
  }
  return numBytes <= inode.getPreferredBlockSize();
}
java
{ "resource": "" }
q161504
FSNamesystem.rejectAddStoredBlock
train
/**
 * Rejects an addStoredBlock request and schedules the replica for invalidation
 * on the reporting datanode.
 *
 * @param ignoreInfoLogs suppresses the INFO log even outside safe mode
 * @param parallelInitialBlockReport when true, the parallel-block-report lock
 *        is taken around the invalidation
 */
private void rejectAddStoredBlock(Block block, DatanodeDescriptor node,
    String msg, boolean ignoreInfoLogs, final boolean parallelInitialBlockReport) {
  if ((!isInSafeModeInternal()) && (!ignoreInfoLogs)) {
    NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
        + "addStoredBlock request received for " + block + " size "
        + block.getNumBytes() + " but was rejected and added to invalidSet of "
        + node.getName() + " : " + msg);
  }
  lockParallelBRLock(parallelInitialBlockReport);
  try {
    addToInvalidatesNoLog(block, node, false);
    // NOTE(review): the original comment below says try/finally is unneeded,
    // yet the code does use try/finally — presumably the comment predates a
    // refactor; the try/finally is the safe choice and is kept.
    // we do not need try finally, the lock is unlocked by the worker if locked
  } finally {
    unlockParallelBRLock(parallelInitialBlockReport);
  }
}
java
{ "resource": "" }
q161505
FSNamesystem.processOverReplicatedBlocksAsync
train
/**
 * Background pass that drains a bounded batch of blocks from the
 * over-replicated queue and processes each one. Skipped entirely while in
 * safe mode or while the overreplication monitor is administratively delayed.
 * The queue is drained in short write-locked slices so other namesystem
 * operations can interleave.
 */
private void processOverReplicatedBlocksAsync() {
  // blocks should not be scheduled for deletion during safemode
  if (isInSafeMode()) {
    return;
  }
  if (delayOverreplicationMonitorTime > now()) {
    LOG.info("Overreplication monitor delayed for "
        + ((delayOverreplicationMonitorTime - now()) / 1000) + " seconds");
    return;
  }
  nameNode.clearOutstandingNodes();
  final int nodes = heartbeats.size();
  // Batch size is capped by cluster size times the configured multiplier.
  List<Block> blocksToProcess = new ArrayList<Block>(Math.min(
      overReplicatedBlocks.size(),
      ReplicationConfigKeys.overreplicationWorkMultiplier * nodes));
  // Take the write lock once per slice; pull up to `nodes` blocks per slice.
  for (int i = 0; i < ReplicationConfigKeys.overreplicationWorkMultiplier; i++) {
    writeLock();
    try {
      NameNode.getNameNodeMetrics().numOverReplicatedBlocks.set(overReplicatedBlocks.size());
      overReplicatedBlocks.pollNToList(nodes, blocksToProcess);
      if (overReplicatedBlocks.isEmpty()) {
        break;
      }
    } finally {
      writeUnlock();
    }
  }
  // Process the drained batch outside the slicing loop.
  for (Block block : blocksToProcess) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog
          .debug("BLOCK* NameSystem.processOverReplicatedBlocksAsync: " + block);
    }
    if (block instanceof OverReplicatedBlock) {
      // An OverReplicatedBlock carries hints about which replica was added
      // and which node is preferred for deletion.
      OverReplicatedBlock opb = (OverReplicatedBlock) block;
      processOverReplicatedBlock(block, (short) -1, opb.addedNode, opb.delNodeHint);
    } else {
      processOverReplicatedBlock(block, (short) -1, null, null);
    }
  }
}
java
{ "resource": "" }
q161506
FSNamesystem.updateNeededReplicationQueue
train
/**
 * Re-files a block in the needed-replications priority queue after its
 * replica count changed by {@code delta} on {@code node}. The pre-change
 * counts are reconstructed by subtracting delta from the appropriate bucket
 * (decommissioned vs. live), the old entry is removed, and a new entry is
 * added if the block is still under-replicated.
 */
private void updateNeededReplicationQueue(BlockInfo blockInfo, int delta,
    int numCurrentReplicas, int numCurrentDecommissionedReplicas,
    DatanodeDescriptor node, short fileReplication) {
  int numOldReplicas = numCurrentReplicas;
  int numOldDecommissionedReplicas = numCurrentDecommissionedReplicas;
  // Undo the delta against the bucket the node belongs to, yielding the
  // counts as they were before this change.
  if (node.isDecommissioned() || node.isDecommissionInProgress()) {
    numOldDecommissionedReplicas -= delta;
  } else {
    numOldReplicas -= delta;
  }
  // Remove under the OLD counts (the queue is keyed by priority derived
  // from them), then re-add under the NEW counts if still needed.
  if (fileReplication > numOldReplicas) {
    neededReplications.remove(blockInfo, numOldReplicas,
        numOldDecommissionedReplicas, fileReplication);
  }
  if (fileReplication > numCurrentReplicas) {
    neededReplications.add(blockInfo, numCurrentReplicas,
        numCurrentDecommissionedReplicas, fileReplication);
  }
}
java
{ "resource": "" }
q161507
FSNamesystem.blockReceived
train
/**
 * Handles a block-received notification from a datanode: clears the pending
 * replication entry and records the new replica.
 *
 * @param delHint storage ID of the node hinted for excess-replica deletion;
 *        may be null or empty when no hint was supplied
 * @return the result of addStoredBlock for this replica
 */
private boolean blockReceived(Block block, String delHint,
    DatanodeDescriptor node) throws IOException {
  assert (hasWriteLock());
  // decrement number of blocks scheduled to this datanode.
  node.decBlocksScheduled();
  // get the deletion hint node
  DatanodeDescriptor delHintNode = null;
  if (delHint != null && delHint.length() != 0) {
    delHintNode = datanodeMap.get(delHint);
    if (delHintNode == null) {
      // The hinted node is unknown; proceed without a hint but log it.
      NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block
          + " is expected to be removed from an unrecorded node " + delHint);
    }
  }
  //
  // Modify the blocks->datanode map and node's map.
  //
  pendingReplications.remove(block);
  return addStoredBlock(block, node, delHintNode);
}
java
{ "resource": "" }
q161508
FSNamesystem.getDatanodes
train
/**
 * Builds a report array of datanodes matching the given report type.
 * Each descriptor is copied into a fresh DatanodeInfo so callers cannot
 * mutate namesystem state.
 */
DatanodeInfo[] getDatanodes(DatanodeReportType type) {
  final ArrayList<DatanodeDescriptor> matching = getDatanodeListForReport(type);
  final DatanodeInfo[] report = new DatanodeInfo[matching.size()];
  int idx = 0;
  for (DatanodeDescriptor descriptor : matching) {
    report[idx++] = new DatanodeInfo(descriptor);
  }
  return report;
}
java
{ "resource": "" }
q161509
FSNamesystem.saveNamespace
train
/**
 * Saves the current namespace image to disk. Requires superuser privilege
 * and (unless {@code force}) that the namenode is in safe mode, so the
 * namespace cannot change mid-save.
 *
 * @param force       allow saving even when not in safe mode
 * @param uncompressed write the image without compression
 */
void saveNamespace(boolean force, boolean uncompressed)
    throws AccessControlException, IOException {
  LOG.info("Saving namespace");
  writeLock();
  try {
    // Privilege check is done under the lock, before any state is touched.
    checkSuperuserPrivilege();
    if (!force && !isInSafeMode()) {
      throw new IOException("Safe mode should be turned ON "
          + "in order to create namespace image.");
    }
    getFSImage().saveNamespace(uncompressed);
  } finally {
    writeUnlock();
  }
  LOG.info("Saving namespace - DONE");
}
java
{ "resource": "" }
q161510
FSNamesystem.datanodeDump
train
/**
 * Writes a metasave-style dump of every registered datanode to {@code out}.
 * Holds the read lock plus the datanodeMap monitor for a consistent snapshot.
 */
private void datanodeDump(PrintWriter out) {
  readLock();
  try {
    synchronized (datanodeMap) {
      out.println("Metasave: Number of datanodes: " + datanodeMap.size());
      for (DatanodeDescriptor descriptor : datanodeMap.values()) {
        out.println(descriptor.dumpDatanode());
      }
    }
  } finally {
    readUnlock();
  }
}
java
{ "resource": "" }
q161511
FSNamesystem.startDecommission
train
/**
 * Begins (or resumes tracking of) decommissioning for a datanode.
 * A node not yet decommissioning has its stats rolled over and its state
 * flipped; in either the fresh or in-progress case the node is handed to the
 * decommission monitor, and its start time is stamped when the monitor
 * accepts it. A fully decommissioned node is left untouched.
 */
void startDecommission(DatanodeDescriptor node) throws IOException {
  final boolean inProgress = node.isDecommissionInProgress();
  final boolean completed = node.isDecommissioned();
  if (!inProgress && !completed) {
    LOG.info("Start Decommissioning node " + node.getName() + " with "
        + node.numBlocks() + " blocks.");
    synchronized (heartbeats) {
      // Remove the node's contribution to cluster stats, change state,
      // then add it back under the new state.
      updateStats(node, false);
      node.startDecommission();
      updateStats(node, true);
    }
  } else if (!inProgress) {
    // Already fully decommissioned: nothing to do.
    return;
  }
  // Register with the monitor (both the fresh and the in-progress case).
  if (((Monitor) dnthread.getRunnable()).startDecommision(node)) {
    node.setStartTime(now());
  }
}
java
{ "resource": "" }
q161512
FSNamesystem.stopDecommission
train
/**
 * Cancels decommissioning for a datanode (or reverses a completed one).
 * After flipping the node's state, every block it hosts that now has more
 * live replicas than required is queued for over-replication processing,
 * since the node's replicas count as live again.
 */
void stopDecommission(DatanodeDescriptor node) throws IOException {
  // Proceed only if the monitor agrees to stop an in-progress decommission,
  // or the node had already finished decommissioning.
  if ((node.isDecommissionInProgress()
      && ((Monitor) dnthread.getRunnable()).stopDecommission(node))
      || node.isDecommissioned()) {
    LOG.info("Stop Decommissioning node " + node.getName());
    synchronized (heartbeats) {
      updateStats(node, false);
      node.stopDecommission();
      updateStats(node, true);
    }
    // Make sure we process over replicated blocks.
    writeLock();
    try {
      Iterator<BlockInfo> it = node.getBlockIterator();
      while (it.hasNext()) {
        Block b = it.next();
        if (countLiveNodes(b) > getReplication(b)) {
          overReplicatedBlocks.add(b);
        }
      }
    } finally {
      writeUnlock();
    }
  }
}
java
{ "resource": "" }
q161513
FSNamesystem.countLiveNodes
train
private int countLiveNodes(Block b, Iterator<DatanodeDescriptor> nodeIter) { int live = 0; Collection<DatanodeDescriptor> nodesCorrupt = null; if (corruptReplicas.size() != 0) { nodesCorrupt = corruptReplicas.getNodes(b); } while (nodeIter.hasNext()) { DatanodeDescriptor node = nodeIter.next(); if (((nodesCorrupt != null) && (nodesCorrupt.contains(node))) || node.isDecommissionInProgress() || node.isDecommissioned()) { // do nothing } else { live++; } } return live; }
java
{ "resource": "" }
q161514
FSNamesystem.isReplicationInProgress
train
/**
 * Checks whether a block on a decommissioning node still needs replication.
 * Updates the per-node decommissioning status counters when {@code status}
 * is supplied. If the block is under-replicated and tracked by neither the
 * needed nor the pending queue, it is either added to neededReplications
 * (when {@code addToNeeded}) or returned to the caller for handling.
 *
 * @return the block if it needs replication and addToNeeded is false;
 *         null otherwise
 */
BlockInfo isReplicationInProgress(final DecommissioningStatus status,
    final DatanodeDescriptor srcNode, final BlockInfo block, boolean addToNeeded) {
  INode fileINode = blocksMap.getINode(block);
  if (fileINode == null) {
    // Block no longer belongs to any file; nothing to replicate.
    return null;
  }
  NumberReplicas num = countNodes(block);
  int curReplicas = num.liveReplicas();
  int curExpectedReplicas = getReplication(block);
  if (curExpectedReplicas > curReplicas) {
    if (status != null) {
      //Log info about one block for this node which needs replication
      if (status.underReplicatedBlocks == 0) {
        logBlockReplicationInfo(block, srcNode, num);
      }
      status.underReplicatedBlocks++;
      // Block whose only replicas live on decommissioned nodes.
      if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
        status.decommissionOnlyReplicas++;
      }
      if (fileINode.isUnderConstruction()) {
        status.underReplicatedInOpenFiles++;
      }
    }
    if (!neededReplications.contains(block)
        && pendingReplications.getNumReplicas(block) == 0) {
      //
      // These blocks have been reported from the datanode
      // after the startDecommission method has been executed. These
      // blocks were in flight when the decommissioning was started.
      //
      if (addToNeeded) {
        neededReplications.add(block, curReplicas,
            num.decommissionedReplicas(), curExpectedReplicas);
      } else {
        return block;
      }
    }
  }
  return null;
}
java
{ "resource": "" }
q161515
FSNamesystem.getHostNameForIp
train
private String getHostNameForIp(String ipAddr) { try { // this handles the case where the hostlist contains names // and the datanodes are using IPs InetAddress addr = InetAddress.getByName(ipAddr); return addr.getHostName(); } catch (Exception e) { // this can be safely ignored } return null; }
java
{ "resource": "" }
q161516
FSNamesystem.verifyNodeRegistration
train
/**
 * Verifies a registering datanode is permitted by the hosts include list.
 * Must be called with the namesystem write lock held.
 */
private boolean verifyNodeRegistration(DatanodeRegistration nodeReg, String ipAddr)
    throws IOException {
  assert (hasWriteLock());
  final boolean permitted = inHostsList(nodeReg, ipAddr);
  return permitted;
}
java
{ "resource": "" }
q161517
FSNamesystem.checkDecommissioning
train
private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) throws IOException { // If the registered node is in exclude list, then decommission it if (inExcludedHostsList(nodeReg, ipAddr)) { startDecommission(nodeReg); } }
java
{ "resource": "" }
q161518
FSNamesystem.getDatanode
train
/**
 * Looks up the descriptor registered under the given node's storage ID.
 *
 * @return the descriptor, or null if no node with that storage ID exists
 * @throws UnregisteredDatanodeException if the storage ID is registered to
 *         a node with a different name (a fatal identity mismatch)
 */
public DatanodeDescriptor getDatanode(DatanodeID nodeID) throws IOException {
  final DatanodeDescriptor registered = datanodeMap.get(nodeID.getStorageID());
  if (registered == null) {
    return null;
  }
  if (registered.getName().equals(nodeID.getName())) {
    return registered;
  }
  final UnregisteredDatanodeException e =
      new UnregisteredDatanodeException(nodeID, registered);
  NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
      + e.getLocalizedMessage());
  throw e;
}
java
{ "resource": "" }
q161519
FSNamesystem.incrementSafeBlockCount
train
/**
 * Bumps the safe-block counter when a block reaches minimal replication
 * while safe mode is on; optionally re-evaluates whether safe mode can exit.
 *
 * @param skipCheck when true, defer the safe-mode exit check to the caller
 */
void incrementSafeBlockCount(int replication, boolean skipCheck) {
  if (safeMode == null || !safeMode.isOn()) {
    return;
  }
  if (replication != minReplication) {
    // Only the transition to exactly minReplication counts a block as safe.
    return;
  }
  this.blocksSafe++;
  if (!skipCheck) {
    safeMode.checkMode();
  }
}
java
{ "resource": "" }
q161520
FSNamesystem.enterSafeMode
train
/**
 * Manually puts the namenode into safe mode. Edits are fully synced first so
 * the on-disk image is stable the moment safe mode is active.
 */
void enterSafeMode() throws IOException {
  writeLock();
  try {
    // Ensure that any concurrent operations have been fully synced
    // before entering safe mode. This ensures that the FSImage
    // is entirely stable on disk as soon as we're in safe mode.
    getEditLog().logSyncAll();
    if (!isInSafeMode()) {
      // Not currently in safe mode: create a fresh manual safe mode and
      // return early (no "Safe mode is ON" log in this path).
      safeMode = SafeModeUtil.getInstance(this);
      safeMode.setManual();
      return;
    }
    // Already in (automatic) safe mode: pin it as manual.
    safeMode.setManual();
    // NOTE(review): logSyncAll is invoked a second time on this path —
    // presumably intentional to re-sync after the mode change; confirm.
    getEditLog().logSyncAll();
    NameNode.stateChangeLog.info("STATE* Safe mode is ON. "
        + safeMode.getTurnOffTip());
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161521
FSNamesystem.leaveSafeMode
train
/**
 * Leaves safe mode. No-op (with a log line) when already out; refuses to
 * leave while a distributed upgrade is in progress.
 *
 * @param checkForUpgrades forwarded to the safe-mode leave logic
 */
void leaveSafeMode(boolean checkForUpgrades) throws SafeModeException {
  writeLock();
  try {
    if (!isInSafeMode()) {
      NameNode.stateChangeLog.info("STATE* Safe mode is already OFF.");
      return;
    }
    if (getDistributedUpgradeState()) {
      throw new SafeModeException("Distributed upgrade is in progress", safeMode);
    }
    safeMode.leave(checkForUpgrades);
    // Dropping the reference marks safe mode as fully off.
    safeMode = null;
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161522
FSNamesystem.initializeReplQueues
train
/**
 * Initializes the replication queues while still in safe mode. No-op (with
 * a log line) if the queues are already being populated.
 */
void initializeReplQueues() throws SafeModeException {
  writeLock();
  try {
    if (isPopulatingReplQueues()) {
      NameNode.stateChangeLog.info("STATE* Safe mode is already OFF."
          + " Replication queues are initialized");
      return;
    }
    // Delegates to the active safe-mode object; assumes safeMode is non-null
    // here since queues are only uninitialized while safe mode is on.
    safeMode.initializeReplicationQueues();
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161523
FSNamesystem.nextGenerationStampForBlock
train
/**
 * Issues the next generation stamp for a block about to undergo recovery.
 * Validates, in order: not in safe mode; the block (matched by ID with a
 * wildcard genstamp) still exists; its file is under construction; the path
 * is resolvable; the request does not race NameNode-initiated recovery; and
 * the recovery-time gate admits this attempt.
 *
 * @param fromNN true when the request originates from the NameNode itself,
 *        which bypasses the client-vs-NN recovery arbitration
 * @throws BlockAlreadyCommittedException when the block is gone or committed
 */
public long nextGenerationStampForBlock(Block block, boolean fromNN)
    throws IOException {
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot get nextGenStamp for " + block, safeMode);
    }
    // Match by block ID only — the caller's genstamp may be stale.
    Block blockWithWildcardGenstamp = new Block(block.getBlockId());
    BlockInfo storedBlock = blocksMap.getStoredBlock(blockWithWildcardGenstamp);
    if (storedBlock == null) {
      String msg = block + " is already commited, storedBlock == null.";
      LOG.info(msg);
      throw new BlockAlreadyCommittedException(msg);
    }
    INodeFile fileINode = storedBlock.getINode();
    if (!fileINode.isUnderConstruction()) {
      String msg = block + " is already commited, !fileINode.isUnderConstruction().";
      LOG.info(msg);
      throw new BlockAlreadyCommittedException(msg);
    }
    // Disallow client-initiated recovery once
    // NameNode initiated lease recovery starts
    String path = null;
    try {
      path = fileINode.getFullPathName();
    } catch (IOException ioe) {
      // Path resolution failing means the file was deleted concurrently.
      throw (BlockAlreadyCommittedException) new BlockAlreadyCommittedException(
          block + " is already deleted").initCause(ioe);
    }
    if (!fromNN && HdfsConstants.NN_RECOVERY_LEASEHOLDER.equals(
        leaseManager.getLeaseByPath(path).getHolder())) {
      String msg = block
          + "is being recovered by NameNode, ignoring the request from a client";
      LOG.info(msg);
      throw new IOException(msg);
    }
    // setLastRecoveryTime returns false when a recent recovery is in flight.
    if (!((INodeFileUnderConstruction) fileINode).setLastRecoveryTime(now())) {
      String msg = block + " is being recovered, ignoring this request.";
      LOG.info(msg);
      throw new IOException(msg);
    }
    return nextGenerationStamp();
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161524
FSNamesystem.saveFilesUnderConstruction
train
/**
 * Serializes all files under construction (tracked via leases) into the
 * image stream. Runs two passes under the leaseManager monitor: the first
 * counts valid lease paths (so the size can be written before the entries),
 * the second writes each INodeFileUnderConstruction. Paths missing from the
 * namespace are logged and skipped in both passes; a path that exists but is
 * not under construction is a fatal inconsistency. A mismatch between the
 * two passes' counts also aborts the save.
 */
void saveFilesUnderConstruction(SaveNamespaceContext ctx, DataOutputStream out)
    throws IOException {
  synchronized (leaseManager) {
    // Pass 1: count the savable paths.
    int pathsToSave = 0;
    Iterator<Lease> itrl = leaseManager.getSortedLeases().iterator();
    while (itrl.hasNext()) {
      Lease lease = itrl.next();
      for (String path : lease.getPaths()) {
        // verify that path exists in namespace
        INode node = dir.getFileINode(path);
        if (node != null && node.isUnderConstruction()) {
          pathsToSave++;
        } else if (node == null) {
          // ignore the path and continue.
          String msg = "saveLeases - counting - found path " + path
              + " but no matching entry in namespace.";
          LOG.warn(msg);
          continue;
        } else {
          throw new IOException("saveLeases found path " + path
              + " but is not under construction.");
        }
      }
    }
    if (pathsToSave != leaseManager.countPath()) {
      LOG.warn("Number of leases mismatch: " + pathsToSave
          + " are valid, lease manager indicated: " + leaseManager.countPath());
    }
    out.writeInt(pathsToSave); // write the size
    // Pass 2: serialize each under-construction inode.
    int pathsSaved = 0;
    LightWeightLinkedSet<Lease> sortedLeases = leaseManager.getSortedLeases();
    Iterator<Lease> itr = sortedLeases.iterator();
    while (itr.hasNext()) {
      // Honor save-namespace cancellation between leases.
      ctx.checkCancelled();
      Lease lease = itr.next();
      for (String path : lease.getPaths()) {
        // verify that path exists in namespace
        INode node = dir.getFileINode(path);
        if (node == null) {
          // ignore the path and continue.
          String msg = "saveLeases found path " + path
              + " but no matching entry in namespace.";
          LOG.warn(msg);
          continue;
        }
        if (!node.isUnderConstruction()) {
          throw new IOException("saveLeases found path " + path
              + " but is not under construction.");
        }
        INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node;
        FSImageSerialization.writeINodeUnderConstruction(out, cons, path);
        pathsSaved++;
      }
    }
    // The written size header must match the number of entries written.
    if (pathsSaved != pathsToSave) {
      String msg = "Saved paths: " + pathsSaved
          + " is not equal to what we thought we would save: " + pathsToSave;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
java
{ "resource": "" }
q161525
FSNamesystem.getLiveNodes
train
@Override // NameNodeMXBean public String getLiveNodes() { final Map<String, Map<String,Object>> info = new HashMap<String, Map<String,Object>>(); try { final ArrayList<DatanodeDescriptor> liveNodeList = new ArrayList<DatanodeDescriptor>(); final ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>(); DFSNodesStatus(liveNodeList, deadNodeList); removeDecommissionedNodeFromList(liveNodeList); for (DatanodeDescriptor node : liveNodeList) { final Map<String, Object> innerinfo = new HashMap<String, Object>(); innerinfo.put("lastContact", getLastContact(node)); innerinfo.put("usedSpace", getDfsUsed(node)); innerinfo.put("adminState", node.getAdminState().toString()); innerinfo.put("excluded", this.inExcludedHostsList(node, null)); info.put(node.getHostName() + ":" + node.getPort(), innerinfo); } } catch (Exception e) { LOG.error("Exception:", e); } return JSON.toString(info); }
java
{ "resource": "" }
q161526
FSNamesystem.getDecomNodes
train
@Override // NameNodeMXBean public String getDecomNodes() { final Map<String, Map<String, Object>> info = new HashMap<String, Map<String, Object>>(); try { final ArrayList<DatanodeDescriptor> decomNodeList = this.getDecommissioningNodesList(); for (DatanodeDescriptor node : decomNodeList) { final Map<String, Object> innerinfo = new HashMap<String, Object>(); innerinfo.put("underReplicatedBlocks", node.decommissioningStatus .getUnderReplicatedBlocks()); innerinfo.put("decommissionOnlyReplicas", node.decommissioningStatus .getDecommissionOnlyReplicas()); innerinfo.put("underReplicateInOpenFiles", node.decommissioningStatus .getUnderReplicatedInOpenFiles()); info.put(node.getHostName() + ":" + node.getPort(), innerinfo); } } catch (Exception e) { LOG.error("Exception:", e); } return JSON.toString(info); }
java
{ "resource": "" }
q161527
FSNamesystem.adjustReplication
train
/**
 * Clamps a requested replication factor into [minReplication, maxReplication].
 */
public short adjustReplication(short replication) {
  short clamped = replication;
  if (clamped < minReplication) {
    clamped = (short) minReplication;
  } else if (clamped > maxReplication) {
    clamped = (short) maxReplication;
  }
  return clamped;
}
java
{ "resource": "" }
q161528
StorageServiceConfigKeys.translateToOldSchema
train
/**
 * Translates a nameservice ID to the legacy single-namenode URI by reading
 * the per-nameservice RPC address from the configuration.
 *
 * @throws IllegalArgumentException when no RPC address is configured for the
 *         given nameservice ID
 */
@Deprecated
public static URI translateToOldSchema(Configuration clusterConf,
    String nameserviceId) {
  final String key =
      FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + nameserviceId;
  final String value = clusterConf.get(key);
  if (value == null) {
    throw new IllegalArgumentException(
        "Cannot translate to old schema for nameserviceId: " + nameserviceId);
  }
  return NameNode.getUri(NetUtils.createSocketAddr(value));
}
java
{ "resource": "" }
q161529
NetUtils.getSocketFactoryFromProperty
train
/**
 * Instantiates a SocketFactory from a class name taken from configuration.
 *
 * @param propValue fully-qualified class name of the factory implementation
 * @throws RuntimeException (with the ClassNotFoundException as cause) when
 *         the class cannot be loaded
 */
public static SocketFactory getSocketFactoryFromProperty(
    Configuration conf, String propValue) {
  try {
    Class<?> theClass = conf.getClassByName(propValue);
    return (SocketFactory) ReflectionUtils.newInstance(theClass, conf);
  } catch (ClassNotFoundException cnfe) {
    // FIX: chain the original exception as the cause instead of flattening
    // it into the message — previously the stack trace of cnfe was lost.
    throw new RuntimeException("Socket Factory class not found: " + propValue,
        cnfe);
  }
}
java
{ "resource": "" }
q161530
NetUtils.getServerAddress
train
/**
 * Resolves a server "ip:port" string from configuration, honoring deprecated
 * split address/port keys. If neither old key is set, the new combined key
 * wins outright. Otherwise each unset half (address or port) is filled from
 * the new key, with a deprecation warning for each old key actually used.
 *
 * @return the resolved address as "ip:port"; falls back to plain
 *         host:port concatenation when the ip lookup fails
 */
@Deprecated
public static String getServerAddress(Configuration conf,
    String oldBindAddressName, String oldPortName, String newBindAddressName) {
  String oldAddr = conf.get(oldBindAddressName);
  int oldPort = conf.getInt(oldPortName, 0);
  String newAddrPort = conf.get(newBindAddressName);
  if (oldAddr == null && oldPort == 0) {
    // Neither deprecated key present: use the combined key directly.
    return toIpPort(createSocketAddr(newAddrPort));
  }
  InetSocketAddress newAddr = NetUtils.createSocketAddr(newAddrPort);
  if (oldAddr == null) {
    oldAddr = newAddr.getAddress().getHostAddress();
  } else {
    LOG.warn("Configuration parameter " + oldBindAddressName
        + " is deprecated. Use " + newBindAddressName + " instead.");
  }
  if (oldPort == 0) {
    oldPort = newAddr.getPort();
  } else {
    LOG.warn("Configuration parameter " + oldPortName
        + " is deprecated. Use " + newBindAddressName + " instead.");
  }
  try {
    return toIpPort(oldAddr, oldPort);
  } catch (UnknownHostException e) {
    // Fall through to the unresolved host:port form below.
    LOG.error("DNS not supported.");
    LOG.fatal(e);
  }
  return oldAddr + ":" + oldPort;
}
java
{ "resource": "" }
q161531
NetUtils.normalizeHostNames
train
/**
 * Normalizes each host name in the collection (via normalizeHostName),
 * returning the results in iteration order.
 */
public static List<String> normalizeHostNames(Collection<String> names) {
  final List<String> normalized = new ArrayList<String>(names.size());
  for (String hostName : names) {
    normalized.add(normalizeHostName(hostName));
  }
  return normalized;
}
java
{ "resource": "" }
q161532
NetUtils.isSocketBindable
train
/**
 * Probes whether the given address can be bound by briefly binding and
 * closing a server socket. A null address is treated as trivially bindable.
 *
 * @throws IOException if the bind fails (e.g. address already in use)
 */
public static void isSocketBindable(InetSocketAddress addr) throws IOException {
  if (addr == null) {
    return;
  }
  final ServerSocket probe = new ServerSocket();
  try {
    probe.bind(addr);
  } finally {
    // Always release the probe socket, even when bind throws.
    probe.close();
  }
}
java
{ "resource": "" }
q161533
DirectoryStatistics.addSourceFile
train
/**
 * Accounts a source directory toward raid statistics and decides whether it
 * should be raided.
 *
 * @return true iff the directory is NOT_RAIDED_BUT_SHOULD (i.e. a raid
 *         candidate); false for already-raided, unlistable, or other states
 */
public boolean addSourceFile(FileSystem fs, PolicyInfo info, FileStatus src,
    RaidState.Checker checker, long now, int targetReplication)
    throws IOException {
  List<FileStatus> lfs = RaidNode.listDirectoryRaidFileStatus(
      fs.getConf(), fs, src.getPath());
  if (lfs == null) {
    // Directory could not be listed as a raid candidate.
    return false;
  }
  RaidState state = checker.check(info, src, now, false, lfs);
  // Tally this directory under its state's counters.
  Counters counters = stateToSourceCounters.get(state);
  counters.inc(lfs);
  if (state == RaidState.RAIDED) {
    // Already raided: its parity contributes to both total and done sizes.
    long paritySize = computeParitySize(lfs, targetReplication);
    estimatedParitySize += paritySize;
    estimatedDoneParitySize += paritySize;
    estimatedDoneSourceSize += DirectoryStripeReader.getDirPhysicalSize(lfs);
    return false;
  }
  if (state == RaidState.NOT_RAIDED_BUT_SHOULD) {
    // Pending candidate: counts toward the projected "done" totals only.
    estimatedDoneParitySize += computeParitySize(lfs, targetReplication);
    estimatedDoneSourceSize += DirectoryStripeReader.getDirPhysicalSize(lfs);
    return true;
  }
  return false;
}
java
{ "resource": "" }
q161534
RemoteJTProxy.incrementAttemptUnprotected
train
/**
 * Advances the remote-JT attempt counter and re-derives the current attempt
 * ID from it. "Unprotected": the caller must hold the appropriate lock.
 */
private void incrementAttemptUnprotected() {
  attempt++;
  final TaskID taskId = new TaskID(attemptJobId, currentAttemptId.isMap(),
      currentAttemptId.getTaskID().getId());
  currentAttemptId = new TaskAttemptID(taskId, attempt);
}
java
{ "resource": "" }
q161535
RemoteJTProxy.checkAttempt
train
/**
 * Rejects requests carrying a stale attempt ID.
 *
 * @throws IOException when attemptId differs from the current attempt
 */
private void checkAttempt(TaskAttemptID attemptId) throws IOException {
  if (attemptId.equals(currentAttemptId)) {
    return;
  }
  throw new IOException("Attempt " + attemptId
      + " does not match current attempt " + currentAttemptId);
}
java
{ "resource": "" }
q161536
RemoteJTProxy.initializeClientUnprotected
train
/**
 * Lazily creates the RPC proxy to the remote job tracker and records its
 * address/session. Idempotent: returns immediately if a client exists.
 * "Unprotected": the caller must hold the appropriate lock.
 */
void initializeClientUnprotected(String host, int port, String sessionId)
    throws IOException {
  if (client != null) {
    return;
  }
  LOG.info("Creating JT client to " + host + ":" + port);
  long connectTimeout = RemoteJTProxy.getRemotJTTimeout(conf);
  int rpcTimeout = RemoteJTProxy.getRemoteJTRPCTimeout(conf);
  remoteJTAddr = new InetSocketAddress(host, port);
  // Blocks until the protocol proxy is available or connectTimeout elapses.
  client = RPC.waitForProtocolProxy(
      JobSubmissionProtocol.class,
      JobSubmissionProtocol.versionID,
      remoteJTAddr,
      conf,
      connectTimeout,
      rpcTimeout
  ).getProxy();
  remoteJTStatus = RemoteJTStatus.SUCCESS;
  remoteJTHost = host;
  remoteJTPort = port;
  remoteSessionId = sessionId;
  if (remoteJTState != null) {
    remoteJTState.setSessionId(sessionId);
  }
}
java
{ "resource": "" }
q161537
RemoteJTProxy.waitForJTStart
train
/**
 * Acquires a grant and starts a remote job tracker, retrying on fresh grants
 * up to a configured number of attempts. A previously failed JT grant is
 * released first and (optionally) blacklisted so it is not granted again.
 *
 * @throws IOException after all attempts fail, or when interrupted
 */
public void waitForJTStart(JobConf jobConf) throws IOException {
  int maxJTAttempts = jobConf.getInt(
      "mapred.coronajobtracker.remotejobtracker.attempts", 4);
  ResourceTracker resourceTracker = jt.getResourceTracker();
  SessionDriver sessionDriver = jt.getSessionDriver();
  List<ResourceGrant> excludeGrants = new ArrayList<ResourceGrant>();
  boolean toExcludeFailed = jobConf.getBoolean(REMOTE_JT_EXCLUDE_FAILED, true);
  // Release and blacklist failed JT grant.
  if (remoteJTGrant != null) {
    if (toExcludeFailed) {
      excludeGrants.add(remoteJTGrant);
    }
    resourceTracker.releaseResource(remoteJTGrant.getId());
    sessionDriver.releaseResources(resourceTracker.getResourcesToRelease());
  }
  for (int i = 0; i < maxJTAttempts; i++) {
    try {
      remoteJTGrant = waitForJTGrant(resourceTracker, sessionDriver, excludeGrants);
      boolean success = startRemoteJT(jobConf, remoteJTGrant);
      if (success) {
        return;
      } else {
        // Start failed on this grant: exclude it and release the resource
        // before the next attempt.
        excludeGrants.add(remoteJTGrant);
        resourceTracker.releaseResource(remoteJTGrant.getId());
        List<ResourceRequest> released = resourceTracker.getResourcesToRelease();
        sessionDriver.releaseResources(released);
      }
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
  throw new IOException("Could not start remote JT after " + maxJTAttempts
      + " attempts");
}
java
{ "resource": "" }
q161538
RemoteJTProxy.waitForJTGrant
train
/**
 * Requests a job-tracker resource grant (excluding hosts of previously failed
 * grants) and blocks until one is granted, polling in one-minute intervals
 * while surfacing any session-driver failure as an IOException.
 */
private ResourceGrant waitForJTGrant(
    ResourceTracker resourceTracker,
    SessionDriver sessionDriver,
    List<ResourceGrant> previousGrants)
    throws IOException, InterruptedException {
  LOG.info("Waiting for JT grant for " + attemptJobId);
  ResourceRequest req = resourceTracker.newJobTrackerRequest();
  for (ResourceGrant prev : previousGrants) {
    LOG.info("Adding " + prev.getNodeName() + " to excluded hosts");
    req.addToExcludeHosts(prev.getAddress().getHost());
  }
  resourceTracker.recordRequest(req);
  List<ResourceRequest> newRequests = resourceTracker.getWantedResources();
  sessionDriver.requestResources(newRequests);
  final List<ResourceGrant> grants = new ArrayList<ResourceGrant>();
  // Collect granted resources into `grants` as they arrive.
  ResourceTracker.ResourceProcessor proc = new ResourceTracker.ResourceProcessor() {
    @Override
    public boolean processAvailableResource(ResourceGrant resource) {
      grants.add(resource);
      final boolean consumed = true;
      return consumed;
    }
  };
  while (true) {
    // Try to get JT grant while periodically checking for session driver
    // exceptions.
    long timeout = 60 * 1000; // 1 min.
    resourceTracker.processAvailableGrants(proc, 1, timeout);
    IOException e = sessionDriver.getFailed();
    if (e != null) {
      throw e;
    }
    if (!grants.isEmpty()) {
      return grants.get(0);
    }
  }
}
java
{ "resource": "" }
q161539
RemoteJTProxy.startRemoteJT
train
/**
 * Attempts to launch the remote corona job tracker on the task tracker
 * referenced by the grant. The JT is launched as a synthetic map task; the
 * method then waits (up to a configured timeout) for the remote JT to call
 * back and populate {@code client}.
 *
 * @return true if the remote JT reported back in time; false on connect
 *         failure, RPC failure, or timeout (the attempt counter is bumped on
 *         RPC failure/timeout so the stale attempt is rejected later)
 */
private boolean startRemoteJT(
    JobConf jobConf,
    ResourceGrant grant) throws InterruptedException {
  org.apache.hadoop.corona.InetAddress ttAddr =
      Utilities.appInfoToAddress(grant.appInfo);
  CoronaTaskTrackerProtocol coronaTT = null;
  try {
    coronaTT = jt.getTaskTrackerClient(ttAddr.getHost(), ttAddr.getPort());
  } catch (IOException e) {
    LOG.error("Error while trying to connect to TT at " + ttAddr.getHost()
        + ":" + ttAddr.getPort(), e);
    return false;
  }
  LOG.warn("Starting remote JT for " + attemptJobId + " on " + ttAddr.getHost());
  // Get a special map id for the JT task.
  Path systemDir = new Path(jt.getSystemDir());
  LOG.info("startRemoteJT:systemDir " + systemDir.toString());
  String jobFile = CoronaJobInProgress.getJobFile(systemDir, attemptJobId)
      .toString();
  LOG.info("startRemoteJT:jobFile " + jobFile);
  // The JT runs as a map task with an empty split.
  String splitClass = JobClient.RawSplit.class.getName();
  BytesWritable split = new BytesWritable();
  Task jobTask = new MapTask(
      jobFile, currentAttemptId, currentAttemptId.getTaskID().getId(),
      splitClass, split, 1, jobConf.getUser());
  CoronaSessionInfo info = new CoronaSessionInfo(jt.getSessionId(),
      jt.getJobTrackerAddress(), jt.getJobTrackerAddress());
  synchronized (this) {
    try {
      coronaTT.startCoronaJobTracker(jobTask, info);
    } catch (IOException e) {
      // Increment the attempt so that the older attempt will get an error
      // in reportRemoteCoronaJobTracker().
      incrementAttemptUnprotected();
      LOG.error("Error while performing RPC to TT at " + ttAddr.getHost()
          + ":" + ttAddr.getPort(), e);
      return false;
    }
  }
  // Now wait for the remote CJT to report its address.
  final long waitStart = System.currentTimeMillis();
  final long timeout = RemoteJTProxy.getRemotJTTimeout(jobConf);
  synchronized (this) {
    // client is set by the callback thread; poll with 1s waits until set
    // or the overall timeout expires.
    while (client == null) {
      LOG.warn("Waiting for remote JT to start on " + ttAddr.getHost());
      this.wait(1000);
      if (client == null && System.currentTimeMillis() - waitStart > timeout) {
        // Increment the attempt so that the older attempt will get an error
        // in reportRemoteCoronaJobTracker().
        incrementAttemptUnprotected();
        LOG.warn("Could not start remote JT on " + ttAddr.getHost());
        return false;
      }
    }
  }
  return true;
}
java
{ "resource": "" }
q161540
RemoteJTProxy.close
train
/**
 * Tears down the RPC proxy to the remote job tracker, if one exists.
 * Safe to call repeatedly.
 */
public void close() {
  clientLock.writeLock().lock();
  try {
    if (client == null) {
      return;
    }
    RPC.stopProxy(client);
    client = null;
  } finally {
    clientLock.writeLock().unlock();
  }
}
java
{ "resource": "" }
q161541
RemoteJTProxy.checkClient
train
/**
 * Blocks until the remote JT client proxy is available and returns it.
 * Polls under this object's monitor, failing fast if the remote JT has been
 * marked FAILURE.
 *
 * @throws IOException if the remote JT failed, or the wait was interrupted
 */
private JobSubmissionProtocol checkClient() throws IOException {
  synchronized (this) {
    while (client == null) {
      if (remoteJTStatus == RemoteJTStatus.FAILURE) {
        throw new IOException("Remote Job Tracker is not available");
      }
      try {
        this.wait(1000);
      } catch (InterruptedException e) {
        // FIX: restore the interrupt status before translating to
        // IOException, so callers up the stack can still observe it.
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    }
    return client;
  }
}
java
{ "resource": "" }
q161542
TaskMemoryManagerThread.isProcessTreeOverLimit
train
/**
 * Decides whether a task's process tree exceeds its memory limit.
 * Over-limit if either (a) total usage exceeds twice the limit (grace for
 * fork-moment spikes), or (b) usage by processes older than one monitoring
 * iteration exceeds the limit.
 */
boolean isProcessTreeOverLimit(String tId, long currentMemUsage,
    long curMemUsageOfAgedProcesses, long limit) {
  if (currentMemUsage > (2 * limit)) {
    LOG.warn("Process tree for task: " + tId + " running over twice "
        + "the configured limit. Limit=" + limit
        + ", current usage = " + currentMemUsage);
    return true;
  }
  if (curMemUsageOfAgedProcesses > limit) {
    LOG.warn("Process tree for task: " + tId + " has processes older than 1 "
        + "iteration running over the configured limit. Limit=" + limit
        + ", current usage = " + curMemUsageOfAgedProcesses);
    return true;
  }
  return false;
}
java
{ "resource": "" }
q161543
TaskMemoryManagerThread.isProcessTreeOverLimit
train
/**
 * Convenience overload: extracts total and aged-process memory usage from
 * the process tree, then delegates to the four-argument overload.
 */
boolean isProcessTreeOverLimit(ProcfsBasedProcessTree pTree, String tId,
    long limit) {
  long currentMemUsage = pTree.getCumulativeVmem();
  // as processes begin with an age 1, we want to see if there are processes
  // more than 1 iteration old.
  long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
  return isProcessTreeOverLimit(tId, currentMemUsage,
      curMemUsageOfAgedProcesses, limit);
}
java
{ "resource": "" }
q161544
TaskMemoryManagerThread.getTaskCumulativeRssmem
train
/**
 * Returns the cumulative resident-set (physical) memory of a task's process
 * tree, or 0 when no tree information is available.
 */
private long getTaskCumulativeRssmem(TaskAttemptID tid) {
  ProcessTreeInfo ptInfo = processTreeInfoMap.get(tid);
  // Robustness: the task may have been removed from the map concurrently.
  if (ptInfo == null) {
    return 0;
  }
  ProcfsBasedProcessTree pTree = ptInfo.getProcessTree();
  // FIX: this accessor is used for RSS-based limits (see
  // failTasksWithMaxRssMemory) but previously returned cumulative *virtual*
  // memory via getCumulativeVmem(); report resident-set memory instead.
  return pTree == null ? 0 : pTree.getCumulativeRssmem();
}
java
{ "resource": "" }
q161545
TaskMemoryManagerThread.failTasksWithMaxRssMemory
train
private void failTasksWithMaxRssMemory( long rssMemoryInUsage, long availableRssMemory) { List<TaskAttemptID> tasksToKill = new ArrayList<TaskAttemptID>(); List<TaskAttemptID> allTasks = new ArrayList<TaskAttemptID>(); allTasks.addAll(processTreeInfoMap.keySet()); // Sort the tasks descendingly according to RSS memory usage Collections.sort(allTasks, new Comparator<TaskAttemptID>() { @Override public int compare(TaskAttemptID tid1, TaskAttemptID tid2) { return getTaskCumulativeRssmem(tid2) > getTaskCumulativeRssmem(tid1) ? 1 : -1; }}); long rssMemoryStillInUsage = rssMemoryInUsage; long availableRssMemoryAfterKilling = availableRssMemory; // Fail the tasks one by one until the memory requirement is met while ((rssMemoryStillInUsage > maxRssMemoryAllowedForAllTasks || availableRssMemoryAfterKilling < reservedRssMemory) && !allTasks.isEmpty()) { TaskAttemptID tid = allTasks.remove(0); if (!isKillable(tid)) { continue; } long rssmem = getTaskCumulativeRssmem(tid); if (rssmem == 0) { break; // Skip tasks without process tree information currently } tasksToKill.add(tid); rssMemoryStillInUsage -= rssmem; availableRssMemoryAfterKilling += rssmem; } // Now kill the tasks. if (!tasksToKill.isEmpty()) { for (TaskAttemptID tid : tasksToKill) { long taskMemoryLimit = getTaskMemoryLimit(tid); long taskMemory = getTaskCumulativeRssmem(tid); String pid = processTreeInfoMap.get(tid).getPID(); String msg = HIGH_MEMORY_KEYWORD + " task:" + tid + " pid:" + pid + " taskMemory:" + taskMemory + " taskMemoryLimit:" + taskMemoryLimit + " availableMemory:" + availableRssMemory + " totalMemory:" + rssMemoryInUsage + " totalMemoryLimit:" + maxRssMemoryAllowedForAllTasks; if (taskMemory > taskMemoryLimit) { msg = "Failing " + msg; LOG.warn(msg); killTask(tid, msg, true); } else { msg = "Killing " + msg; LOG.warn(msg); killTask(tid, msg, false); } } } else { LOG.error("The total physical memory usage is overflowing TTs limits. " + "But found no alive task to kill for freeing memory."); } }
java
{ "resource": "" }
q161546
ObjectWritable.prepareCachedNameBytes
train
public static byte[] prepareCachedNameBytes(String entityName) { UTF8 name = new UTF8(); name.set(entityName, true); byte nameBytes[] = name.getBytes(); byte cachedName[] = new byte[nameBytes.length + 2]; System.arraycopy(nameBytes, 0, cachedName, 2, nameBytes.length); // we cache the length as well int v = nameBytes.length; cachedName[0] = (byte)((v >>> 8) & 0xFF); cachedName[1] = (byte)((v >>> 0) & 0xFF); return cachedName; }
java
{ "resource": "" }
q161547
ObjectWritable.getClassWithCaching
train
private static Class<?> getClassWithCaching(String className, Configuration conf) { Class<?> classs = cachedClassObjects.get(className); if (classs == null) { try { classs = conf.getClassByName(className); if (cachedClassObjects.size() < CACHE_MAX_SIZE) { cachedClassObjects.put(className, classs); } } catch (ClassNotFoundException e) { throw new RuntimeException("readObject can't find class " + className, e); } } // for sanity check if the class is not null if (classs == null) { throw new RuntimeException("readObject can't find class " + className); } return classs; }
java
{ "resource": "" }
q161548
FileStatusExtended.blocksEquals
train
private boolean blocksEquals(Block[] a1, Block[] a2, boolean closedFile) { if (a1 == a2) return true; if (a1 == null || a2 == null || a2.length != a1.length) return false; for (int i = 0; i < a1.length; i++) { Block b1 = a1[i]; Block b2 = a2[i]; if (b1 == b2) continue; if (b1 == null || b2 == null) return false; // compare ids and gen stamps if (!(b1.getBlockId() == b2.getBlockId() && b1.getGenerationStamp() == b2 .getGenerationStamp())) return false; // for open files check len-2 blocks only if (!closedFile && i >= a1.length - 2) continue; // check block size if (b1.getNumBytes() != b2.getNumBytes()) return false; } return true; }
java
{ "resource": "" }
q161549
Application.abort
train
void abort(Throwable t) throws IOException { LOG.info("Aborting because of " + StringUtils.stringifyException(t)); try { downlink.abort(); downlink.flush(); } catch (IOException e) { // IGNORE cleanup problems } try { handler.waitForFinish(); } catch (Throwable ignored) { process.destroy(); } IOException wrapper = new IOException("pipe child exception"); wrapper.initCause(t); throw wrapper; }
java
{ "resource": "" }
q161550
Application.cleanup
train
void cleanup() throws IOException { serverSocket.close(); try { downlink.close(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } }
java
{ "resource": "" }
q161551
Application.runClient
train
static Process runClient(List<String> command, Map<String, String> env) throws IOException { ProcessBuilder builder = new ProcessBuilder(command); if (env != null) { builder.environment().putAll(env); } Process result = builder.start(); return result; }
java
{ "resource": "" }
q161552
SortedRanges.add
train
synchronized void add(Range range){ if(range.isEmpty()) { return; } long startIndex = range.getStartIndex(); long endIndex = range.getEndIndex(); //make sure that there are no overlapping ranges SortedSet<Range> headSet = ranges.headSet(range); if(headSet.size()>0) { Range previousRange = headSet.last(); LOG.debug("previousRange "+previousRange); if(startIndex<previousRange.getEndIndex()) { //previousRange overlaps this range //remove the previousRange if(ranges.remove(previousRange)) { indicesCount-=previousRange.getLength(); } //expand this range startIndex = previousRange.getStartIndex(); endIndex = endIndex>=previousRange.getEndIndex() ? endIndex : previousRange.getEndIndex(); } } Iterator<Range> tailSetIt = ranges.tailSet(range).iterator(); while(tailSetIt.hasNext()) { Range nextRange = tailSetIt.next(); LOG.debug("nextRange "+nextRange +" startIndex:"+startIndex+ " endIndex:"+endIndex); if(endIndex>=nextRange.getStartIndex()) { //nextRange overlaps this range //remove the nextRange tailSetIt.remove(); indicesCount-=nextRange.getLength(); if(endIndex<nextRange.getEndIndex()) { //expand this range endIndex = nextRange.getEndIndex(); break; } } else { break; } } add(startIndex,endIndex); }
java
{ "resource": "" }
q161553
SimulatorJobStoryProducer.getNextJobFiltered
train
private JobStory getNextJobFiltered() throws IOException { while (true) { ZombieJob job = producer.getNextJob(); if (job == null) { return null; } if (job.getOutcome() == Pre21JobHistoryConstants.Values.KILLED) { continue; } if (job.getNumberMaps() == 0) { continue; } if (job.getNumLoggedMaps() == 0) { continue; } return job; } }
java
{ "resource": "" }
q161554
DancingLinks.addColumn
train
public void addColumn(ColumnName name, boolean primary) { ColumnHeader<ColumnName> top = new ColumnHeader<ColumnName>(name, 0); top.up = top; top.down = top; if (primary) { Node<ColumnName> tail = head.left; tail.right = top; top.left = tail; top.right = head; head.left = top; } else { top.left = top; top.right = top; } columns.add(top); }
java
{ "resource": "" }
q161555
DancingLinks.addRow
train
public void addRow(boolean[] values) { Node<ColumnName> prev = null; for(int i=0; i < values.length; ++i) { if (values[i]) { ColumnHeader<ColumnName> top = columns.get(i); top.size += 1; Node<ColumnName> bottom = top.up; Node<ColumnName> node = new Node<ColumnName>(null, null, bottom, top, top); bottom.down = node; top.up = node; if (prev != null) { Node<ColumnName> front = prev.right; node.left = prev; node.right = front; prev.right = node; front.left = node; } else { node.left = node; node.right = node; } prev = node; } } }
java
{ "resource": "" }
q161556
DancingLinks.findBestColumn
train
private ColumnHeader<ColumnName> findBestColumn() { int lowSize = Integer.MAX_VALUE; ColumnHeader<ColumnName> result = null; ColumnHeader<ColumnName> current = (ColumnHeader<ColumnName>) head.right; while (current != head) { if (current.size < lowSize) { lowSize = current.size; result = current; } current = (ColumnHeader<ColumnName>) current.right; } return result; }
java
{ "resource": "" }
q161557
DancingLinks.coverColumn
train
private void coverColumn(ColumnHeader<ColumnName> col) { LOG.debug("cover " + col.head.name); // remove the column col.right.left = col.left; col.left.right = col.right; Node<ColumnName> row = col.down; while (row != col) { Node<ColumnName> node = row.right; while (node != row) { node.down.up = node.up; node.up.down = node.down; node.head.size -= 1; node = node.right; } row = row.down; } }
java
{ "resource": "" }
q161558
DancingLinks.getRowName
train
private List<ColumnName> getRowName(Node<ColumnName> row) { List<ColumnName> result = new ArrayList<ColumnName>(); result.add(row.head.name); Node<ColumnName> node = row.right; while (node != row) { result.add(node.head.name); node = node.right; } return result; }
java
{ "resource": "" }
q161559
DancingLinks.search
train
private int search(List<Node<ColumnName>> partial, SolutionAcceptor<ColumnName> output) { int results = 0; if (head.right == head) { List<List<ColumnName>> result = new ArrayList<List<ColumnName>>(partial.size()); for(Node<ColumnName> row: partial) { result.add(getRowName(row)); } output.solution(result); results += 1; } else { ColumnHeader<ColumnName> col = findBestColumn(); if (col.size > 0) { coverColumn(col); Node<ColumnName> row = col.down; while (row != col) { partial.add(row); Node<ColumnName> node = row.right; while (node != row) { coverColumn(node.head); node = node.right; } results += search(partial, output); partial.remove(partial.size() - 1); node = row.left; while (node != row) { uncoverColumn(node.head); node = node.left; } row = row.down; } uncoverColumn(col); } } return results; }
java
{ "resource": "" }
q161560
DancingLinks.searchPrefixes
train
private void searchPrefixes(int depth, int[] choices, List<int[]> prefixes) { if (depth == 0) { prefixes.add(choices.clone()); } else { ColumnHeader<ColumnName> col = findBestColumn(); if (col.size > 0) { coverColumn(col); Node<ColumnName> row = col.down; int rowId = 0; while (row != col) { Node<ColumnName> node = row.right; while (node != row) { coverColumn(node.head); node = node.right; } choices[choices.length - depth] = rowId; searchPrefixes(depth - 1, choices, prefixes); node = row.left; while (node != row) { uncoverColumn(node.head); node = node.left; } row = row.down; rowId += 1; } uncoverColumn(col); } } }
java
{ "resource": "" }
q161561
DancingLinks.split
train
public List<int[]> split(int depth) { int[] choices = new int[depth]; List<int[]> result = new ArrayList<int[]>(100000); searchPrefixes(depth, choices, result); return result; }
java
{ "resource": "" }
q161562
DancingLinks.advance
train
private Node<ColumnName> advance(int goalRow) { ColumnHeader<ColumnName> col = findBestColumn(); if (col.size > 0) { coverColumn(col); Node<ColumnName> row = col.down; int id = 0; while (row != col) { if (id == goalRow) { Node<ColumnName> node = row.right; while (node != row) { coverColumn(node.head); node = node.right; } return row; } id += 1; row = row.down; } } return null; }
java
{ "resource": "" }
q161563
DancingLinks.rollback
train
private void rollback(Node<ColumnName> row) { Node<ColumnName> node = row.left; while (node != row) { uncoverColumn(node.head); node = node.left; } uncoverColumn(row.head); }
java
{ "resource": "" }
q161564
DancingLinks.solve
train
public int solve(int[] prefix, SolutionAcceptor<ColumnName> output) { List<Node<ColumnName>> choices = new ArrayList<Node<ColumnName>>(); for(int i=0; i < prefix.length; ++i) { choices.add(advance(prefix[i])); } int result = search(choices, output); for(int i=prefix.length-1; i >=0; --i) { rollback(choices.get(i)); } return result; }
java
{ "resource": "" }
q161565
QuorumCall.waitFor
train
public synchronized void waitFor( int minResponses, int minSuccesses, int maxExceptions, int millis, String operationName) throws InterruptedException, TimeoutException { long st = monotonicNow(); long nextLogTime = st + (long)(millis * WAIT_PROGRESS_INFO_THRESHOLD); long et = st + millis; while (true) { checkAssertionErrors(); if (minResponses > 0 && countResponses() >= minResponses) return; if (minSuccesses > 0 && countSuccesses() >= minSuccesses) return; if ((maxExceptions > 0 && countExceptions() >= maxExceptions) || (maxExceptions == 0 && countExceptions() > 0)) { return; } long now = monotonicNow(); if (now > nextLogTime) { long waited = now - st; String msg = String.format( "Waited %s ms (timeout=%s ms) for a response for %s", waited, millis, operationName); if (!successes.isEmpty()) { msg += ". Succeeded so far: [" + Joiner.on(",").join(successes.keySet()) + "]"; } if (!exceptions.isEmpty()) { msg += ". Exceptions so far: [" + mapToString(exceptions) + "]"; } if (successes.isEmpty() && exceptions.isEmpty()) { msg += ". No responses yet."; } if (waited > millis * WAIT_PROGRESS_WARN_THRESHOLD) { QuorumJournalManager.LOG.warn(msg); } else { QuorumJournalManager.LOG.info(msg); } nextLogTime = now + WAIT_PROGRESS_INTERVAL_MILLIS; } long rem = et - now; if (rem <= 0) { throw new TimeoutException(); } rem = Math.min(rem, nextLogTime - now); rem = Math.max(rem, 1); wait(rem); } }
java
{ "resource": "" }
q161566
QuorumCall.checkAssertionErrors
train
private synchronized void checkAssertionErrors() { boolean assertsEnabled = false; assert assertsEnabled = true; // sets to true if enabled if (assertsEnabled) { for (Throwable t : exceptions.values()) { if (t instanceof AssertionError) { throw (AssertionError)t; } else if (t instanceof RemoteException && ((RemoteException)t).getClassName().equals( AssertionError.class.getName())) { throw new AssertionError(t); } } } }
java
{ "resource": "" }
q161567
QuorumCall.throwQuorumException
train
public synchronized void throwQuorumException(String msg) throws QuorumException { Preconditions.checkState(!exceptions.isEmpty()); throw QuorumException.create(msg, successes, exceptions); }
java
{ "resource": "" }
q161568
JobSubmitter.add
train
public void add(final GridmixJob job) throws InterruptedException { final boolean addToQueue = !shutdown; if (addToQueue) { final SubmitTask task = new SubmitTask(job); sem.acquire(); try { sched.execute(task); } catch (RejectedExecutionException e) { sem.release(); } } }
java
{ "resource": "" }
q161569
JobSubmitter.join
train
public void join(long millis) throws InterruptedException { if (!shutdown) { throw new IllegalStateException("Cannot wait for active submit thread"); } sched.awaitTermination(millis, TimeUnit.MILLISECONDS); }
java
{ "resource": "" }
q161570
JMXGet.printAllValues
train
public void printAllValues() throws Exception { err("List of all the available keys:"); Object val = null; for (ObjectName oname: hadoopObjectNames) { err(">>>>>>>>jmx name: " + oname.getCanonicalKeyPropertyListString()); MBeanInfo mbinfo = mbsc.getMBeanInfo(oname); MBeanAttributeInfo [] mbinfos = mbinfo.getAttributes(); for (MBeanAttributeInfo mb: mbinfos) { val = mbsc.getAttribute(oname, mb.getName()); System.out.format(format,mb.getName(),val.toString()); } } }
java
{ "resource": "" }
q161571
JMXGet.getValue
train
public String getValue(String key) throws Exception{ Object val = null; for (ObjectName oname: hadoopObjectNames) { try { val = mbsc.getAttribute(oname, key); } catch (AttributeNotFoundException anfe) { /*just go to the next */ continue; } catch(ReflectionException re) { if (re.getCause() instanceof NoSuchMethodException) { continue; } } err("Info: key = " + key + "; val = " + val); break; } return (val == null) ? null : val.toString(); }
java
{ "resource": "" }
q161572
DirectoryStripeReader.getDirLogicalSize
train
public static long getDirLogicalSize(List<FileStatus> lfs) { long totalSize = 0L; if (null == lfs) { return totalSize; } for (FileStatus fsStat : lfs) { totalSize += fsStat.getLen(); } return totalSize; }
java
{ "resource": "" }
q161573
DirectoryStripeReader.getDirPhysicalSize
train
public static long getDirPhysicalSize(List<FileStatus> lfs) { long totalSize = 0L; if (null == lfs) { return totalSize; } for (FileStatus fsStat : lfs) { totalSize += fsStat.getLen() * fsStat.getReplication(); } return totalSize; }
java
{ "resource": "" }
q161574
BinaryRecordOutput.get
train
public static BinaryRecordOutput get(DataOutput out) { BinaryRecordOutput bout = (BinaryRecordOutput) bOut.get(); bout.setDataOutput(out); return bout; }
java
{ "resource": "" }
q161575
LuceneUtil.isSegmentsFile
train
public static boolean isSegmentsFile(String name) { return name.startsWith(IndexFileNames.SEGMENTS) && !name.equals(IndexFileNames.SEGMENTS_GEN); }
java
{ "resource": "" }
q161576
LuceneUtil.generationFromSegmentsFileName
train
public static long generationFromSegmentsFileName(String fileName) { if (fileName.equals(IndexFileNames.SEGMENTS)) { return 0; } else if (fileName.startsWith(IndexFileNames.SEGMENTS)) { return Long.parseLong( fileName.substring(1 + IndexFileNames.SEGMENTS.length()), Character.MAX_RADIX); } else { throw new IllegalArgumentException("fileName \"" + fileName + "\" is not a segments file"); } }
java
{ "resource": "" }
q161577
SimpleSeekableFormatInputStream.createInterleavedInputStream
train
protected InterleavedInputStream createInterleavedInputStream(InputStream in, int metaDataBlockLength, int dataBlockLength, SimpleSeekableFormat.MetaDataConsumer consumer) { return new InterleavedInputStream(in, metaDataBlockLength, dataBlockLength, consumer); }
java
{ "resource": "" }
q161578
SimpleSeekableFormatInputStream.moveToNextDataSegment
train
private boolean moveToNextDataSegment() throws IOException { try { clearDataSegment(); DataSegmentReader dataSegmentReader = new DataSegmentReader(dataIn, conf, decompressorCache); dataSegmentIn = dataSegmentReader.getInputStream(); } catch (EmptyDataSegmentException e){ // no data available return false; } catch (EOFException e) { // EOFException is thrown when the underlying data stream is truncated, e.g. truncated file. // This is considered as a normal case. throw new CodecPrematureEOFException("Truncated .SSF file detected."); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } return true; }
java
{ "resource": "" }
q161579
SimpleSeekableFormatInputStream.seekForward
train
public long seekForward() throws IOException { // Try to read the last metadata block interleavedIn.skipToLastAvailableMetaDataBlock(); if (!interleavedIn.readMetaDataIfNeeded()) { throw new EOFException("Cannot get a complete metadata block"); } // Move the interleavedIn to the beginning of a dataSegment SortedMap<Long, Long> offsetPairs = metaData.getOffsetPairs(); // The last key in the offsetPair points to the farthest position that we can seek to. long uncompressedDataOffset = offsetPairs.lastKey(); long compressedDataOffset = offsetPairs.get(uncompressedDataOffset); long toSkip = compressedDataOffset - interleavedIn.getDataOffset(); if (toSkip < 0) { throw new CorruptedDataException("SSF format error: The last offset pair is before the current position in InterleaveStream!"); } try { interleavedIn.skipExactly(toSkip); } catch (EOFException e) { // Ignore this exception // This is the PTail use case. We don't care about this CodecPrematureEOFException } clearDataSegment(); return uncompressedDataOffset; }
java
{ "resource": "" }
q161580
BookKeeperEditLogInputStream.refresh
train
@Override public void refresh(long position, long skippedUntilTxid) throws IOException { checkInitialized(); if (isInProgress()) { // If a ledger is in progress, re-open it for reading in order // to determine the correct bounds of the ledger. LedgerHandle ledger = ledgerProvider.openForReading(ledgerId); journalInputStream.resetLedger(ledger); } // Try to set the underlying stream to the specified position journalInputStream.position(position); // Reload the position tracker and log reader to adjust to the newly // refreshed position bin = new BufferedInputStream(journalInputStream); tracker = new PositionTrackingInputStream(bin, position); DataInputStream in = new DataInputStream(tracker); if (position == 0) { // If we are at the beginning, re-read the version logVersion = readLogVersion(in); } reader = new Reader(in, logVersion); }
java
{ "resource": "" }
q161581
DFSLocatedBlocks.blockLocationInfoExpiresIfNeeded
train
public void blockLocationInfoExpiresIfNeeded() { if (blkLocInfoExpireTimeout < 0) { return; } long timeNow = System.currentTimeMillis(); if (timeBlkLocInfoExpire < timeNow) { this.writeLock(); try { long newTimeBlockExpire = Long.MAX_VALUE; List<LocatedBlock> listToRemove = new ArrayList<LocatedBlock>(); for (LocatedBlock lb : blkLocInfoExpireMap.keySet()) { long expireTime = blkLocInfoExpireMap.get(lb); if (expireTime < timeNow) { if (DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("Expire cached block location for " + lb); } listToRemove.add(lb); } else if (expireTime < newTimeBlockExpire) { newTimeBlockExpire = expireTime; } else { } } super.getLocatedBlocks().removeAll(listToRemove); for (LocatedBlock lb : listToRemove) { blkLocInfoExpireMap.remove(lb); } this.timeBlkLocInfoExpire = newTimeBlockExpire; } finally { this.writeUnlock(); } } }
java
{ "resource": "" }
q161582
DFSLocatedBlocks.initBlkLocInfoExpireMap
train
private void initBlkLocInfoExpireMap(long expireTime) { if (blkLocInfoExpireTimeout < 0) { return; } this.blkLocInfoExpireMap = new HashMap<LocatedBlock, Long>(this .getLocatedBlocks().size()); for (LocatedBlock lb : this.getLocatedBlocks()) { blkLocInfoExpireMap.put(lb, expireTime); } timeBlkLocInfoExpire = expireTime; }
java
{ "resource": "" }
q161583
DFSLocatedBlocks.isUnderConstructionBlock
train
public boolean isUnderConstructionBlock(Block block) { if (!isUnderConstruction()) { return false; } LocatedBlock lastBlock = this.get(this.locatedBlockCount() - 1); // There are potential inconsistency when counting the size of the // last block, but fileLength is not likely to be under-estimated // the size, unless the last block size is 0. if ((this.fileLength <= lastBlock.getStartOffset() + lastBlock.getBlockSize()) && lastBlock.getBlock().equals(block)) { return true; } return false; }
java
{ "resource": "" }
q161584
Counters.findCounter
train
public synchronized Counter findCounter(String group, String name) { return getGroup(group).getCounterForName(name); }
java
{ "resource": "" }
q161585
Counters.findCounter
train
@Deprecated public synchronized Counter findCounter(String group, int id, String name) { return getGroup(group).getCounterForName(name); }
java
{ "resource": "" }
q161586
Counters.incrCounter
train
public synchronized void incrCounter(String group, String counter, long amount) { getGroup(group).getCounterForName(counter).increment(amount); }
java
{ "resource": "" }
q161587
Counters.sum
train
public static Counters sum(Counters a, Counters b) { Counters counters = new Counters(); counters.incrAllCounters(a); counters.incrAllCounters(b); return counters; }
java
{ "resource": "" }
q161588
Counters.log
train
public void log(Log log) { log.info("Counters: " + size()); for(Group group: this) { log.info(" " + group.getDisplayName()); for (Counter counter: group) { log.info(" " + counter.getDisplayName() + "=" + counter.getCounter()); } } }
java
{ "resource": "" }
q161589
Counters.makeCompactString
train
public synchronized String makeCompactString() { StringBuffer buffer = new StringBuffer(); boolean first = true; for(Group group: this){ for(Counter counter: group) { if (first) { first = false; } else { buffer.append(','); } buffer.append(group.getDisplayName()); buffer.append('.'); buffer.append(counter.getDisplayName()); buffer.append(':'); buffer.append(counter.getCounter()); } } return buffer.toString(); }
java
{ "resource": "" }
q161590
Counters.makeJsonString
train
public synchronized String makeJsonString() { Map<String, Map<String, Long>> data = new HashMap<String, Map<String, Long>>(); for (Group group : this) { Map<String, Long> groupData = new HashMap<String, Long>(); data.put(group.getDisplayName(), groupData); for (Counter counter : group) { groupData.put(counter.getDisplayName(), counter.getCounter()); } } return JSON.toString(data); }
java
{ "resource": "" }
q161591
Counters.makeEscapedCompactString
train
public synchronized String makeEscapedCompactString() { StringBuffer buffer = new StringBuffer(); for(Group group: this){ buffer.append(group.makeEscapedCompactString()); } return buffer.toString(); }
java
{ "resource": "" }
q161592
Counters.getBlock
train
private static String getBlock(String str, char open, char close, IntWritable index) throws ParseException { StringBuilder split = new StringBuilder(); int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR, index.get(), split); split.setLength(0); // clear the buffer if (next >= 0) { ++next; // move over '(' next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR, next, split); if (next >= 0) { ++next; // move over ')' index.set(next); return split.toString(); // found a block } else { throw new ParseException("Unexpected end of block", next); } } return null; // found nothing }
java
{ "resource": "" }
q161593
FileOutputFormat.getUniqueFile
train
public synchronized static String getUniqueFile(TaskAttemptContext context, String name, String extension) { TaskID taskId = context.getTaskAttemptID().getTaskID(); int partition = taskId.getId(); StringBuilder result = new StringBuilder(); result.append(name); result.append('-'); result.append(taskId.isMap() ? 'm' : 'r'); result.append('-'); result.append(NUMBER_FORMAT.format(partition)); result.append(extension); return result.toString(); }
java
{ "resource": "" }
q161594
FileOutputFormat.getDefaultWorkFile
train
public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException{ FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(context); return new Path(committer.getWorkPath(), getUniqueFile(context, "part", extension)); }
java
{ "resource": "" }
q161595
ValueAggregatorCombiner.reduce
train
public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException { String keyStr = key.toString(); int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR); String type = keyStr.substring(0, pos); ValueAggregator aggregator = ValueAggregatorBaseDescriptor .generateValueAggregator(type); while (values.hasNext()) { aggregator.addNextValue(values.next()); } Iterator outputs = aggregator.getCombinerOutput().iterator(); while (outputs.hasNext()) { Object v = outputs.next(); if (v instanceof Text) { output.collect(key, (Text)v); } else { output.collect(key, new Text(v.toString())); } } }
java
{ "resource": "" }
q161596
MemoryMatcher.getMemReservedForTasks
train
synchronized Long getMemReservedForTasks( TaskTrackerStatus taskTracker, TaskType taskType) { long vmem = 0; for (TaskStatus task : taskTracker.getTaskReports()) { // the following task states are one in which the slot is // still occupied and hence memory of the task should be // accounted in used memory. if ((task.getRunState() == TaskStatus.State.RUNNING) || (task.getRunState() == TaskStatus.State.UNASSIGNED) || (task.inTaskCleanupPhase())) { // Get the memory "allotted" for this task based on number of slots long myVmem = 0; if (task.getIsMap() && taskType == TaskType.MAP) { long memSizePerMapSlot = scheduler.getMemSizeForMapSlot(); myVmem = memSizePerMapSlot * task.getNumSlots(); } else if (!task.getIsMap() && taskType == TaskType.REDUCE) { long memSizePerReduceSlot = scheduler.getMemSizeForReduceSlot(); myVmem = memSizePerReduceSlot * task.getNumSlots(); } vmem += myVmem; } } return Long.valueOf(vmem); }
java
{ "resource": "" }
q161597
MemoryMatcher.matchesMemoryRequirements
train
boolean matchesMemoryRequirements(JobInProgress job,TaskType taskType, TaskTrackerStatus taskTracker) { LOG.debug("Matching memory requirements of " + job.getJobID().toString() + " for scheduling on " + taskTracker.trackerName); if (!isSchedulingBasedOnMemEnabled()) { LOG.debug("Scheduling based on job's memory requirements is disabled." + " Ignoring any value set by job."); return true; } Long memUsedOnTT = getMemReservedForTasks(taskTracker, taskType); long totalMemUsableOnTT = 0; long memForThisTask = 0; if (taskType == TaskType.MAP) { memForThisTask = job.getJobConf().getMemoryForMapTask(); totalMemUsableOnTT = scheduler.getMemSizeForMapSlot() * taskTracker.getMaxMapSlots(); } else if (taskType == TaskType.REDUCE) { memForThisTask = job.getJobConf().getMemoryForReduceTask(); totalMemUsableOnTT = scheduler.getMemSizeForReduceSlot() * taskTracker.getMaxReduceSlots(); } long freeMemOnTT = totalMemUsableOnTT - memUsedOnTT.longValue(); if (memForThisTask > freeMemOnTT) { if (LOG.isDebugEnabled()) { LOG.debug("memForThisTask (" + memForThisTask + ") > freeMemOnTT (" + freeMemOnTT + "). A " + taskType + " task from " + job.getJobID().toString() + " cannot be scheduled on TT " + taskTracker.trackerName); } return false; } if (LOG.isDebugEnabled()) { LOG.debug("memForThisTask = " + memForThisTask + ". freeMemOnTT = " + freeMemOnTT + ". A " + taskType.toString() + " task from " + job.getJobID().toString() + " matches memory requirements " + "on TT "+ taskTracker.trackerName); } return true; }
java
{ "resource": "" }
q161598
LocalDirAllocator.getLocalPathToRead
train
public Path getLocalPathToRead(String pathStr, Configuration conf) throws IOException { AllocatorPerContext context = obtainContext(contextCfgItemName); return context.getLocalPathToRead(pathStr, conf); }
java
{ "resource": "" }
q161599
LocalDirAllocator.ifExists
train
public boolean ifExists(String pathStr,Configuration conf) { AllocatorPerContext context = obtainContext(contextCfgItemName); return context.ifExists(pathStr, conf); }
java
{ "resource": "" }