| _id (string, 2–7 chars) | title (string, 3–140 chars) | partition (string, 3 classes) | text (string, 73–34.1k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q161300 | FSDirectory.setReplication | train | BlockInfo[] setReplication(String src,
short replication,
int[] oldReplication
) throws IOException {
waitForReady();
BlockInfo[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication);
if (fileBlocks != null) // log replication change
fsImage.getEditLog().logSetReplication(src, replication);
return fileBlocks;
} | java | {
"resource": ""
} |
q161301 | FSDirectory.getPreferredBlockSize | train | long getPreferredBlockSize(String filename) throws IOException {
byte[][] components = INodeDirectory.getPathComponents(filename);
readLock();
try {
INode fileNode = rootDir.getNode(components);
if (fileNode == null) {
throw new IOException("Unknown file: " + filename);
}
if (fileNode.isDirectory()) {
throw new IOException("Getting block size of a directory: " +
filename);
}
return ((INodeFile)fileNode).getPreferredBlockSize();
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161302 | FSDirectory.getINode | train | INode getINode(String src) {
src = normalizePath(src);
byte[][] components = INodeDirectory.getPathComponents(src);
readLock();
try {
INode inode = rootDir.getNode(components);
return inode;
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161303 | FSDirectory.getINode | train | INode getINode(long id) {
readLock();
try {
INode inode = inodeMap.get(id);
return inode;
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161304 | FSDirectory.mergeInternal | train | public void mergeInternal(INode parityINodes[], INode sourceINodes[],
String parity, String source, RaidCodec codec, int[] checksums)
throws IOException {
waitForReady();
long now = FSNamesystem.now();
unprotectedMerge(parityINodes, sourceINodes, parity, source, codec,
checksums, now);
fsImage.getEditLog().logMerge(parity, source, codec.id, checksums, now);
} | java | {
"resource": ""
} |
q161305 | FSDirectory.delete | train | INode delete(String src, INode[] inodes, List<BlockInfo> collectedBlocks,
int blocksLimit) {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src);
}
waitForReady();
long now = FSNamesystem.now();
INode deletedNode = unprotectedDelete(src, inodes, collectedBlocks,
blocksLimit, now);
if (deletedNode != null) {
fsImage.getEditLog().logDelete(src, now);
}
return deletedNode;
} | java | {
"resource": ""
} |
q161306 | FSDirectory.unprotectedDelete | train | INode unprotectedDelete(String src, long modificationTime) {
return unprotectedDelete(src, this.getExistingPathINodes(src), null,
BLOCK_DELETION_NO_LIMIT, modificationTime);
} | java | {
"resource": ""
} |
q161307 | FSDirectory.unprotectedDelete | train | INode unprotectedDelete(String src, INode inodes[], List<BlockInfo> toBeDeletedBlocks,
int blocksLimit, long modificationTime) {
src = normalizePath(src);
writeLock();
try {
INode targetNode = inodes[inodes.length-1];
if (targetNode == null) { // non-existent src
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+"failed to remove "+src+" because it does not exist");
}
return null;
} else if (inodes.length == 1) { // src is the root
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
"failed to remove " + src +
" because the root is not allowed to be deleted");
return null;
} else {
try {
// Remove the node from the namespace
removeChild(inodes, inodes.length-1);
// set the parent's modification time
inodes[inodes.length-2].setModificationTime(modificationTime);
// GC all the blocks underneath the node.
if (toBeDeletedBlocks == null) {
toBeDeletedBlocks = new ArrayList<BlockInfo>();
blocksLimit = BLOCK_DELETION_NO_LIMIT;
}
List<INode> removedINodes = new ArrayList<INode>();
int filesRemoved = targetNode.collectSubtreeBlocksAndClear(
toBeDeletedBlocks, blocksLimit, removedINodes);
FSNamesystem.incrDeletedFileCount(getFSNamesystem(), filesRemoved);
// Delete collected blocks immediately;
// Remaining blocks need to be collected and deleted later on
getFSNamesystem().removePathAndBlocks(src, toBeDeletedBlocks);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+src+" is removed");
}
targetNode.parent = null; //mark the node as deleted
removeFromInodeMap(removedINodes);
return targetNode;
} catch (IOException e) {
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
"failed to remove " + src + " because " + e.getMessage());
return null;
}
}
} finally {
writeUnlock();
}
} | java | {
"resource": ""
} |
q161308 | FSDirectory.removeFromInodeMap | train | void removeFromInodeMap(List<INode> inodes) {
if (inodes != null) {
for (INode inode : inodes) {
if (inode != null) {
inodeMap.remove(inode);
}
}
}
} | java | {
"resource": ""
} |
q161309 | FSDirectory.replaceNode | train | void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
throws IOException {
replaceNode(path, null, oldnode, newnode, true);
} | java | {
"resource": ""
} |
q161310 | FSDirectory.getRandomFileStats | train | public List<FileStatusExtended> getRandomFileStats(double percent) {
readLock();
try {
List<FileStatusExtended> stats = new LinkedList<FileStatusExtended>();
for (INodeFile file : getRandomFiles(percent)) {
try {
String path = file.getFullPathName();
FileStatus stat = createFileStatus(path, file);
Lease lease = this.getFSNamesystem().leaseManager.getLeaseByPath(path);
String holder = (lease == null) ? null : lease.getHolder();
long hardlinkId = (file instanceof INodeHardLinkFile) ? ((INodeHardLinkFile) file)
.getHardLinkID() : -1;
stats.add(new FileStatusExtended(stat, file.getBlocks(), holder,
hardlinkId));
} catch (IOException ioe) {
// the file has already been deleted; ignore it
}
}
return stats;
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161311 | FSDirectory.getFileBlocks | train | Block[] getFileBlocks(String src) {
waitForReady();
byte[][] components = INodeDirectory.getPathComponents(src);
readLock();
try {
INode targetNode = rootDir.getNode(components);
if (targetNode == null)
return null;
if(targetNode.isDirectory())
return null;
return ((INodeFile)targetNode).getBlocks();
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161312 | FSDirectory.getExistingPathINodes | train | public INode[] getExistingPathINodes(String path) {
byte[][] components = INode.getPathComponents(path);
INode[] inodes = new INode[components.length];
readLock();
try {
rootDir.getExistingPathINodes(components, inodes);
return inodes;
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161313 | FSDirectory.isValidToCreate | train | boolean isValidToCreate(String src) {
String srcs = normalizePath(src);
byte[][] components = INodeDirectory.getPathComponents(srcs);
readLock();
try {
if (srcs.startsWith("/") &&
!srcs.endsWith("/") &&
rootDir.getNode(components) == null) {
return true;
} else {
return false;
}
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161314 | FSDirectory.isDir | train | boolean isDir(String src) {
byte[][] components = INodeDirectory.getPathComponents(normalizePath(src));
readLock();
try {
INode node = rootDir.getNode(components);
return isDir(node);
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161315 | FSDirectory.updateCount | train | private void updateCount(INode[] inodes, int numOfINodes,
long nsDelta, long dsDelta, boolean checkQuota)
throws QuotaExceededException {
this.updateCount(inodes, 0, numOfINodes, nsDelta, dsDelta, checkQuota);
} | java | {
"resource": ""
} |
q161316 | FSDirectory.updateCount | train | private void updateCount(INode[] inodes, int dsUpdateStartPos, int endPos,
long nsDelta, long dsDelta, boolean checkQuota)
throws QuotaExceededException {
if (!ready) {
// still initializing. do not check or update quotas.
return;
}
if (endPos > inodes.length) {
endPos = inodes.length;
}
if (checkQuota) {
verifyQuota(inodes, 0, dsUpdateStartPos, endPos, nsDelta, dsDelta);
}
for (int i = 0; i < endPos; i++) {
if (inodes[i].isQuotaSet()) { // a directory with quota
INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i];
if (i >= dsUpdateStartPos) {
node.updateNumItemsInTree(nsDelta, dsDelta);
} else {
node.updateNumItemsInTree(nsDelta, 0);
}
}
}
} | java | {
"resource": ""
} |
q161317 | FSDirectory.updateCountNoQuotaCheck | train | private void updateCountNoQuotaCheck(INode[] inodes, int startPos, int endPos,
long nsDelta, long dsDelta) {
try {
updateCount(inodes, startPos, endPos, nsDelta, dsDelta, false);
} catch (QuotaExceededException e) {
NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e);
}
} | java | {
"resource": ""
} |
q161318 | FSDirectory.unprotectedUpdateCount | train | private void unprotectedUpdateCount(INode[] inodes, int numOfINodes,
long nsDelta, long dsDelta) {
for(int i=0; i < numOfINodes; i++) {
if (inodes[i].isQuotaSet()) { // a directory with quota
INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
node.updateNumItemsInTree(nsDelta, dsDelta);
}
}
} | java | {
"resource": ""
} |
q161319 | FSDirectory.getFullPathName | train | static String getFullPathName(byte[][] names) {
StringBuilder fullPathName = new StringBuilder();
for (int i = 1; i < names.length; i++) {
byte[] name = names[i];
fullPathName.append(Path.SEPARATOR_CHAR)
.append(DFSUtil.bytes2String(name));
}
return fullPathName.toString();
} | java | {
"resource": ""
} |
q161320 | FSDirectory.getINodeArray | train | static INode[] getINodeArray(INode inode) throws IOException {
// calculate the depth of this inode from root
int depth = getPathDepth(inode);
INode[] inodes = new INode[depth];
// fill up the inodes in the path from this inode to root
for (int i = 0; i < depth; i++) {
inodes[depth-i-1] = inode;
inode = inode.parent;
}
return inodes;
} | java | {
"resource": ""
} |
q161321 | FSDirectory.getPathDepth | train | static private int getPathDepth(INode inode) throws IOException {
// calculate the depth of this inode from root
int depth = 1;
INode node; // node on the path to the root
for (node = inode; node.parent != null; node = node.parent) {
depth++;
}
// parent should be root
if (node.isRoot()) {
return depth;
}
// invalid inode
throw new IOException("Invalid inode: " + inode.getLocalName());
} | java | {
"resource": ""
} |
q161322 | FSDirectory.getINodeByteArray | train | static byte[][] getINodeByteArray(INode inode) throws IOException {
// calculate the depth of this inode from root
int depth = getPathDepth(inode);
byte[][] names = new byte[depth][];
// fill up the inodes in the path from this inode to root
for (int i = 0; i < depth; i++) {
names[depth-i-1] = inode.getLocalNameBytes();
inode = inode.parent;
}
return names;
} | java | {
"resource": ""
} |
q161323 | FSDirectory.getFullPathName | train | static String getFullPathName(INode inode) throws IOException {
INode[] inodes = getINodeArray(inode);
return getFullPathName(inodes, inodes.length-1);
} | java | {
"resource": ""
} |
q161324 | FSDirectory.addNode | train | private <T extends INode> T addNode(String src, T child,
long childDiskspace, boolean inheritPermission)
throws QuotaExceededException {
byte[][] components = INode.getPathComponents(src);
byte[] path = components[components.length - 1];
child.setLocalName(path);
cacheName(child);
INode[] inodes = new INode[components.length];
writeLock();
try {
rootDir.getExistingPathINodes(components, inodes);
return addChild(inodes, inodes.length-1, child, childDiskspace,
inheritPermission);
} finally {
writeUnlock();
}
} | java | {
"resource": ""
} |
q161325 | FSDirectory.verifyQuota | train | private void verifyQuota(INode[] inodes, int nsQuotaStartPos, int dsQuotaStartPos,
int endPos, long nsDelta, long dsDelta)
throws QuotaExceededException {
if (!ready) {
// Do not check quota if edits log is still being processed
return;
}
if (endPos >inodes.length) {
endPos = inodes.length;
}
int i = endPos - 1;
Assert.assertTrue("nsQuotaStartPos shall be less or equal than the dsQuotaStartPos",
(nsQuotaStartPos <= dsQuotaStartPos));
try {
// check existing components in the path
for(; i >= nsQuotaStartPos; i--) {
if (inodes[i].isQuotaSet()) { // a directory with quota
INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
if (i >= dsQuotaStartPos) {
// Verify both nsQuota and dsQuota
node.verifyQuota(nsDelta, dsDelta);
} else {
// Verify the nsQuota only
node.verifyQuota(nsDelta, 0);
}
}
}
} catch (QuotaExceededException e) {
e.setPathName(getFullPathName(inodes, i));
throw e;
}
} | java | {
"resource": ""
} |
q161326 | FSDirectory.updateCountForINodeWithQuota | train | private static void updateCountForINodeWithQuota(INodeDirectory dir,
INode.DirCounts counts,
ArrayList<INode> nodesInPath) {
long parentNamespace = counts.nsCount;
long parentDiskspace = counts.dsCount;
counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree()
counts.dsCount = 0L;
/* We don't need nodesInPath if we could use 'parent' field in
* INode. using 'parent' is not currently recommended. */
nodesInPath.add(dir);
for (INode child : dir.getChildren()) {
if (child.isDirectory()) {
updateCountForINodeWithQuota((INodeDirectory)child,
counts, nodesInPath);
} else { // reduce recursive calls
counts.nsCount += 1;
counts.dsCount += ((INodeFile)child).diskspaceConsumed();
}
}
if (dir.isQuotaSet()) {
((INodeDirectoryWithQuota)dir).setSpaceConsumed(counts.nsCount,
counts.dsCount);
// check if quota is violated for some reason.
if ((dir.getNsQuota() >= 0 && counts.nsCount > dir.getNsQuota()) ||
(dir.getDsQuota() >= 0 && counts.dsCount > dir.getDsQuota())) {
// can only happen because of a software bug. the bug should be fixed.
StringBuilder path = new StringBuilder(512);
for (INode n : nodesInPath) {
path.append('/');
path.append(n.getLocalName());
}
NameNode.LOG.warn("Quota violation in image for " + path +
" (Namespace quota : " + dir.getNsQuota() +
" consumed : " + counts.nsCount + ")" +
" (Diskspace quota : " + dir.getDsQuota() +
" consumed : " + counts.dsCount + ").");
}
}
// pop
nodesInPath.remove(nodesInPath.size()-1);
counts.nsCount += parentNamespace;
counts.dsCount += parentDiskspace;
} | java | {
"resource": ""
} |
q161327 | FSDirectory.createFileStatus | train | static FileStatus createFileStatus(String path, INode node) {
// length is zero for directories
return new FileStatus(node.isDirectory() ? 0 : node.computeContentSummary().getLength(),
node.isDirectory(),
node.isDirectory() ? 0 : ((INodeFile)node).getReplication(),
node.isDirectory() ? 0 : ((INodeFile)node).getPreferredBlockSize(),
node.getModificationTime(),
node.getAccessTime(),
node.getFsPermission(),
node.getUserName(),
node.getGroupName(),
new Path(path));
} | java | {
"resource": ""
} |
q161328 | FSDirectory.createHdfsFileStatus | train | private static HdfsFileStatus createHdfsFileStatus(byte[] path, INode node) {
long size = 0; // length is zero for directories
short replication = 0;
long blocksize = 0;
if (node instanceof INodeFile) {
INodeFile fileNode = (INodeFile)node;
size = fileNode.getFileSize();
replication = fileNode.getReplication();
blocksize = fileNode.getPreferredBlockSize();
}
else
if (node.isDirectory()) {
INodeDirectory dirNode = (INodeDirectory)node;
//length is used to represent the number of children for directories.
size = dirNode.getChildren().size();
}
return new HdfsFileStatus(
size,
node.isDirectory(),
replication,
blocksize,
node.getModificationTime(),
node.getAccessTime(),
node.getFsPermission(),
node.getUserName(),
node.getGroupName(),
path);
} | java | {
"resource": ""
} |
q161329 | FSDirectory.createLocatedBlocks | train | private LocatedBlocks createLocatedBlocks(INode node) throws IOException {
LocatedBlocks loc = null;
if (node instanceof INodeFile) {
loc = getFSNamesystem().getBlockLocationsInternal(
(INodeFile)node, 0L, Long.MAX_VALUE, Integer.MAX_VALUE);
}
if (loc==null) {
loc = EMPTY_BLOCK_LOCS;
}
return loc;
} | java | {
"resource": ""
} |
q161330 | TaskTracker.close | train | public synchronized void close() throws IOException {
//
// Kill running tasks. Do this in a 2nd vector, called 'tasksToClose',
// because calling jobHasFinished() may result in an edit to 'tasks'.
//
TreeMap<TaskAttemptID, TaskInProgress> tasksToClose =
new TreeMap<TaskAttemptID, TaskInProgress>();
tasksToClose.putAll(tasks);
for (TaskInProgress tip : tasksToClose.values()) {
tip.jobHasFinished(false);
}
this.running = false;
if (pulseChecker != null) {
pulseChecker.shutdown();
}
if (versionBeanName != null) {
MBeanUtil.unregisterMBean(versionBeanName);
}
// Clear local storage
if (asyncDiskService != null) {
// Clear local storage
asyncDiskService.cleanupAllVolumes();
// Shutdown all async deletion threads with up to 10 seconds of delay
asyncDiskService.shutdown();
try {
if (!asyncDiskService.awaitTermination(10000)) {
asyncDiskService.shutdownNow();
asyncDiskService = null;
}
} catch (InterruptedException e) {
asyncDiskService.shutdownNow();
asyncDiskService = null;
}
}
// Shutdown the fetcher thread
if (this.mapEventsFetcher != null) {
this.mapEventsFetcher.interrupt();
}
// Stop the launchers
this.mapLauncher.interrupt();
this.reduceLauncher.interrupt();
if (this.heartbeatMonitor != null) {
this.heartbeatMonitor.interrupt();
}
// Stop memory manager thread
if (this.taskMemoryManager != null) {
this.taskMemoryManager.shutdown();
}
// Stop cgroup memory watcher
this.cgroupMemoryWatcher.shutdown();
// All tasks are killed. So, they are removed from TaskLog monitoring also.
// Interrupt the monitor.
getTaskLogsMonitor().interrupt();
jvmManager.stop();
// shutdown RPC connections
RPC.stopProxy(jobClient);
// wait for the fetcher thread to exit
for (boolean done = false; !done; ) {
try {
if (this.mapEventsFetcher != null) {
this.mapEventsFetcher.join();
}
done = true;
} catch (InterruptedException e) {
}
}
if (taskReportServer != null) {
taskReportServer.stop();
taskReportServer = null;
}
if (healthChecker != null) {
//stop node health checker service
healthChecker.stop();
healthChecker = null;
}
if (this.server != null) {
try {
LOG.info("Shutting down StatusHttpServer");
this.server.stop();
LOG.info("Shutting down Netty MapOutput Server");
if (this.nettyMapOutputServer != null) {
this.nettyMapOutputServer.stop();
}
} catch (Exception e) {
LOG.warn("Exception shutting down TaskTracker", e);
}
}
} | java | {
"resource": ""
} |
q161331 | TaskTracker.getTceFromStore | train | private static TaskCompletionEvent getTceFromStore(TaskCompletionEvent t) {
// Use the store so that we can save memory in simulations where there
// are multiple task trackers in memory
synchronized(taskCompletionEventsStore) {
WeakReference<TaskCompletionEvent> e =
taskCompletionEventsStore.get(t);
// If it's not in the store, then put it in
if (e == null) {
taskCompletionEventsStore.put(t,
new WeakReference<TaskCompletionEvent>(t));
return t;
}
// It might be in the map, but the actual item might have been GC'ed
// just after we got it from the map
TaskCompletionEvent tceFromStore = e.get();
if (tceFromStore == null) {
taskCompletionEventsStore.put(t,
new WeakReference<TaskCompletionEvent>(t));
return t;
}
return tceFromStore;
}
} | java | {
"resource": ""
} |
q161332 | TaskTracker.queryJobTracker | train | private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId,
JobID jobId,
InterTrackerProtocol jobClient)
throws IOException {
if (jobClient == null) {
List<TaskCompletionEvent> empty = Collections.emptyList();
return empty;
}
TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents(
jobId,
fromEventId.get(),
probe_sample_size);
//we are interested in map task completion events only. So store
//only those
List <TaskCompletionEvent> recentMapEvents =
new ArrayList<TaskCompletionEvent>();
for (int i = 0; i < t.length; i++) {
if (t[i].isMap) {
if (useTaskCompletionEventsStore) {
// Try to get it from a store so that we don't have duplicate instances
// in memory in the same JVM. This could happen if there are multiple TT's
// and different reduce tasks from the same job are running in each TT.
recentMapEvents.add(getTceFromStore(t[i]));
} else {
recentMapEvents.add(t[i]);
}
}
}
fromEventId.set(fromEventId.get() + t.length);
return recentMapEvents;
} | java | {
"resource": ""
} |
q161333 | TaskTracker.transmitHeartBeat | train | protected HeartbeatResponse transmitHeartBeat(
InterTrackerProtocol jobClient, short heartbeatResponseId,
TaskTrackerStatus status) throws IOException {
//
// Check if we should ask for a new Task
//
boolean askForNewTask;
long localMinSpaceStart;
synchronized (this) {
askForNewTask =
((status.countOccupiedMapSlots() < maxMapSlots ||
status.countOccupiedReduceSlots() < maxReduceSlots) &&
acceptNewTasks);
localMinSpaceStart = minSpaceStart;
}
if (askForNewTask) {
checkLocalDirs(getLocalDirsFromConf(fConf));
askForNewTask = enoughFreeSpace(localMinSpaceStart);
gatherResourceStatus(status);
}
//add node health information
TaskTrackerHealthStatus healthStatus = status.getHealthStatus();
synchronized (this) {
if (healthChecker != null) {
healthChecker.setHealthStatus(healthStatus);
} else {
healthStatus.setNodeHealthy(true);
healthStatus.setLastReported(0L);
healthStatus.setHealthReport("");
}
}
//
// Xmit the heartbeat
//
HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status,
justStarted,
justInited,
askForNewTask,
heartbeatResponseId);
synchronized (this) {
for (TaskStatus taskStatus : status.getTaskReports()) {
if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
taskStatus.getRunState() != TaskStatus.State.UNASSIGNED &&
taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
!taskStatus.inTaskCleanupPhase()) {
if (taskStatus.getIsMap()) {
mapTotal--;
} else {
reduceTotal--;
}
try {
myInstrumentation.completeTask(taskStatus.getTaskID());
} catch (MetricsException me) {
LOG.warn("Caught: " + StringUtils.stringifyException(me));
}
removeRunningTask(taskStatus.getTaskID());
//
// When the task attempt has entered the finished state
// we log the counters to task log for future use
// load the counters into Scuba, Scriber and Hive
//
if (fConf.getBoolean(LOG_FINISHED_TASK_COUNTERS, true)) {
// for log format, 0 means json, else means name and value pair
String logHeader = "TaskCountersLogged " + taskStatus.getTaskID() + " " +
taskStatus.getFinishTime()/1000 + " ";
if (fConf.getInt(FINISHED_TASK_COUNTERS_LOG_FORMAT, 0) == 0) {
LOG.warn(
logHeader + taskStatus.getCounters().makeJsonString());
} else {
LOG.warn(
logHeader + taskStatus.getCounters().makeCompactString());
}
}
}
}
// Clear transient status information which should only
// be sent once to the JobTracker
for (TaskInProgress tip: runningTasks.values()) {
tip.getStatus().clearStatus();
}
}
return heartbeatResponse;
} | java | {
"resource": ""
} |
q161334 | TaskTracker.reinitTaskTracker | train | private boolean reinitTaskTracker(TaskTrackerAction[] actions) {
if (actions != null) {
for (TaskTrackerAction action : actions) {
if (action.getActionId() ==
TaskTrackerAction.ActionType.REINIT_TRACKER) {
LOG.info("Recieved RenitTrackerAction from JobTracker");
return true;
}
}
}
return false;
} | java | {
"resource": ""
} |
q161335 | TaskTracker.markUnresponsiveTasks | train | protected synchronized void markUnresponsiveTasks() throws IOException {
long now = System.currentTimeMillis();
for (TaskInProgress tip: runningTasks.values()) {
if (tip.getRunState() == TaskStatus.State.RUNNING ||
tip.getRunState() == TaskStatus.State.COMMIT_PENDING ||
tip.isCleaningup()) {
// Check the per-job timeout interval for tasks;
// an interval of '0' implies it is never timed-out
long jobTaskTimeout = tip.getTaskTimeout();
if (jobTaskTimeout == 0) {
continue;
}
// Check if the task has not reported progress for a
// time-period greater than the configured time-out
long timeSinceLastReport = now - tip.getLastProgressReport();
if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) {
String msg =
"Task " + tip.getTask().getTaskID() + " failed to report status for "
+ (timeSinceLastReport / 1000) + " seconds. Killing!";
LOG.info(tip.getTask().getTaskID() + ": " + msg);
ReflectionUtils.logThreadInfo(LOG, "lost task", 30);
tip.reportDiagnosticInfo(msg);
myInstrumentation.timedoutTask(tip.getTask().getTaskID());
purgeTask(tip, true);
}
}
}
} | java | {
"resource": ""
} |
q161336 | TaskTracker.purgeJob | train | protected synchronized void purgeJob(KillJobAction action) throws IOException {
JobID jobId = action.getJobID();
LOG.info("Received 'KillJobAction' for job: " + jobId);
RunningJob rjob = null;
synchronized (runningJobs) {
rjob = runningJobs.get(jobId);
}
if (rjob == null) {
if (LOG.isDebugEnabled()) {
// We cleanup the job on all tasktrackers in the cluster
// so there is a good chance it never ran a single task from it
LOG.debug("Unknown job " + jobId + " being deleted.");
}
} else {
synchronized (rjob) {
// Add this tips of this job to queue of tasks to be purged
for (TaskInProgress tip : rjob.tasks) {
tip.jobHasFinished(false);
Task t = tip.getTask();
if (t.isMapTask()) {
indexCache.removeMap(tip.getTask().getTaskID().toString());
}
// Remove it from the runningTasks
if (this.runningTasks.containsKey(t.getTaskID())) {
LOG.info("Remove " + t.getTaskID() + " from runningTask by purgeJob");
this.runningTasks.remove(t.getTaskID());
}
}
// Delete the job directory for this
// task if the job is done/failed
if (!rjob.keepJobFiles){
PathDeletionContext[] contexts = buildPathDeletionContexts(localFs,
getLocalFiles(fConf, getLocalJobDir(rjob.getJobID().toString())));
directoryCleanupThread.addToQueue(contexts);
}
// Remove this job
rjob.tasks.clear();
}
}
synchronized(runningJobs) {
runningJobs.remove(jobId);
}
} | java | {
"resource": ""
} |
q161337 | TaskTracker.purgeTask | train | private void purgeTask(TaskInProgress tip, boolean wasFailure)
throws IOException {
if (tip != null) {
LOG.info("About to purge task: " + tip.getTask().getTaskID());
// Remove the task from running jobs,
// removing the job if it's the last task
removeTaskFromJob(tip.getTask().getJobID(), tip);
tip.jobHasFinished(wasFailure);
if (tip.getTask().isMapTask()) {
indexCache.removeMap(tip.getTask().getTaskID().toString());
}
}
} | java | {
"resource": ""
} |
q161338 | TaskTracker.killOverflowingTasks | train | protected void killOverflowingTasks() throws IOException {
long localMinSpaceKill;
synchronized(this){
localMinSpaceKill = minSpaceKill;
}
if (!enoughFreeSpace(localMinSpaceKill)) {
acceptNewTasks=false;
//we give up! do not accept new tasks until
//all the ones running have finished and they're all cleared up
synchronized (this) {
TaskInProgress killMe = findTaskToKill(null);
if (killMe!=null) {
String msg = "Tasktracker running out of space." +
" Killing task.";
LOG.info(killMe.getTask().getTaskID() + ": " + msg);
killMe.reportDiagnosticInfo(msg);
purgeTask(killMe, false);
}
}
}
} | java | {
"resource": ""
} |
q161339 | TaskTracker.getLogDiskFreeSpace | train | long getLogDiskFreeSpace() throws IOException {
String logDir = fConf.getLogDir();
// If the log disk is not specified we assume it is usable.
if (logDir == null) {
return Long.MAX_VALUE;
}
DF df = localDirsDf.get(logDir);
if (df == null) {
df = new DF(new File(logDir), fConf);
localDirsDf.put(logDir, df);
}
return df.getAvailable();
} | java | {
"resource": ""
} |
q161340 | TaskTracker.tryToGetOutputSize | train | long tryToGetOutputSize(TaskAttemptID taskId, JobConf conf) {
try{
TaskInProgress tip;
synchronized(this) {
tip = tasks.get(taskId);
}
if(tip == null)
return -1;
if (!tip.getTask().isMapTask() ||
tip.getRunState() != TaskStatus.State.SUCCEEDED) {
return -1;
}
MapOutputFile mapOutputFile = new MapOutputFile();
mapOutputFile.setJobId(taskId.getJobID());
mapOutputFile.setConf(conf);
// In simulation mode, maps/reduces complete instantly and don't produce
// any output
if (this.simulatedTaskMode) {
return 0;
}
Path tmp_output = null;
try {
tmp_output = mapOutputFile.getOutputFile(taskId);
} catch (DiskErrorException dex) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error getting map output of a task " + taskId, dex);
}
}
if(tmp_output == null)
return 0;
FileSystem localFS = FileSystem.getLocal(conf);
FileStatus stat = localFS.getFileStatus(tmp_output);
if(stat == null)
return 0;
else
return stat.getLen();
} catch(IOException e) {
LOG.info(e);
return -1;
}
} | java | {
"resource": ""
} |
q161341 | TaskTracker.startNewTask | train | private void startNewTask(TaskInProgress tip) {
try {
boolean launched = localizeAndLaunchTask(tip);
if (!launched) {
// Free the slot.
tip.kill(true);
tip.cleanup(true);
}
} catch (Throwable e) {
String msg = ("Error initializing " + tip.getTask().getTaskID() +
":\n" + StringUtils.stringifyException(e));
LOG.error(msg, e);
tip.reportDiagnosticInfo(msg);
try {
tip.kill(true);
tip.cleanup(true);
} catch (IOException ie2) {
LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" +
StringUtils.stringifyException(ie2));
}
// Careful!
// This might not be an 'Exception' - don't handle 'Error' here!
if (e instanceof Error) {
throw ((Error) e);
}
}
} | java | {
"resource": ""
} |
q161342 | TaskTracker.localizeAndLaunchTask | train | private boolean localizeAndLaunchTask(final TaskInProgress tip)
throws IOException {
FutureTask<Boolean> task = new FutureTask<Boolean>(
new Callable<Boolean>() {
public Boolean call() throws IOException {
JobConf localConf = localizeJob(tip);
boolean launched = false;
synchronized (tip) {
tip.setJobConf(localConf);
launched = tip.launchTask();
}
return launched;
}
});
String threadName = "Localizing " + tip.getTask().toString();
Thread thread = new Thread(task);
thread.setName(threadName);
thread.setDaemon(true);
thread.start();
boolean launched = false;
try {
launched = task.get(LOCALIZE_TASK_TIMEOUT, TimeUnit.MILLISECONDS);
} catch (Exception e) {
task.cancel(true);
try {
LOG.info("Wait the localizeTask thread to finish");
thread.join(LOCALIZE_TASK_TIMEOUT);
} catch (InterruptedException ie) {
}
if (thread.isAlive()) {
LOG.error("Stacktrace of " + threadName + "\n" +
StringUtils.stackTraceOfThread(thread));
LOG.fatal("Cannot kill the localizeTask thread." + threadName +
" TaskTracker has to die!!");
System.exit(-1);
}
throw new IOException("TaskTracker got stuck for localized Task:" +
tip.getTask().getTaskID(), e);
}
return launched;
} | java | {
"resource": ""
} |
q161343 | TaskTracker.notifyTTAboutTaskCompletion | train | private void notifyTTAboutTaskCompletion() {
if (oobHeartbeatOnTaskCompletion) {
synchronized (finishedCount) {
int value = finishedCount.get();
finishedCount.set(value+1);
finishedCount.notifyAll();
}
}
} | java | {
"resource": ""
} |
q161344 | TaskTracker.getTask | train | public synchronized JvmTask getTask(JvmContext context)
throws IOException {
JVMId jvmId = context.jvmId;
LOG.debug("JVM with ID : " + jvmId + " asked for a task");
// save pid of task JVM sent by child
jvmManager.setPidToJvm(jvmId, context.pid);
if (!jvmManager.isJvmKnown(jvmId)) {
LOG.info("Killing unknown JVM " + jvmId);
return new JvmTask(null, true);
}
RunningJob rjob = runningJobs.get(jvmId.getJobId());
if (rjob == null) { //kill the JVM since the job is dead
LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() +
" is dead");
jvmManager.killJvm(jvmId);
return new JvmTask(null, true);
}
TaskInProgress tip = jvmManager.getTaskForJvm(jvmId);
if (tip == null) {
return new JvmTask(null, false);
}
if(taskMemoryControlGroupEnabled) {
long limit = getMemoryLimit(tip.getJobConf(), tip.getTask().isMapTask());
ttMemCgroup.addTask(tip.getTask().getTaskID().toString(), context.pid, limit);
}
if(taskCPUControlGroupEnabled)
ttCPUCgroup.addTask(tip.getTask().getTaskID().toString(), context.pid);
if (tasks.get(tip.getTask().getTaskID()) != null) { //is task still present
LOG.info("JVM with ID: " + jvmId + " given task: " +
tip.getTask().getTaskID());
return new JvmTask(tip.getTask(), false);
} else {
LOG.info("Killing JVM with ID: " + jvmId + " since scheduled task: " +
tip.getTask().getTaskID() + " is " + tip.taskStatus.getRunState());
return new JvmTask(null, true);
}
} | java | {
"resource": ""
} |
q161345 | TaskTracker.statusUpdate | train | public synchronized boolean statusUpdate(TaskAttemptID taskid,
TaskStatus taskStatus)
throws IOException {
TaskInProgress tip = tasks.get(taskid);
if (tip != null) {
tip.reportProgress(taskStatus);
myInstrumentation.statusUpdate(tip.getTask(), taskStatus);
return true;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Progress from unknown child task: "+taskid);
}
return false;
}
} | java | {
"resource": ""
} |
q161346 | TaskTracker.reportDiagnosticInfo | train | public synchronized void reportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException {
TaskInProgress tip = tasks.get(taskid);
if (tip != null) {
tip.reportDiagnosticInfo(info);
} else {
LOG.warn("Error from unknown child task: "+taskid+". Ignored.");
}
} | java | {
"resource": ""
} |
q161347 | TaskTracker.commitPending | train | public synchronized void commitPending(TaskAttemptID taskid,
TaskStatus taskStatus)
throws IOException {
LOG.info("Task " + taskid + " is in commit-pending," +"" +
" task state:" +taskStatus.getRunState());
statusUpdate(taskid, taskStatus);
reportTaskFinished(taskid, true);
} | java | {
"resource": ""
} |
q161348 | TaskTracker.done | train | public synchronized void done(TaskAttemptID taskid)
throws IOException {
TaskInProgress tip = tasks.get(taskid);
commitResponses.remove(taskid);
if (tip != null) {
tip.reportDone();
} else {
LOG.warn("Unknown child task done: "+taskid+". Ignored.");
}
} | java | {
"resource": ""
} |
q161349 | TaskTracker.shuffleError | train | public synchronized void shuffleError(TaskAttemptID taskId, String message)
throws IOException {
LOG.fatal("Task: " + taskId + " - Killed due to Shuffle Failure: " + message);
TaskInProgress tip = runningTasks.get(taskId);
if (tip != null) {
tip.reportDiagnosticInfo("Shuffle Error: " + message);
purgeTask(tip, true);
}
} | java | {
"resource": ""
} |
q161350 | TaskTracker.fsError | train | public synchronized void fsError(TaskAttemptID taskId, String message)
throws IOException {
LOG.fatal("Task: " + taskId + " - Killed due to FSError: " + message);
TaskInProgress tip = runningTasks.get(taskId);
if (tip != null) {
tip.reportDiagnosticInfo("FSError: " + message);
purgeTask(tip, true);
}
if (isDiskOutOfSpaceError(message)) {
this.myInstrumentation.diskOutOfSpaceTask(taskId);
}
} | java | {
"resource": ""
} |
q161351 | TaskTracker.fatalError | train | public synchronized void fatalError(TaskAttemptID taskId, String msg)
throws IOException {
LOG.fatal("Task: " + taskId + " - Killed : " + msg);
TaskInProgress tip = runningTasks.get(taskId);
if (tip != null) {
tip.reportDiagnosticInfo("Error: " + msg);
purgeTask(tip, true);
}
} | java | {
"resource": ""
} |
q161352 | TaskTracker.reportTaskFinished | train | void reportTaskFinished(TaskAttemptID taskid, boolean commitPending) {
TaskInProgress tip;
synchronized (this) {
tip = tasks.get(taskid);
}
if (tip != null) {
tip.reportTaskFinished(commitPending);
} else {
LOG.warn("Unknown child task finished: "+taskid+". Ignored.");
}
} | java | {
"resource": ""
} |
q161353 | TaskTracker.mapOutputLost | train | public synchronized void mapOutputLost(TaskAttemptID taskid,
String errorMsg) throws IOException {
TaskInProgress tip = tasks.get(taskid);
if (tip != null) {
tip.mapOutputLost(errorMsg);
} else {
LOG.warn("Unknown child with bad map output: "+taskid+". Ignored.");
}
} | java | {
"resource": ""
} |
q161354 | TaskTracker.getRunningTaskStatuses | train | synchronized List<TaskStatus> getRunningTaskStatuses() {
List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size());
for(TaskInProgress tip: runningTasks.values()) {
result.add(tip.getStatus());
}
return result;
} | java | {
"resource": ""
} |
q161355 | TaskTracker.getNonRunningTasks | train | public synchronized List<TaskStatus> getNonRunningTasks() {
List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
for(Map.Entry<TaskAttemptID, TaskInProgress> task: tasks.entrySet()) {
if (!runningTasks.containsKey(task.getKey())) {
result.add(task.getValue().getStatus());
}
}
return result;
} | java | {
"resource": ""
} |
q161356 | TaskTracker.getTasksFromRunningJobs | train | synchronized List<TaskStatus> getTasksFromRunningJobs() {
List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size());
for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
RunningJob rjob = item.getValue();
synchronized (rjob) {
for (TaskInProgress tip : rjob.tasks) {
result.add(tip.getStatus());
}
}
}
return result;
} | java | {
"resource": ""
} |
q161357 | TaskTracker.getLocalFiles | train | Path[] getLocalFiles(JobConf conf, String subdir) throws IOException{
String[] localDirs = getLocalDirsFromConf(conf);
Path[] paths = new Path[localDirs.length];
FileSystem localFs = FileSystem.getLocal(conf);
for (int i = 0; i < localDirs.length; i++) {
paths[i] = new Path(localDirs[i], subdir);
paths[i] = paths[i].makeQualified(localFs);
}
return paths;
} | java | {
"resource": ""
} |
q161358 | TaskTracker.getLocalDirs | train | Path[] getLocalDirs() throws IOException{
String[] localDirs = getLocalDirsFromConf(fConf);
Path[] paths = new Path[localDirs.length];
FileSystem localFs = FileSystem.getLocal(fConf);
for (int i = 0; i < localDirs.length; i++) {
paths[i] = new Path(localDirs[i]);
paths[i] = paths[i].makeQualified(localFs);
}
return paths;
} | java | {
"resource": ""
} |
q161359 | TaskTracker.getAveMapSlotRefillMsecs | train | int getAveMapSlotRefillMsecs() {
synchronized (mapSlotRefillMsecsQueue) {
if (mapSlotRefillMsecsQueue.isEmpty()) {
return -1;
}
int totalMapSlotRefillMsecs = 0;
for (int refillMsecs : mapSlotRefillMsecsQueue) {
totalMapSlotRefillMsecs += refillMsecs;
}
return totalMapSlotRefillMsecs / mapSlotRefillMsecsQueue.size();
}
} | java | {
"resource": ""
} |
q161360 | TaskTracker.addAveMapSlotRefillMsecs | train | void addAveMapSlotRefillMsecs(int refillMsecs) {
synchronized (mapSlotRefillMsecsQueue) {
mapSlotRefillMsecsQueue.add(refillMsecs);
if (mapSlotRefillMsecsQueue.size() >= maxRefillQueueSize) {
mapSlotRefillMsecsQueue.remove();
}
}
} | java | {
"resource": ""
} |
q161361 | TaskTracker.getAveReduceSlotRefillMsecs | train | int getAveReduceSlotRefillMsecs() {
synchronized (reduceSlotRefillMsecsQueue) {
if (reduceSlotRefillMsecsQueue.isEmpty()) {
return -1;
}
int totalReduceSlotRefillMsecs = 0;
for (int refillMsecs : reduceSlotRefillMsecsQueue) {
totalReduceSlotRefillMsecs += refillMsecs;
}
return totalReduceSlotRefillMsecs / reduceSlotRefillMsecsQueue.size();
}
} | java | {
"resource": ""
} |
q161362 | TaskTracker.addAveReduceSlotRefillMsecs | train | void addAveReduceSlotRefillMsecs(int refillMsecs) {
synchronized (reduceSlotRefillMsecsQueue) {
reduceSlotRefillMsecsQueue.add(refillMsecs);
if (reduceSlotRefillMsecsQueue.size() >= maxRefillQueueSize) {
reduceSlotRefillMsecsQueue.remove();
}
}
} | java | {
"resource": ""
} |
q161363 | TaskTracker.initializeMemoryManagement | train | private void initializeMemoryManagement() {
//handling @deprecated
if (fConf.get(MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY) != null) {
LOG.warn(
JobConf.deprecatedString(
MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY));
}
//handling @deprecated
if (fConf.get(MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY) != null) {
LOG.warn(
JobConf.deprecatedString(
MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY));
}
//handling @deprecated
if (fConf.get(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY) != null) {
LOG.warn(
JobConf.deprecatedString(
JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY));
}
//handling @deprecated
if (fConf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) {
LOG.warn(
JobConf.deprecatedString(
JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY));
}
totalVirtualMemoryOnTT = resourceCalculatorPlugin.getVirtualMemorySize();
totalPhysicalMemoryOnTT = resourceCalculatorPlugin.getPhysicalMemorySize();
mapSlotMemorySizeOnTT =
fConf.getLong(
JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY,
JobConf.DISABLED_MEMORY_LIMIT);
reduceSlotSizeMemoryOnTT =
fConf.getLong(
JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY,
JobConf.DISABLED_MEMORY_LIMIT);
totalMemoryAllottedForTasks =
maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots
* reduceSlotSizeMemoryOnTT;
if (totalMemoryAllottedForTasks < 0) {
//adding check for the old keys which might be used by the administrator
//while configuration of the memory monitoring on TT
long memoryAllotedForSlot = fConf.normalizeMemoryConfigValue(
fConf.getLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY,
JobConf.DISABLED_MEMORY_LIMIT));
long limitVmPerTask = fConf.normalizeMemoryConfigValue(
fConf.getLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
JobConf.DISABLED_MEMORY_LIMIT));
if(memoryAllotedForSlot == JobConf.DISABLED_MEMORY_LIMIT) {
totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
} else {
if(memoryAllotedForSlot > limitVmPerTask) {
LOG.info("DefaultMaxVmPerTask is mis-configured. " +
"It shouldn't be greater than task limits");
totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
} else {
totalMemoryAllottedForTasks = (maxMapSlots +
maxReduceSlots) * (memoryAllotedForSlot/(1024 * 1024));
}
}
}
if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) {
LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT."
+ " Thrashing might happen.");
} else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) {
LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT."
+ " Thrashing might happen.");
}
// start the taskMemoryManager thread only if enabled
setTaskMemoryManagerEnabledFlag();
if (isTaskMemoryManagerEnabled()) {
taskMemoryManager = new TaskMemoryManagerThread(this);
taskMemoryManager.setDaemon(true);
taskMemoryManager.start();
}
} | java | {
"resource": ""
} |
q161364 | TaskTracker.cleanUpOverMemoryTask | train | synchronized void cleanUpOverMemoryTask(TaskAttemptID tid, boolean wasFailure,
String diagnosticMsg) {
TaskInProgress tip = runningTasks.get(tid);
if (tip != null) {
tip.reportDiagnosticInfo(diagnosticMsg);
try {
purgeTask(tip, wasFailure); // Marking it as failed/killed.
} catch (IOException ioe) {
LOG.warn("Couldn't purge the task of " + tid + ". Error : " + ioe);
}
}
} | java | {
"resource": ""
} |
q161365 | TaskTracker.getUserName | train | public String getUserName(TaskAttemptID taskId) {
TaskInProgress tip = tasks.get(taskId);
if (tip != null) {
return tip.getJobConf().getUser();
}
return null;
} | java | {
"resource": ""
} |
q161366 | TaskTracker.getMaxSlots | train | protected int getMaxSlots(JobConf conf, int numCpuOnTT, TaskType type) {
int maxSlots;
String cpuToSlots;
if (type == TaskType.MAP) {
maxSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
cpuToSlots = conf.get("mapred.tasktracker.cpus.to.maptasks");
} else {
maxSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2);
cpuToSlots = conf.get("mapred.tasktracker.cpus.to.reducetasks");
}
if (cpuToSlots != null) {
try {
// Format of the configuration is
// numCpu1:maxSlot1, numCpu2:maxSlot2, numCpu3:maxSlot3
for (String str : cpuToSlots.split(",")) {
String[] pair = str.split(":");
int numCpu = Integer.parseInt(pair[0].trim());
int max = Integer.parseInt(pair[1].trim());
if (numCpu == numCpuOnTT) {
maxSlots = max;
break;
}
}
} catch (Exception e) {
LOG.warn("Error parsing number of CPU to map slots configuration", e);
}
}
return maxSlots;
} | java | {
"resource": ""
} |
q161367 | BlockMover.chooseTargetNodes | train | public DatanodeInfo chooseTargetNodes(Set<DatanodeInfo> excludedNodes)
throws IOException {
DatanodeInfo target = cluster.getNodeOnDifferentRack(excludedNodes);
if (target == null) {
throw new IOException ("Error choose datanode");
}
return target;
} | java | {
"resource": ""
} |
q161368 | DatanodeBlockReader.getChecksumInfo | train | protected void getChecksumInfo(long blockLength) {
/*
* If bytesPerChecksum is very large, then the metadata file is mostly
* corrupted. For now just truncate bytesPerchecksum to blockLength.
*/
bytesPerChecksum = checksum.getBytesPerChecksum();
if (bytesPerChecksum > 10 * 1024 * 1024 && bytesPerChecksum > blockLength) {
checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(),
Math.max((int) blockLength, 10 * 1024 * 1024));
bytesPerChecksum = checksum.getBytesPerChecksum();
}
checksumSize = checksum.getChecksumSize();
} | java | {
"resource": ""
} |
q161369 | LightWeightHashSet.contains | train | @SuppressWarnings("unchecked")
public boolean contains(final Object key) {
// validate key
if (key == null) {
throw new IllegalArgumentException("Null element is not supported.");
}
// find element
final int hashCode = ((T)key).hashCode();
final int index = getIndex(hashCode);
return containsElem(index, (T) key, hashCode);
} | java | {
"resource": ""
} |
q161370 | LightWeightHashSet.containsElem | train | protected boolean containsElem(int index, final T key, int hashCode) {
for (LinkedElement<T> e = entries[index]; e != null; e = e.next) {
// element found
if (hashCode == e.hashCode && e.element.equals(key)) {
return true;
}
}
// element not found
return false;
} | java | {
"resource": ""
} |
q161371 | LightWeightHashSet.addAll | train | public boolean addAll(Collection<? extends T> toAdd) {
boolean changed = false;
for (T elem : toAdd) {
changed |= addElem(elem);
}
expandIfNecessary();
return changed;
} | java | {
"resource": ""
} |
q161372 | LightWeightHashSet.remove | train | @SuppressWarnings("unchecked")
public boolean remove(final Object key) {
// validate key
if (key == null) {
throw new IllegalArgumentException("Null element is not supported.");
}
LinkedElement<T> removed = removeElem((T) key);
shrinkIfNecessary();
return removed == null ? false : true;
} | java | {
"resource": ""
} |
q161373 | LightWeightHashSet.pollAll | train | public List<T> pollAll() {
List<T> retList = new ArrayList<T>(size);
for (int i = 0; i < entries.length; i++) {
LinkedElement<T> current = entries[i];
while (current != null) {
retList.add(current.element);
current = current.next;
}
}
this.clear();
return retList;
} | java | {
"resource": ""
} |
q161374 | LightWeightHashSet.pollToArray | train | @SuppressWarnings("unchecked")
public T[] pollToArray(T[] array) {
int currentIndex = 0;
LinkedElement<T> current = null;
if (array.length == 0) {
return array;
}
if (array.length > size) {
array = (T[]) java.lang.reflect.Array.newInstance(array.getClass()
.getComponentType(), size);
}
// do fast polling if the entire set needs to be fetched
if (array.length == size) {
for (int i = 0; i < entries.length; i++) {
current = entries[i];
while (current != null) {
array[currentIndex++] = current.element;
current = current.next;
}
}
this.clear();
return array;
}
boolean done = false;
int currentBucketIndex = 0;
while (!done) {
current = entries[currentBucketIndex];
while (current != null) {
array[currentIndex++] = current.element;
current = current.next;
entries[currentBucketIndex] = current;
size--;
modification++;
if (currentIndex == array.length) {
done = true;
break;
}
}
currentBucketIndex++;
}
shrinkIfNecessary();
return array;
} | java | {
"resource": ""
} |
q161375 | LightWeightHashSet.computeCapacity | train | private int computeCapacity(int initial) {
if (initial < MINIMUM_CAPACITY) {
return MINIMUM_CAPACITY;
}
if (initial > MAXIMUM_CAPACITY) {
return MAXIMUM_CAPACITY;
}
int capacity = 1;
while (capacity < initial) {
capacity <<= 1;
}
return capacity;
} | java | {
"resource": ""
} |
q161376 | LightWeightHashSet.resize | train | @SuppressWarnings("unchecked")
private void resize(int cap) {
int newCapacity = computeCapacity(cap);
if (newCapacity == this.capacity) {
return;
}
this.capacity = newCapacity;
this.expandThreshold = (int) (capacity * maxLoadFactor);
this.shrinkThreshold = (int) (capacity * minLoadFactor);
this.hash_mask = capacity - 1;
LinkedElement<T>[] temp = entries;
entries = new LinkedElement[capacity];
for (int i = 0; i < temp.length; i++) {
LinkedElement<T> curr = temp[i];
while (curr != null) {
LinkedElement<T> next = curr.next;
int index = getIndex(curr.hashCode);
curr.next = entries[index];
entries[index] = curr;
curr = next;
}
}
} | java | {
"resource": ""
} |
q161377 | LightWeightHashSet.clear | train | @SuppressWarnings("unchecked")
public void clear() {
this.capacity = this.initialCapacity;
this.hash_mask = capacity - 1;
this.expandThreshold = (int) (capacity * maxLoadFactor);
this.shrinkThreshold = (int) (capacity * minLoadFactor);
entries = new LinkedElement[capacity];
size = 0;
modification++;
} | java | {
"resource": ""
} |
q161378 | EagerTaskInitializationListener.resortInitQueue | train | private synchronized void resortInitQueue() {
Comparator<JobInProgress> comp = new Comparator<JobInProgress>() {
public int compare(JobInProgress o1, JobInProgress o2) {
int res = o1.getPriority().compareTo(o2.getPriority());
if(res == 0) {
if(o1.getStartTime() < o2.getStartTime())
res = -1;
else
res = (o1.getStartTime()==o2.getStartTime() ? 0 : 1);
}
return res;
}
};
synchronized (jobInitQueue) {
Collections.sort(jobInitQueue, comp);
}
} | java | {
"resource": ""
} |
q161379 | EagerTaskInitializationListener.jobStateChanged | train | private void jobStateChanged(JobStatusChangeEvent event) {
// Resort the job queue if the job-start-time or job-priority changes
if (event.getEventType() == EventType.START_TIME_CHANGED
|| event.getEventType() == EventType.PRIORITY_CHANGED) {
synchronized (jobInitQueue) {
resortInitQueue();
}
}
} | java | {
"resource": ""
} |
q161380 | JsonUtils.readStartObjectToken | train | public static void readStartObjectToken(JsonParser jsonParser,
String parentFieldName)
throws IOException {
readToken(jsonParser, parentFieldName, JsonToken.START_OBJECT);
} | java | {
"resource": ""
} |
q161381 | JsonUtils.readStartArrayToken | train | public static void readStartArrayToken(JsonParser jsonParser,
String parentFieldName)
throws IOException {
readToken(jsonParser, parentFieldName, JsonToken.START_ARRAY);
} | java | {
"resource": ""
} |
q161382 | JsonUtils.readEndObjectToken | train | public static void readEndObjectToken(JsonParser jsonParser,
String parentFieldName)
throws IOException {
readToken(jsonParser, parentFieldName, JsonToken.END_OBJECT);
} | java | {
"resource": ""
} |
q161383 | JsonUtils.readEndArrayToken | train | public static void readEndArrayToken(JsonParser jsonParser,
String parentFieldName)
throws IOException {
readToken(jsonParser, parentFieldName, JsonToken.END_ARRAY);
} | java | {
"resource": ""
} |
q161384 | JsonUtils.createJsonGenerator | train | public static JsonGenerator createJsonGenerator(CoronaConf conf)
throws IOException {
OutputStream outputStream = new FileOutputStream(conf.getCMStateFile());
if (conf.getCMCompressStateFlag()) {
outputStream = new GZIPOutputStream(outputStream);
}
ObjectMapper mapper = new ObjectMapper();
JsonGenerator jsonGenerator =
new JsonFactory().createJsonGenerator(outputStream, JsonEncoding.UTF8);
jsonGenerator.setCodec(mapper);
if (!conf.getCMCompressStateFlag()) {
jsonGenerator.setPrettyPrinter(new DefaultPrettyPrinter());
}
return jsonGenerator;
} | java | {
"resource": ""
} |
q161385 | JsonUtils.createJsonParser | train | public static JsonParser createJsonParser(CoronaConf conf)
throws IOException {
InputStream inputStream = new FileInputStream(conf.getCMStateFile());
if (conf.getCMCompressStateFlag()) {
inputStream = new GZIPInputStream(inputStream);
}
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES,
false);
JsonFactory jsonFactory = new JsonFactory();
jsonFactory.setCodec(mapper);
return jsonFactory.createJsonParser(inputStream);
} | java | {
"resource": ""
} |
q161386 | JsonUtils.readField | train | public static void readField(JsonParser jsonParser,
String expectedFieldName) throws IOException {
readToken(jsonParser, expectedFieldName, JsonToken.FIELD_NAME);
String fieldName = jsonParser.getCurrentName();
if (!fieldName.equals(expectedFieldName)) {
foundUnknownField(fieldName, expectedFieldName);
}
} | java | {
"resource": ""
} |
q161387 | INodeDirectory.replaceChild | train | void replaceChild(INode newChild) {
if ( children == null ) {
throw new IllegalArgumentException("The directory is empty");
}
int low = Collections.binarySearch(children, newChild.name);
if (low>=0) { // an old child exists so replace by the newChild
INode oldChild = children.get(low);
// Need to make sure we are replacing the oldChild with newChild that is the same as oldChild
// which means they reference to the same children or they are null or empty array
children.set(low, newChild);
// newChild should point to current instance (parent of oldChild)
newChild.parent = this;
// if both are directory, all the children from oldChild should point to newChild
if (newChild.isDirectory() && oldChild.isDirectory()) {
if (((INodeDirectory)oldChild).getChildren() != null) {
for (INode oldGrandChild : ((INodeDirectory)oldChild).getChildren()) {
oldGrandChild.parent = (INodeDirectory)newChild;
}
}
}
} else {
throw new IllegalArgumentException("No child exists to be replaced");
}
} | java | {
"resource": ""
} |
q161388 | INodeDirectory.getExistingPathINodes | train | public INode[] getExistingPathINodes(String path) {
byte[][] components = getPathComponents(path);
INode[] inodes = new INode[components.length];
this.getExistingPathINodes(components, inodes);
return inodes;
} | java | {
"resource": ""
} |
q161389 | INodeDirectory.nextChild | train | int nextChild(byte[] name) {
if (name.length == 0) { // empty name
return 0;
}
int nextPos = Collections.binarySearch(children, name) + 1;
if (nextPos >= 0) { // the name is in the list of children
return nextPos;
}
return -nextPos; // insert point
} | java | {
"resource": ""
} |
q161390 | INodeDirectory.addNode | train | <T extends INode> T addNode(String path, T newNode, boolean inheritPermission
) throws FileNotFoundException {
byte[][] pathComponents = getPathComponents(path);
if(addToParent(pathComponents, newNode, inheritPermission, true) == null)
return null;
return newNode;
} | java | {
"resource": ""
} |
q161391 | INodeDirectory.computeContentSummary | train | private long[] computeContentSummary(long[] summary, Set<Long> visitedCtx) {
if (children != null) {
for (INode child : children) {
if (child.isDirectory()) {
// Process the directory with the visited hard link context
((INodeDirectory)child).computeContentSummary(summary, visitedCtx);
} else {
// Process the file
if (child instanceof INodeHardLinkFile) {
// Get the current hard link ID
long hardLinkID = ((INodeHardLinkFile) child).getHardLinkID();
if (visitedCtx.contains(hardLinkID)) {
// The current hard link file has been visited, so only increase the file count.
summary[1] ++;
continue;
} else {
// Add the current hard link file to the visited set
visitedCtx.add(hardLinkID);
// Compute the current hardlink file
child.computeContentSummary(summary);
}
} else {
// compute the current child for non hard linked files
child.computeContentSummary(summary);
}
}
}
}
summary[2]++;
return summary;
} | java | {
"resource": ""
} |
q161392 | INodeDirectory.countItems | train | public void countItems() {
itemCounts = new ItemCounts();
itemCounts.startTime = System.currentTimeMillis();
itemCounts.numDirectories = 1; // count the current directory
itemCounts.numFiles = 0;
itemCounts.numBlocks = 0;
if (children != null) {
for (INode child : children) {
countItemsRecursively(child);
}
}
itemCounts.finishTime = System.currentTimeMillis();
} | java | {
"resource": ""
} |
q161393 | Sudoku.stringifySolution | train | static String stringifySolution(int size, List<List<ColumnName>> solution) {
int[][] picture = new int[size][size];
StringBuffer result = new StringBuffer();
// go through the rows selected in the model and build a picture of the
// solution.
for(List<ColumnName> row: solution) {
int x = -1;
int y = -1;
int num = -1;
for(ColumnName item: row) {
if (item instanceof ColumnConstraint) {
x = ((ColumnConstraint) item).column;
num = ((ColumnConstraint) item).num;
} else if (item instanceof RowConstraint) {
y = ((RowConstraint) item).row;
}
}
picture[y][x] = num;
}
// build the string
for(int y=0; y < size; ++y) {
for (int x=0; x < size; ++x) {
result.append(picture[y][x]);
result.append(" ");
}
result.append("\n");
}
return result.toString();
} | java | {
"resource": ""
} |
q161394 | Sudoku.generateRow | train | private boolean[] generateRow(boolean[] rowValues, int x, int y, int num) {
// clear the scratch array
for(int i=0; i < rowValues.length; ++i) {
rowValues[i] = false;
}
// find the square coordinates
int xBox = (int) x / squareXSize;
int yBox = (int) y / squareYSize;
// mark the column
rowValues[x*size + num - 1] = true;
// mark the row
rowValues[size*size + y*size + num - 1] = true;
// mark the square
rowValues[2*size*size + (xBox*squareXSize + yBox)*size + num - 1] = true;
// mark the cell
rowValues[3*size*size + size*x + y] = true;
return rowValues;
} | java | {
"resource": ""
} |
q161395 | Sudoku.main | train | public static void main(String[] args) throws IOException {
if (args.length == 0) {
System.out.println("Include a puzzle on the command line.");
}
for(int i=0; i < args.length; ++i) {
Sudoku problem = new Sudoku(new FileInputStream(args[i]));
System.out.println("Solving " + args[i]);
problem.solve();
}
} | java | {
"resource": ""
} |
q161396 | KosmosFileSystem.delete | train | public boolean delete(Path path, boolean recursive) throws IOException {
Path absolute = makeAbsolute(path);
String srep = absolute.toUri().getPath();
if (kfsImpl.isFile(srep))
return kfsImpl.remove(srep) == 0;
FileStatus[] dirEntries = listStatus(absolute);
if ((!recursive) && (dirEntries != null) &&
(dirEntries.length != 0)) {
throw new IOException("Directory " + path.toString() +
" is not empty.");
}
if (dirEntries != null) {
for (int i = 0; i < dirEntries.length; i++) {
delete(new Path(absolute, dirEntries[i].getPath()), recursive);
}
}
return kfsImpl.rmdir(srep) == 0;
} | java | {
"resource": ""
} |
q161397 | KosmosFileSystem.getFileBlockLocations | train | @Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
if (file == null) {
return null;
}
String srep = makeAbsolute(file.getPath()).toUri().getPath();
String[][] hints = kfsImpl.getDataLocation(srep, start, len);
if (hints == null) {
return null;
}
BlockLocation[] result = new BlockLocation[hints.length];
long blockSize = getDefaultBlockSize();
long length = len;
long blockStart = start;
for(int i=0; i < result.length; ++i) {
result[i] = new BlockLocation(null, hints[i], blockStart,
length < blockSize ? length : blockSize);
blockStart += blockSize;
length -= blockSize;
}
return result;
} | java | {
"resource": ""
} |
q161398 | INodeDirectoryWithQuota.verifyQuota | train | void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
long newCount = nsCount + nsDelta;
long newDiskspace = diskspace + dsDelta;
if (nsDelta>0 || dsDelta>0) {
if (nsQuota >= 0 && nsQuota < newCount) {
throw new NSQuotaExceededException(nsQuota, newCount);
}
if (dsQuota >= 0 && dsQuota < newDiskspace) {
throw new DSQuotaExceededException(dsQuota, newDiskspace);
}
}
} | java | {
"resource": ""
} |
q161399 | Storage.checkVersionUpgradable | train | public static void checkVersionUpgradable(int oldVersion)
throws IOException {
if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
String msg = "*********** Upgrade is not supported from this older" +
" version of storage to the current version." +
" Please upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION +
" or a later version and then upgrade to current" +
" version. Old layout version is " +
(oldVersion == 0 ? "'too old'" : (""+oldVersion)) +
" and latest layout version this software version can" +
" upgrade from is " + LAST_UPGRADABLE_LAYOUT_VERSION +
". ************";
LOG.error(msg);
throw new IOException(msg);
}
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.