_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160800
BlockReader.readBlockSizeInfo
train
/**
 * Reads the block-finalization flag and the updated block length from the
 * input stream {@code in}, setting {@code blkLenInfoUpdated}. Also reads
 * datanode profiling data when the peer's transfer version supports it.
 * No-op when {@code transferBlockSize} is false.
 *
 * @throws IOException if reading from the underlying stream fails
 */
private synchronized void readBlockSizeInfo() throws IOException { if (!transferBlockSize) { return; } blkLenInfoUpdated = true; isBlockFinalized = in.readBoolean(); updatedBlockLength = in.readLong(); if (dataTransferVersion >= DataTransferProtocol.READ_PROFILING_VERSION) { readDataNodeProfilingData(); } if (LOG.isDebugEnabled()) { LOG.debug("ifBlockComplete? " + isBlockFinalized + " block size: " + updatedBlockLength); } }
java
{ "resource": "" }
q160801
BlockReader.newBlockReader
train
/**
 * Convenience overload: creates a BlockReader with default extras —
 * empty client name, Long.MAX_VALUE queue size hint, -1 profiling flag,
 * no cliData, and fresh ReadOptions. Delegates to the full-argument
 * factory method.
 */
public static BlockReader newBlockReader( int dataTransferVersion, int namespaceId, Socket sock, String file, long blockId, long genStamp, long startOffset, long len, int bufferSize, boolean verifyChecksum) throws IOException { return newBlockReader(dataTransferVersion, namespaceId, sock, file, blockId, genStamp, startOffset, len, bufferSize, verifyChecksum, "", Long.MAX_VALUE, -1, false, null, new ReadOptions()); }
java
{ "resource": "" }
q160802
CoronaTaskTracker.createRunningJob
train
/**
 * Creates a RunningJob for the given task and starts a per-job
 * JobTrackerReporter heartbeat thread to the Corona job tracker named in
 * the task's CoronaSessionInfo. The reporter is registered in
 * {@code jobTrackerReporters} keyed by job id.
 */
@Override protected RunningJob createRunningJob(JobID jobId, TaskInProgress tip) throws IOException { CoronaSessionInfo info = (CoronaSessionInfo)(tip.getExtensible()); // JobClient will be set by JobTrackerReporter thread later RunningJob rJob = new RunningJob(jobId, null, info); JobTrackerReporter reporter = new JobTrackerReporter(rJob, info.getJobTrackerAddr(), info.getSecondaryTracker(), info.getSessionHandle()); reporter.setName("JobTrackerReporter for " + jobId); // Start the heartbeat to the jobtracker reporter.start(); jobTrackerReporters.put(jobId, reporter); return rJob; }
java
{ "resource": "" }
q160803
CoronaTaskTracker.purgeJob
train
/**
 * Purges a killed job: shuts down its JobTrackerReporter (if any),
 * delegates to the superclass purge, and returns the job's resources via
 * {@code crReleaseManager}.
 */
@Override protected synchronized void purgeJob(KillJobAction action) throws IOException { JobID jobId = action.getJobID(); JobTrackerReporter reporter = jobTrackerReporters.remove(jobId); if (reporter != null) { reporter.shutdown(); } super.purgeJob(action); crReleaseManager.returnRelease(jobId); }
java
{ "resource": "" }
q160804
DefaultTaskController.launchTaskJVM
train
/**
 * Launches the task JVM: wraps the JVM command with stdout/stderr capture
 * (via TaskLog.captureOutAndError, with setsid enabled) and executes it in
 * the task's working directory. The ShellCommandExecutor is stored in
 * {@code context.shExec} so callers can later inspect or kill the process.
 */
void launchTaskJVM(TaskController.TaskControllerContext context) throws IOException { JvmEnv env = context.env; List<String> wrappedCommand = TaskLog.captureOutAndError(env.setup, env.vargs, env.stdout, env.stderr, env.logSize, true); ShellCommandExecutor shexec = new ShellCommandExecutor(wrappedCommand.toArray(new String[0]), env.workDir, env.env); // set the ShellCommandExecutor for later use. context.shExec = shexec; shexec.execute(); }
java
{ "resource": "" }
q160805
DFSContentProvider.refresh
train
/**
 * Asynchronously refreshes the whole viewer on the UI display thread.
 * No-op when no viewer is attached.
 */
void refresh() { // no display, nothing to update if (this.viewer == null) return; Display.getDefault().asyncExec(new Runnable() { public void run() { DFSContentProvider.this.viewer.refresh(); } }); }
java
{ "resource": "" }
q160806
DFSContentProvider.refresh
train
/**
 * Asynchronously refreshes just the given content element in the
 * structured viewer; falls back to a full {@link #refresh()} when no
 * structured viewer is attached.
 */
void refresh(final DFSContent content) { if (this.sviewer != null) { Display.getDefault().asyncExec(new Runnable() { public void run() { DFSContentProvider.this.sviewer.refresh(content); } }); } else { refresh(); } }
java
{ "resource": "" }
q160807
TaskLogServlet.getTaskLogUrl
train
/**
 * Builds the URL of the tasklog servlet on a tasktracker for a given
 * task attempt.
 *
 * @param taskTrackerHostName host running the tasktracker
 * @param httpPort            the tasktracker's HTTP port, as a string
 * @param taskAttemptID       the task attempt whose logs are requested
 * @return a URL of the form {@code http://host:port/tasklog?taskid=id}
 */
public static String getTaskLogUrl(String taskTrackerHostName,
    String httpPort, String taskAttemptID) {
  StringBuilder url = new StringBuilder("http://");
  url.append(taskTrackerHostName).append(':').append(httpPort);
  url.append("/tasklog?taskid=").append(taskAttemptID);
  return url.toString();
}
java
{ "resource": "" }
q160808
TaskLogServlet.findFirstQuotable
train
/**
 * Scans {@code data} from {@code offset} (inclusive) towards {@code end}
 * (exclusive) for the first byte needing HTML escaping ('<', '>' or '&').
 *
 * @return the index of the first such byte; if none is found, the scan
 *         position at which the loop stopped (normally {@code end})
 */
private static int findFirstQuotable(byte[] data, int offset, int end) {
  int i = offset;
  while (i < end && data[i] != '<' && data[i] != '>' && data[i] != '&') {
    i++;
  }
  return i;
}
java
{ "resource": "" }
q160809
PoolGroupManager.snapshot
train
/**
 * Takes a scheduling snapshot: copies the current pool groups into
 * {@code snapshotPoolGroups}, snapshots each one, and invalidates the
 * cached schedule/preempt queues so they are rebuilt lazily. Also ensures
 * every configured pool exists (for stats and cm.jsp display).
 */
public void snapshot() { snapshotPoolGroups = new ArrayList<PoolGroupSchedulable>(nameToPoolGroup.values()); for (PoolGroupSchedulable poolGroup : snapshotPoolGroups) { poolGroup.snapshot(); } scheduleQueue = null; preemptQueue = null; // Load the configured pools for stats and cm.jsp // (needs to modify nameToPoolGroup) Collection<PoolInfo> configuredPoolInfos = configManager.getConfiguredPoolInfos(); if (configuredPoolInfos != null) { for (PoolInfo poolInfo : configuredPoolInfos) { getPoolSchedulable(poolInfo); } } }
java
{ "resource": "" }
q160810
PoolGroupManager.getScheduleQueue
train
/**
 * Lazily builds and returns the FAIR-ordered schedule queue over the
 * snapshotted pool groups. Invalidated by {@code snapshot()}.
 */
public Queue<PoolGroupSchedulable> getScheduleQueue() { if (scheduleQueue == null) { scheduleQueue = createPoolGroupQueue(ScheduleComparator.FAIR); } return scheduleQueue; }
java
{ "resource": "" }
q160811
PoolGroupManager.getPreemptQueue
train
/**
 * Lazily builds and returns the FAIR_PREEMPT-ordered preemption queue over
 * the snapshotted pool groups. Invalidated by {@code snapshot()}.
 */
public Queue<PoolGroupSchedulable> getPreemptQueue() { if (preemptQueue == null) { preemptQueue = createPoolGroupQueue(ScheduleComparator.FAIR_PREEMPT); } return preemptQueue; }
java
{ "resource": "" }
q160812
PoolGroupManager.createPoolGroupQueue
train
/**
 * Builds a priority queue containing all snapshotted pool groups, ordered
 * by the given comparator.
 *
 * @param comparator ordering for the queue
 * @return a new PriorityQueue holding the snapshot contents
 */
private Queue<PoolGroupSchedulable> createPoolGroupQueue(
    ScheduleComparator comparator) {
  // PriorityQueue rejects a capacity of 0, so use at least 1.
  int initialCapacity = Math.max(1, snapshotPoolGroups.size());
  Queue<PoolGroupSchedulable> queue =
      new PriorityQueue<PoolGroupSchedulable>(initialCapacity, comparator);
  queue.addAll(snapshotPoolGroups);
  return queue;
}
java
{ "resource": "" }
q160813
PoolGroupManager.addSession
train
/**
 * Adds a session to the pool resolved from the session's pool info (see
 * {@code getPoolInfo(Session)}), logging both the resolved pool and the
 * pool originally requested by the session.
 */
public void addSession(String id, Session session) { PoolInfo poolInfo = getPoolInfo(session); LOG.info("Session " + id + " added to pool info " + poolInfo + " (originally " + session.getInfo().getPoolInfoStrings() +") for " + type); getPoolSchedulable(poolInfo).addSession(id, session); }
java
{ "resource": "" }
q160814
PoolGroupManager.checkPoolInfoIfStrict
train
/**
 * When the cluster only allows configured pools, validates that the given
 * pool info is non-null, matches a configured pool, and uses only legal
 * characters; otherwise throws InvalidSessionHandle with guidance on
 * setting the Corona explicit-pool property. No-op when the strict mode
 * is disabled in {@code conf}.
 *
 * NOTE(review): the last message says "Something not in " +
 * PoolInfo.INVALID_REGEX, which reads oddly for a constant named
 * INVALID_REGEX — confirm the constant's semantics against PoolInfo.
 *
 * @throws InvalidSessionHandle if the pool info fails any strict check
 */
public static void checkPoolInfoIfStrict(PoolInfo poolInfo, ConfigManager configManager, CoronaConf conf) throws InvalidSessionHandle { if (!conf.onlyAllowConfiguredPools()) { return; } // When only allowing configured pools, check the pool name to ensure // it is a configured pool name. Not setting the pool info is also // invalid. A legal name must be specified. if (poolInfo == null) { throw new InvalidSessionHandle("This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was not specified. Please use the Corona parameter " + CoronaConf.EXPLICIT_POOL_PROPERTY + " to set a valid poolgroup and " + "pool in the format '<poolgroup>.<pool>'"); } if (!configManager.isConfiguredPoolInfo(poolInfo)) { throw new InvalidSessionHandle("This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was specified as '" + poolInfo.getPoolGroupName() + "." + poolInfo.getPoolName() + "' and is not part of this cluster. " + "Please use the Corona parameter " + CoronaConf.EXPLICIT_POOL_PROPERTY + " to set a valid pool " + "group and pool in the format <poolgroup>.<pool>"); } if (!PoolInfo.isLegalPoolInfo(poolInfo)) { throw new InvalidSessionHandle("This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was specified as '" + poolInfo.getPoolGroupName() + "." + poolInfo.getPoolName() + "' and has illegal characters (Something not in " + PoolInfo.INVALID_REGEX + "). Please use the Corona parameter " + CoronaConf.EXPLICIT_POOL_PROPERTY + " to set a valid pool " + "group and pool in the format <poolgroup>.<pool>"); } }
java
{ "resource": "" }
q160815
PoolGroupManager.getPoolInfo
train
/**
 * Resolves the effective pool info for a session: falls back to
 * (DEFAULT_POOL_GROUP, userId) when the session sets no pool name, and to
 * DEFAULT_POOL_INFO (with a warning) when the resolved info has illegal
 * characters.
 */
public static PoolInfo getPoolInfo( Session session) { PoolInfo poolInfo = session.getPoolInfo(); // If there is no explicit pool info set, take user name. if (poolInfo == null || poolInfo.getPoolName().equals("")) { poolInfo = new PoolInfo(DEFAULT_POOL_GROUP, session.getUserId()); } if (!PoolInfo.isLegalPoolInfo(poolInfo)) { LOG.warn("Illegal pool info :" + poolInfo + " from session " + session.getSessionId()); return DEFAULT_POOL_INFO; } return poolInfo; }
java
{ "resource": "" }
q160816
PoolGroupManager.getPoolSchedulable
train
/**
 * Returns the pool schedulable for the given pool info, creating its pool
 * group on demand. Uses putIfAbsent so a concurrent creator wins and the
 * losing instance is discarded, keeping nameToPoolGroup consistent
 * without locking.
 */
private PoolSchedulable getPoolSchedulable(PoolInfo poolInfo) { PoolGroupSchedulable poolGroup = nameToPoolGroup.get(poolInfo.getPoolGroupName()); if (poolGroup == null) { poolGroup = new PoolGroupSchedulable( poolInfo.getPoolGroupName(), type, configManager); PoolGroupSchedulable prevPoolGroup = nameToPoolGroup.putIfAbsent(poolInfo.getPoolGroupName(), poolGroup); if (prevPoolGroup != null) { poolGroup = prevPoolGroup; } } return poolGroup.getPool(poolInfo); }
java
{ "resource": "" }
q160817
NativeIO.ioprioGetIfPossible
train
/**
 * Calls the native ioprio_get() when native code is loaded and the call is
 * believed possible. Permanently disables further attempts (ioprioPossible
 * = false) after UnsupportedOperationException or UnsatisfiedLinkError;
 * rethrows NativeIOException. Returns -1 when the call is unavailable.
 *
 * @return the native io priority, or -1 if not available
 * @throws IOException if the native call itself fails
 */
public static int ioprioGetIfPossible() throws IOException { if (nativeLoaded && ioprioPossible) { try { return ioprio_get(); } catch (UnsupportedOperationException uoe) { LOG.warn("ioprioGetIfPossible() failed", uoe); ioprioPossible = false; } catch (UnsatisfiedLinkError ule) { LOG.warn("ioprioGetIfPossible() failed", ule); ioprioPossible = false; } catch (NativeIOException nie) { LOG.warn("ioprioGetIfPossible() failed", nie); throw nie; } } return -1; }
java
{ "resource": "" }
q160818
NativeIO.posixFadviseIfPossible
train
/**
 * Calls the native posix_fadvise for the given fd/range/flags when native
 * code is loaded, then fires the NATIVEIO_POSIX_FADVISE injection event.
 * Permanently disables further attempts after
 * UnsupportedOperationException or UnsatisfiedLinkError; rethrows
 * NativeIOException. Silently no-ops when the native path is unavailable.
 */
public static void posixFadviseIfPossible( FileDescriptor fd, long offset, long len, int flags) throws NativeIOException { if (nativeLoaded && fadvisePossible) { try { posix_fadvise(fd, offset, len, flags); InjectionHandler.processEvent( InjectionEventCore.NATIVEIO_POSIX_FADVISE, flags); } catch (UnsupportedOperationException uoe) { LOG.warn("posixFadviseIfPossible() failed", uoe); fadvisePossible = false; } catch (UnsatisfiedLinkError ule) { LOG.warn("posixFadviseIfPossible() failed", ule); fadvisePossible = false; } catch (NativeIOException nie) { LOG.warn("posixFadviseIfPossible() failed", nie); throw nie; } } }
java
{ "resource": "" }
q160819
NativeIO.syncFileRangeIfPossible
train
/**
 * Calls the native sync_file_range for the given fd/range/flags when
 * native code is loaded (the injection event fires unconditionally, before
 * the availability check). Permanently disables further attempts after
 * UnsupportedOperationException or UnsatisfiedLinkError; rethrows
 * NativeIOException with the call arguments logged.
 */
public static void syncFileRangeIfPossible( FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException { InjectionHandler.processEvent(InjectionEventCore.NATIVEIO_SYNC_FILE_RANGE, flags); if (nativeLoaded && syncFileRangePossible) { try { sync_file_range(fd, offset, nbytes, flags); } catch (UnsupportedOperationException uoe) { LOG.warn("syncFileRangeIfPossible() failed", uoe); syncFileRangePossible = false; } catch (UnsatisfiedLinkError ule) { LOG.warn("syncFileRangeIfPossible() failed", ule); syncFileRangePossible = false; } catch (NativeIOException nie) { LOG.warn("syncFileRangeIfPossible() failed: fd " + fd + " offset " + offset + " nbytes " + nbytes + " flags " + flags, nie); throw nie; } } }
java
{ "resource": "" }
q160820
CreateOptions.writeOptions
train
/**
 * Builds a WriteOptions, applying only the flags that are non-null so the
 * WriteOptions defaults are kept for unspecified values.
 *
 * @param overwrite overwrite flag, or null to keep the default
 * @param forceSync force-sync flag, or null to keep the default
 * @return a configured WriteOptions instance
 */
public static WriteOptions writeOptions(Boolean overwrite, Boolean forceSync) {
  WriteOptions options = new WriteOptions();
  if (overwrite != null) {
    options.setOverwrite(overwrite);
  }
  if (forceSync != null) {
    options.setForcesync(forceSync);
  }
  return options;
}
java
{ "resource": "" }
q160821
CreateOptions.getOpt
train
/**
 * Returns the single option of exactly the given class from {@code opts},
 * or null when {@code opts} is null or contains no such option.
 *
 * @throws IllegalArgumentException if more than one option has the class
 */
public static CreateOptions getOpt(Class<? extends CreateOptions> theClass,
    CreateOptions... opts) {
  if (opts == null) {
    return null;
  }
  CreateOptions match = null;
  for (CreateOptions opt : opts) {
    if (opt.getClass() == theClass) {
      if (match != null) {
        throw new IllegalArgumentException("Multiple args with type " + theClass);
      }
      match = opt;
    }
  }
  return match;
}
java
{ "resource": "" }
q160822
CreateOptions.setOpt
train
public static <T extends CreateOptions> CreateOptions[] setOpt(T newValue, CreateOptions ... opts) { boolean alreadyInOpts = false; if (opts != null) { for (int i = 0; i < opts.length; ++i) { if (opts[i].getClass() == newValue.getClass()) { if (alreadyInOpts) throw new IllegalArgumentException("Multiple args with type " + newValue.getClass()); alreadyInOpts = true; opts[i] = newValue; } } } CreateOptions[] resultOpt = opts; if (!alreadyInOpts) { // no newValue in opt CreateOptions[] newOpts = new CreateOptions[opts.length + 1]; System.arraycopy(opts, 0, newOpts, 0, opts.length); newOpts[opts.length] = newValue; resultOpt = newOpts; } return resultOpt; }
java
{ "resource": "" }
q160823
BlockReceiver.handleMirrorOutError
train
/**
 * Handles a write failure to the downstream mirror. Rethrows the exception
 * when this thread was interrupted (shutdown path); otherwise sets
 * {@code mirrorError} so the client is notified and can tear down the
 * pipeline, while this datanode keeps running. Note Thread.interrupted()
 * clears the interrupt status as a side effect.
 */
private void handleMirrorOutError(IOException ioe) throws IOException { LOG.info(datanode.getDatanodeInfo() + ": Exception writing block " + block + " namespaceId: " + namespaceId + " to mirror " + mirrorAddr + "\n" + StringUtils.stringifyException(ioe)); if (Thread.interrupted()) { // shut down if the thread is interrupted throw ioe; } else { // encounter an error while writing to mirror // continue to run even if can not write to mirror // notify client of the error // and wait for the client to shut down the pipeline mirrorError = true; } }
java
{ "resource": "" }
q160824
BlockReceiver.verifyChunks
train
/**
 * Verifies per-chunk checksums over {@code len} bytes of packet data.
 * The first chunk may start mid-chunk ({@code firstChunkOffset}); all
 * later chunks start at offset 0. Depending on the packet version, the
 * checksum bytes are either in a separate buffer (CHECKSUM_FIRST) or
 * inlined after each chunk's data (otherwise, expected to be
 * CHECKSUM_INLINE). On mismatch, reports the bad block to the namenode
 * when a source datanode is known, then throws.
 *
 * @throws IOException on any checksum mismatch
 */
private void verifyChunks( byte[] dataBuf, int dataOff, int len, byte[] checksumBuf, int checksumOff, int firstChunkOffset, int packetVersion) throws IOException { int chunkOffset = firstChunkOffset; while (len > 0) { int chunkLen = Math.min(len, bytesPerChecksum - chunkOffset); chunkOffset = 0; checksum.update(dataBuf, dataOff, chunkLen); dataOff += chunkLen; boolean checksumCorrect; if (packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) { checksumCorrect = checksum.compare(checksumBuf, checksumOff); checksumOff += checksumSize; } else { // Expect packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_INLINE checksumCorrect = checksum.compare(dataBuf, dataOff); dataOff += checksumSize; } if (!checksumCorrect) { if (srcDataNode != null) { try { LOG.info("report corrupt block " + block + " from datanode " + srcDataNode + " to namenode"); LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] {srcDataNode}); datanode.reportBadBlocks(namespaceId, new LocatedBlock[] {lb}); } catch (IOException e) { LOG.warn("Failed to report bad block " + block + " from datanode " + srcDataNode + " to namenode"); } } throw new IOException("Unexpected checksum mismatch " + "while writing " + block + " from " + inAddr); } checksum.reset(); len -= chunkLen; } }
java
{ "resource": "" }
q160825
Util.stringAsURI
train
/**
 * Interprets a configuration string as a URI. On a syntax error (logged),
 * or when the parsed URI has no scheme, falls back to treating the string
 * as a local file path via fileAsURI, with a warning asking users to use
 * proper URIs in configuration.
 *
 * @throws IOException if the file fallback fails
 */
public static URI stringAsURI(String s) throws IOException { URI u = null; // try to make a URI try { u = new URI(s); } catch (URISyntaxException e){ LOG.error("Syntax error in URI " + s + ". Please check hdfs configuration.", e); } // if URI is null or scheme is undefined, then assume it's file:// if(u == null || u.getScheme() == null){ LOG.warn("Path " + s + " should be specified as a URI " + "in configuration files. Please update hdfs configuration."); u = fileAsURI(new File(s)); } return u; }
java
{ "resource": "" }
q160826
Util.stringCollectionAsURIs
train
/**
 * Converts each name to a URI via stringAsURI. Names that fail conversion
 * are logged and skipped, so the result may be smaller than the input.
 */
public static Collection<URI> stringCollectionAsURIs( Collection<String> names) { Collection<URI> uris = new ArrayList<URI>(names.size()); for(String name : names) { try { uris.add(stringAsURI(name)); } catch (IOException e) { LOG.error("Error while processing URI: " + name, e); } } return uris; }
java
{ "resource": "" }
q160827
TaskLog.getRealTaskLogFilePath
train
/**
 * Returns the shell-quoted path of the log file named by {@code filter}
 * under the task-log base directory for {@code location}.
 */
static String getRealTaskLogFilePath(String location, LogName filter) throws IOException { return FileUtil.makeShellPath(new File(getBaseDir(location), filter.toString())); }
java
{ "resource": "" }
q160828
TaskLog.captureOutAndError
train
/**
 * Convenience overload: wraps {@code cmd} with stdout/stderr capture using
 * no setup commands and without setsid. Delegates to the six-argument
 * variant.
 */
public static List<String> captureOutAndError(List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength ) throws IOException { return captureOutAndError(null, cmd, stdoutFilename, stderrFilename, tailLength, false); }
java
{ "resource": "" }
q160829
TaskLog.captureOutAndError
train
/**
 * Builds the argv list {@code [bash, -c, mergedCommand]} where the merged
 * command runs {@code setup} then {@code cmd}, capturing stdout/stderr
 * into the given files (keeping only {@code tailLength} bytes when
 * applicable) and optionally running under setsid.
 *
 * @return a three-element command list suitable for process execution
 */
public static List<String> captureOutAndError(List<String> setup,
    List<String> cmd, File stdoutFilename, File stderrFilename,
    long tailLength, boolean useSetsid) throws IOException {
  List<String> result = new ArrayList<String>(3);
  result.add(bashCommand);
  result.add("-c");
  String mergedCmd = buildCommandLine(setup, cmd, stdoutFilename,
      stderrFilename, tailLength, useSetsid);
  // mergedCmd is already a String; the original's toString() was redundant.
  result.add(mergedCmd);
  return result;
}
java
{ "resource": "" }
q160830
TaskLog.addCommand
train
public static String addCommand(List<String> cmd, boolean isExecutable) throws IOException { StringBuffer command = new StringBuffer(); for(String s: cmd) { command.append('\''); if (isExecutable) { // the executable name needs to be expressed as a shell path for the // shell to find it. command.append(FileUtil.makeShellPath(new File(s))); isExecutable = false; } else { command.append(s); } command.append('\''); command.append(" "); } return command.toString(); }
java
{ "resource": "" }
q160831
TaskLog.captureDebugOut
train
public static List<String> captureDebugOut(List<String> cmd, File debugoutFilename ) throws IOException { String debugout = FileUtil.makeShellPath(debugoutFilename); List<String> result = new ArrayList<String>(3); result.add(bashCommand); result.add("-c"); StringBuffer mergedCmd = new StringBuffer(); mergedCmd.append("exec "); boolean isExecutable = true; for(String s: cmd) { if (isExecutable) { // the executable name needs to be expressed as a shell path for the // shell to find it. mergedCmd.append(FileUtil.makeShellPath(new File(s))); isExecutable = false; } else { mergedCmd.append(s); } mergedCmd.append(" "); } mergedCmd.append(" < /dev/null "); mergedCmd.append(" >"); mergedCmd.append(debugout); mergedCmd.append(" 2>&1 "); result.add(mergedCmd.toString()); return result; }
java
{ "resource": "" }
q160832
DU.start
train
/**
 * Starts the background disk-usage refresh daemon thread, but only when
 * the configured refresh interval is positive.
 */
public void start() { //only start the thread if the interval is sane if(refreshInterval > 0) { refreshUsed = new Thread(new DURefreshThread(), "refreshUsed-"+dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); } }
java
{ "resource": "" }
q160833
DU.shutdown
train
/**
 * Stops the background refresh thread and clears the per-namespace DU
 * map. Interrupts the refresh thread and waits for it to terminate.
 */
public void shutdown() {
  this.shouldRun = false;
  this.namespaceSliceDUMap.clear();
  if (this.refreshUsed != null) {
    this.refreshUsed.interrupt();
    try {
      this.refreshUsed.join();
      this.refreshUsed = null;
    } catch (InterruptedException ie) {
      // BUGFIX: the original swallowed the interrupt; restore the thread's
      // interrupt status so callers can observe it.
      Thread.currentThread().interrupt();
    }
  }
}
java
{ "resource": "" }
q160834
CoronaClient.killSession
train
/**
 * Asks the ClusterManager to kill the given session. Thrift failures are
 * wrapped in IOException; a ClusterManager in safe mode is reported as an
 * IOException as well. Always returns 0 on success.
 *
 * NOTE(review): the progress message goes to stdout but the success
 * message goes to stderr, and neither ends with a newline — confirm this
 * is intentional.
 */
private int killSession(String sessionId)throws IOException { try { System.out.printf("Killing %s", sessionId); ClusterManagerService.Client client = getCMSClient(); try { client.killSession(sessionId); } catch (SafeModeException e) { throw new IOException( "Cannot kill session yet, ClusterManager is in Safe Mode"); } System.err.printf("%s killed", sessionId); } catch (TException e) { throw new IOException(e); } return 0; }
java
{ "resource": "" }
q160835
CoronaClient.listSessions
train
/**
 * Prints all running sessions reported by the ClusterManager to stdout as
 * a tab-separated table (handle, name, user, poolgroup, pool, priority,
 * and running MAP/REDUCE/JOBTRACKER resource counts). Session names have
 * tabs and newlines escaped; a missing priority defaults to NORMAL.
 * Thrift failures and safe mode are reported as IOException. Returns 0.
 */
private int listSessions() throws IOException { try { ClusterManagerService.Client client = getCMSClient(); List<RunningSession> sessions; try { sessions = client.getSessions(); } catch (SafeModeException e) { throw new IOException( "Cannot list sessions, ClusterManager is in Safe Mode"); } System.out.printf("%d sessions currently running:\n", sessions.size()); System.out.printf("SessionID\t" + "Session Name\t" + "Session User\t" + "Session Poolgroup\t" + "Session Pool\t" + "Session Priority\t" + "Running Mappers\t" + "Running Reducers\t" + "Running Jobtrackers\n"); for (RunningSession session : sessions) { SessionPriority priority = session.getPriority(); if (priority == null) { priority = SessionPriority.NORMAL; } System.out.printf("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", session.getHandle(), session.getName().replace("\t", "\\t").replace("\n", "\\n"), session.getUserId(), session.getPoolInfo().getPoolGroupName(), session.getPoolInfo().getPoolName(), priority, session.getRunningResources().get(ResourceType.MAP), session.getRunningResources().get(ResourceType.REDUCE), session.getRunningResources().get(ResourceType.JOBTRACKER)); } } catch (TException e) { throw new IOException(e); } return 0; }
java
{ "resource": "" }
q160836
EditsVisitorFactory.getEditsVisitor
train
/**
 * Factory for edits visitors: returns an Xml, Statistics, or Binary edits
 * visitor depending on the (case-insensitive) processor name.
 *
 * @throws IOException if the processor name is not one of xml/stats/binary
 */
static public EditsVisitor getEditsVisitor(String filename,
    String processor,
    Tokenizer tokenizer,
    boolean printToScreen) throws IOException {
  // Lower-case once instead of per comparison.
  String normalized = processor.toLowerCase();
  if (normalized.equals("xml")) {
    return new XmlEditsVisitor(filename, tokenizer, printToScreen);
  } else if (normalized.equals("stats")) {
    return new StatisticsEditsVisitor(filename, tokenizer, printToScreen);
  } else if (normalized.equals("binary")) {
    return new BinaryEditsVisitor(filename, tokenizer, printToScreen);
  } else {
    // BUGFIX: "proccesor" -> "processor" in the error message.
    throw new IOException("Unknown processor " + processor +
        " (valid processors: xml, binary, stats)");
  }
}
java
{ "resource": "" }
q160837
LinuxTaskController.runCommand
train
/**
 * Runs a task-controller command as the given user via
 * buildTaskControllerExecutor. On failure, logs the exit code, exception,
 * and command output, then rethrows wrapped in IOException (cause
 * preserved). On success, the output is logged when debug is enabled
 * (note: emitted at INFO level inside the isDebugEnabled guard).
 */
private void runCommand(TaskCommands taskCommand, String user, List<String> cmdArgs, File workDir, Map<String, String> env) throws IOException { ShellCommandExecutor shExec = buildTaskControllerExecutor(taskCommand, user, cmdArgs, workDir, env); try { shExec.execute(); } catch (Exception e) { LOG.warn("Exit code from " + taskCommand.toString() + " is : " + shExec.getExitCode()); LOG.warn("Exception thrown by " + taskCommand.toString() + " : " + StringUtils.stringifyException(e)); LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); throw new IOException(e); } if (LOG.isDebugEnabled()) { LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); } }
java
{ "resource": "" }
q160838
LinuxTaskController.getJobId
train
/**
 * Extracts the job id string from the task attempt id held in
 * {@code context}.
 */
private String getJobId(TaskControllerContext context) {
  TaskAttemptID attemptId =
      TaskAttemptID.forName(context.task.getTaskID().toString());
  return attemptId.getJobID().toString();
}
java
{ "resource": "" }
q160839
LinuxTaskController.getDirectoryChosenForTask
train
/**
 * Finds which configured mapred local directory contains the given task
 * directory, by reconstructing each candidate task dir and comparing.
 *
 * @return the matching local dir root
 * @throws IllegalArgumentException if no local dir produces the directory
 */
private String getDirectoryChosenForTask(File directory, TaskControllerContext context) { String jobId = getJobId(context); String taskId = context.task.getTaskID().toString(); for (String dir : mapredLocalDirs) { File mapredDir = new File(dir); File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir( jobId, taskId, context.task.isTaskCleanupTask())); if (directory.equals(taskDir)) { return dir; } } LOG.error("Couldn't parse task cache directory correctly"); throw new IllegalArgumentException("invalid task cache directory " + directory.getAbsolutePath()); }
java
{ "resource": "" }
q160840
LinuxTaskController.setupTaskLogFileAccess
train
/**
 * Grants access to the task attempt's log directory by chmod-ing the
 * parent directory of its syslog file (non-recursively) to
 * FILE_PERMISSIONS.
 */
private void setupTaskLogFileAccess(TaskControllerContext context) { TaskAttemptID taskId = context.task.getTaskID(); File f = TaskLog.getTaskLogFile(taskId, TaskLog.LogName.SYSLOG); String taskAttemptLogDir = f.getParentFile().getAbsolutePath(); changeDirectoryPermissions(taskAttemptLogDir, FILE_PERMISSIONS, false); }
java
{ "resource": "" }
q160841
LinuxTaskController.setupTaskCacheFileAccess
train
/**
 * Recursively chmods the task's cache directory to FILE_PERMISSIONS on
 * every configured mapred local disk where that directory exists.
 */
private void setupTaskCacheFileAccess(TaskControllerContext context) { String taskId = context.task.getTaskID().toString(); JobID jobId = JobID.forName(getJobId(context)); //Change permission for the task across all the disks for(String localDir : mapredLocalDirs) { File f = new File(localDir); File taskCacheDir = new File(f,TaskTracker.getLocalTaskDir( jobId.toString(), taskId, context.task.isTaskCleanupTask())); if(taskCacheDir.exists()) { changeDirectoryPermissions(taskCacheDir.getPath(), FILE_PERMISSIONS, true); } }//end of local directory Iteration }
java
{ "resource": "" }
q160842
LinuxTaskController.changeDirectoryPermissions
train
/**
 * Best-effort chmod of a directory (optionally recursive). Failures —
 * whether an exception or a non-zero return code — are logged as warnings
 * and deliberately not propagated.
 */
private void changeDirectoryPermissions(String dir, String mode, boolean isRecursive) { int ret = 0; try { ret = FileUtil.chmod(dir, mode, isRecursive); } catch (Exception e) { LOG.warn("Exception in changing permissions for directory " + dir + ". Exception: " + e.getMessage()); } if (ret != 0) { LOG.warn("Could not change permissions for directory " + dir); } }
java
{ "resource": "" }
q160843
LinuxTaskController.getTaskCacheDirectory
train
/**
 * Computes the task-specific cache directory from the task id and the
 * job's cache directory (two levels above env.workDir), appending the
 * cleanup suffix for cleanup tasks. Rebuilt each call because, with JVM
 * reuse, env.workDir may belong to a different task.
 */
private String getTaskCacheDirectory(TaskControllerContext context) { // In the case of JVM reuse, the task specific directory // is different from what is set with respect with // env.workDir. Hence building this from the taskId everytime. String taskId = context.task.getTaskID().toString(); File cacheDirForJob = context.env.workDir.getParentFile().getParentFile(); if(context.task.isTaskCleanupTask()) { taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX; } return new File(cacheDirForJob, taskId).getAbsolutePath(); }
java
{ "resource": "" }
q160844
LinuxTaskController.writeCommand
train
/**
 * Writes the JVM command line to the COMMAND_FILE under {@code directory}
 * and marks the file world-readable and world-executable.
 *
 * NOTE(review): a write failure is only logged — the method declares
 * throws IOException but never rethrows, so callers cannot detect the
 * failure. Confirm this best-effort behavior is intended.
 */
private void writeCommand(String cmdLine, String directory) throws IOException { PrintWriter pw = null; String commandFile = directory + File.separator + COMMAND_FILE; LOG.info("Writing commands to " + commandFile); try { FileWriter fw = new FileWriter(commandFile); BufferedWriter bw = new BufferedWriter(fw); pw = new PrintWriter(bw); pw.write(cmdLine); } catch (IOException ioe) { LOG.error("Caught IOException while writing JVM command line to file. " + ioe.getMessage()); } finally { if (pw != null) { pw.close(); } // set execute permissions for all on the file. File f = new File(commandFile); if (f.exists()) { f.setReadable(true, false); f.setExecutable(true, false); } } }
java
{ "resource": "" }
q160845
LinuxTaskController.finishTask
train
/**
 * Runs the given task-controller command (typically a kill) for the task
 * in {@code context}, executing as the task's user in its working
 * directory. No-op when the context carries no task.
 *
 * @throws IOException if the task-controller invocation fails (cause
 *         preserved; controller output logged)
 */
private void finishTask(TaskControllerContext context,
    TaskCommands command) throws IOException {
  if (context.task == null) {
    LOG.info("Context task null not killing the JVM");
    return;
  }
  ShellCommandExecutor shExec = buildTaskControllerExecutor(
      command, context.env.conf.getUser(),
      buildKillTaskCommandArgs(context), context.env.workDir,
      context.env.env);
  try {
    shExec.execute();
  } catch (Exception e) {
    // BUGFIX: "task-contoller" -> "task-controller" in the log message.
    LOG.warn("Output from task-controller is : " + shExec.getOutput());
    throw new IOException(e);
  }
}
java
{ "resource": "" }
q160846
JobHistory.getHistoryFilePath
train
/**
 * Returns the moved history-file path recorded for the job, or null if no
 * entry exists in {@code jobHistoryFileMap}.
 */
public static String getHistoryFilePath(JobID jobId) { MovedFileInfo info = jobHistoryFileMap.get(jobId); if (info == null) { return null; } return info.historyFile; }
java
{ "resource": "" }
q160847
JobHistory.init
train
public static boolean init(JobHistoryObserver jobTracker, JobConf conf, String hostname, long jobTrackerStartTime){ try { LOG_DIR = conf.get("hadoop.job.history.location" , "file:///" + new File( System.getProperty("hadoop.log.dir", "/tmp")).getAbsolutePath() + File.separator + "history"); JOBTRACKER_UNIQUE_STRING = hostname + "_" + String.valueOf(jobTrackerStartTime) + "_"; jobtrackerHostname = hostname; Path logDir = new Path(LOG_DIR); LOGDIR_FS = logDir.getFileSystem(conf); if (!LOGDIR_FS.exists(logDir)){ if (!LOGDIR_FS.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) { throw new IOException("Mkdirs failed to create " + logDir.toString()); } } conf.set("hadoop.job.history.location", LOG_DIR); disableHistory = false; // set the job history block size (default is 3MB) jobHistoryBlockSize = conf.getLong("mapred.jobtracker.job.history.block.size", 3 * 1024 * 1024); jtConf = conf; // initialize the file manager fileManager = new JobHistoryFilesManager(conf, jobTracker, logDir); } catch(IOException e) { LOG.error("Failed to initialize JobHistory log file", e); disableHistory = true; } fileManager.startIOExecutor(); return !(disableHistory); }
java
{ "resource": "" }
q160848
JobHistory.parseLine
train
/**
 * Parses one history-log line: the record type precedes the first space;
 * the remainder is matched by {@code pattern} into KEY="value" tuples.
 * Values are unescaped when {@code isEscaped} is set, collected into a
 * Keys→String map, and handed to the listener with the record type.
 *
 * @throws IOException propagated from the listener's handle()
 */
private static void parseLine(String line, Listener l, boolean isEscaped) throws IOException{ // extract the record type int idx = line.indexOf(' '); String recType = line.substring(0, idx); String data = line.substring(idx+1, line.length()); Matcher matcher = pattern.matcher(data); Map<Keys,String> parseBuffer = new HashMap<Keys, String>(); while(matcher.find()){ String tuple = matcher.group(0); String []parts = StringUtils.split(tuple, StringUtils.ESCAPE_CHAR, '='); String value = parts[1].substring(1, parts[1].length() -1); if (isEscaped) { value = StringUtils.unEscapeString(value, StringUtils.ESCAPE_CHAR, charsToEscape); } parseBuffer.put(Keys.valueOf(parts[0]), value); } l.handle(RecordTypes.valueOf(recType), parseBuffer); parseBuffer.clear(); }
java
{ "resource": "" }
q160849
JobHistory.log
train
/**
 * Writes one history record line: record type, then KEY="escapedValue",
 * separated by DELIMITER and terminated by LINE_DELIMITER_CHAR. The value
 * is escaped before being quoted.
 */
public static void log(PrintWriter out, RecordTypes recordType, Keys key, String value){ value = escapeString(value); out.println(recordType.name() + DELIMITER + key + "=\"" + value + "\"" + DELIMITER + LINE_DELIMITER_CHAR); }
java
{ "resource": "" }
q160850
JobHistory.getTaskLogsUrl
train
/**
 * Builds the tasklog URL for a historical task attempt, or returns null
 * when any of HTTP_PORT, TRACKER_NAME, or TASK_ATTEMPT_ID is missing
 * (empty) in the attempt record. The tracker name is first converted to a
 * hostname.
 */
public static String getTaskLogsUrl(JobHistory.TaskAttempt attempt) { if (attempt.get(Keys.HTTP_PORT).equals("") || attempt.get(Keys.TRACKER_NAME).equals("") || attempt.get(Keys.TASK_ATTEMPT_ID).equals("")) { return null; } String taskTrackerName = JobInProgress.convertTrackerNameToHostName( attempt.get(Keys.TRACKER_NAME)); return TaskLogServlet.getTaskLogUrl(taskTrackerName, attempt .get(Keys.HTTP_PORT), attempt.get(Keys.TASK_ATTEMPT_ID)); }
java
{ "resource": "" }
q160851
TaskTracker.getAvailableSlots
train
/**
 * Returns the number of free slots of the given type on this tracker,
 * logging the max/occupied counts when debug logging is enabled.
 *
 * @param taskType MAP for map slots; anything else yields reduce slots
 */
public int getAvailableSlots(TaskType taskType) {
  if (taskType == TaskType.MAP) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(trackerName + " getAvailSlots:" +
          " max(m)=" + status.getMaxMapSlots() +
          " occupied(m)=" + status.countOccupiedMapSlots());
    }
    return status.getAvailableMapSlots();
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(trackerName + " getAvailSlots:" +
        " max(r)=" + status.getMaxReduceSlots() +
        " occupied(r)=" + status.countOccupiedReduceSlots());
  }
  return status.getAvailableReduceSlots();
}
java
{ "resource": "" }
q160852
OfferService.sendIncrementalBlockReport
train
private void sendIncrementalBlockReport(long startTime) throws Exception { // check if there are newly received blocks Block[] receivedAndDeletedBlockArray = null; int numBlocksReceivedAndDeleted = 0; int currentPendingRequests = 0; synchronized (receivedAndDeletedBlockList) { // construct the ACKs array lastDeletedReport = startTime; numBlocksReceivedAndDeleted = receivedAndDeletedBlockList.size(); if (numBlocksReceivedAndDeleted > 0) { receivedAndDeletedBlockArray = receivedAndDeletedBlockList .toArray(new Block[numBlocksReceivedAndDeleted]); receivedAndDeletedBlockList.clear(); currentPendingRequests = pendingReceivedRequests; pendingReceivedRequests = 0; } } // process received + deleted // if exception is thrown, add all blocks to the retry list if (receivedAndDeletedBlockArray != null) { long[] failed = null; try { IncrementalBlockReport ibr = new IncrementalBlockReport(receivedAndDeletedBlockArray); long rpcStartTime = 0; if (LOG.isDebugEnabled()) { rpcStartTime = System.nanoTime(); LOG.debug("sending blockReceivedAndDeletedNew " + receivedAndDeletedBlockArray.length + " blocks to " + namenodeAddress); } failed = avatarnode.blockReceivedAndDeletedNew(nsRegistration, ibr); if (LOG.isDebugEnabled()) { LOG.debug("finished blockReceivedAndDeletedNew " + "to " + namenodeAddress + " time: " + (System.nanoTime() - rpcStartTime) + " ns"); } boolean isPrimaryCached = isPrimaryServiceCached(); // if we talk to primary failed must be null // if we talk to standby failed shouldn't be null if(isPrimaryCached && failed!=null){ //this should never happen //the primary can't switch to standby throw new IOException("Primary started acting as standby"); } else if (!isPrimaryCached && failed == null) { String msg = "Received null response from standby for incremental" + " block report. "; if (clearPrimaryCommandProcessed) { LOG.info(msg + "Failover is in progress" + " - will not clear primary again"); } else { LOG.info(msg + "Standby is acting as primary. 
Clearing primary"); // failover - we need to refresh our knowledge this.clearPrimary(); } } } catch (Exception e) { processFailedBlocks( receivedAndDeletedBlockArray, currentPendingRequests); throw e; } if(failed != null && failed.length != 0){ processFailedReceivedDeleted(failed, receivedAndDeletedBlockArray); } } }
java
{ "resource": "" }
q160853
OfferService.shouldSendIncrementalReport
train
private boolean shouldSendIncrementalReport(long startTime){ boolean isPrimary = isPrimaryServiceCached() || donotDelayIncrementalBlockReports; boolean deleteIntervalTrigger = (startTime - lastDeletedReport > anode.deletedReportInterval); // by default the report should be sent if there are any received // acks, or the deleteInterval has passed boolean sendReportDefault = pendingReceivedRequests > 0 || deleteIntervalTrigger; if(isPrimary){ // if talking to primary, send the report with the default // conditions return sendReportDefault; } else { // if talking to standby. send the report ONLY when the // retry interval has passed in addition to the default // condidtions boolean sendIfStandby = (lastBlockReceivedFailed + blockReceivedRetryInterval < startTime) && sendReportDefault; return sendIfStandby; } }
java
{ "resource": "" }
q160854
OfferService.processFailedBlocks
train
private void processFailedBlocks(Block []failed, int failedPendingRequests) { synchronized (receivedAndDeletedBlockList) { // We are adding to the front of a linked list and hence to preserve // order we should add the blocks in the reverse order. for (int i = failed.length - 1; i >= 0; i--) { receivedAndDeletedBlockList.add(0, failed[i]); } pendingReceivedRequests += failedPendingRequests; } }
java
{ "resource": "" }
q160855
OfferService.checkFailover
train
private boolean checkFailover() throws InterruptedException { boolean isPrimary = isPrimaryServiceCached(); if (!isPrimary && isPrimaryService()) { this.servicePair.setPrimaryOfferService(this); return true; } return false; }
java
{ "resource": "" }
q160856
OfferService.processCommand
train
private boolean processCommand(DatanodeCommand[] cmds, long processStartTime) throws InterruptedException { if (cmds != null) { // at each heartbeat the standby offer service will talk to ZK! boolean switchedFromStandbyToPrimary = checkFailover(); for (DatanodeCommand cmd : cmds) { try { // The datanode has received a register command after the failover, this // means that the offerservice thread for the datanode was down for a // while and it most probably did not clean up its deletion queue, hence // force a cleanup. if (switchedFromStandbyToPrimary && cmd.getAction() == DatanodeProtocol.DNA_REGISTER) { this.clearPrimary(); } // The standby service thread is allowed to process only a small set // of valid commands. if (!isPrimaryServiceCached() && !isValidStandbyCommand(cmd)) { LOG.warn("Received an invalid command " + cmd.getAction() + " from standby " + this.namenodeAddress); continue; } if (processCommand(cmd, processStartTime) == false) { return false; } } catch (IOException ioe) { LOG.warn("Error processing datanode Command", ioe); } } } return true; }
java
{ "resource": "" }
q160857
OfferService.prepareFailover
train
private void prepareFailover() { LOG.info("PREPARE FAILOVER requested by : " + this.avatarnodeAddress); // we should start sending incremental block reports and block // reports normally setBackoff(false); this.donotDelayIncrementalBlockReports = true; InjectionHandler.processEvent(InjectionEvent.OFFERSERVICE_PREPARE_FAILOVER, nsRegistration.toString()); }
java
{ "resource": "" }
q160858
OfferService.clearPrimary
train
private boolean clearPrimary() throws InterruptedException { try { if (!isPrimaryServiceCached()) { InetSocketAddress addr1 = servicePair.avatarAddr1; InetSocketAddress addr2 = servicePair.avatarAddr2; if (avatarnodeAddress.equals(addr2)) { LOG.info("Restarting service for AvatarNode : " + addr1); servicePair.restartService1(); } else if (avatarnodeAddress.equals(addr1)) { LOG.info("Restarting service for AvatarNode : " + addr2); servicePair.restartService2(); } else { throw new IOException("Address : " + avatarnodeAddress + " does not match any avatar address"); } LOG.info("Finished Processing CLEAR PRIMARY requested by : " + this.avatarnodeAddress); this.forceIncrementalReport = true; this.donotDelayIncrementalBlockReports = true; } InjectionHandler.processEvent(InjectionEvent.OFFERSERVICE_CLEAR_PRIMARY); } catch (IOException e) { LOG.error("Exception processing CLEAR PRIMARY", e); return false; } return true; }
java
{ "resource": "" }
q160859
OfferService.scheduleBlockReport
train
public void scheduleBlockReport(long delay) { if (delay > 0) { // send BR after random delay lastBlockReport = System.currentTimeMillis() - ( anode.blockReportInterval - R.nextInt((int)(delay))); } else { // send at next heartbeat lastBlockReport = lastHeartbeat - anode.blockReportInterval; } resetBlockReportTime = true; // reset future BRs for randomness }
java
{ "resource": "" }
q160860
OfferService.removeReceivedBlocks
train
void removeReceivedBlocks(Block[] removeList) { long start = AvatarDataNode.now(); synchronized(receivedAndDeletedBlockList) { ReceivedBlockInfo block = new ReceivedBlockInfo(); block.setDelHints(ReceivedBlockInfo.WILDCARD_HINT); for (Block bi : removeList) { block.set(bi.getBlockId(), bi.getNumBytes(), bi.getGenerationStamp()); while (receivedAndDeletedBlockList.remove(block)) { LOG.info("Block deletion command deleted from receivedDeletedBlockList " + bi); } } } long stop = AvatarDataNode.now(); LOG.info("Pruning blocks from the received list took " + (stop - start) + "ms for: " + removeList.length + "blocks, queue length: " + receivedAndDeletedBlockList.size()); }
java
{ "resource": "" }
q160861
CrcConcat.transform
train
static int transform(int crc, int[][] lookupTable) { int cb1 = lookupTable[0][crc & 0xff]; int cb2 = lookupTable[1][(crc >>>= 8) & 0xff]; int cb3 = lookupTable[2][(crc >>>= 8) & 0xff]; int cb4 = lookupTable[3][(crc >>>= 8) & 0xff]; return cb1 ^ cb2 ^ cb3 ^ cb4; }
java
{ "resource": "" }
q160862
CrcConcat.concatCrc
train
static public int concatCrc(int crc1, int crc2, int order) { // Calculate CRC of crc1 + order's 0 int crcForCrc1 = crc1; int orderRemained = order; // Fast transforming CRCs for adding 0 to the end of the byte array by table // look-up for (LookupTable lookupTable : lookupTables) { while (orderRemained >= lookupTable.getOrder()) { crcForCrc1 = transform(crcForCrc1, lookupTable.getLookupTable()); orderRemained -= lookupTable.getOrder(); } } if (orderRemained > 0) { // We continue the first byte array's CRC calculating // and adding 0s to it. And then we plus it with CRC2 // // Doing that, we need to offset the CRC initial value of CRC2 by // subtracting a CRC value of empty string. // // For example, A1A2A3's CRC is C1C2C3C4, // while B1 B2 B3's CRc is C5C6C7C8 and we wnat to concatenate them, // it means (our initial value is FF FF FF FF): // FF FF FF FF A1 A2 A3 C1 C2 C3 C4 // FF FF FF FF B1 B2 B3 C5 C6 C7 C8 // both are multiple of generation polynomial. // By continue CRC by adding zeros, actually, we calculated // the CRC C1'C2'C3'C4, so that // FF FF FF FF A1 A2 A3 00 00 00 C1'C2'C3'C4' // is the multiple of generation polynomial. // By adding C5C6C7C8 and C1'C2'C3'C4', what we got is not // the CRC for // FF FF FF FF A1 A2 A3 B1 B2 B3 // which we expect, but this string plus: // FF FF FF FF 00 00 00 // To offset the impact, the only thing we need to do, is // to subtract the result by the CRC value for 00 00 00. // int initial = CrcConcatLookupTables.initCrcMap[orderRemained]; NativeCrc32 pjc = new NativeCrc32(); pjc.setValue(crcForCrc1); byte[] zeros = new byte[orderRemained]; pjc.update(zeros, 0, zeros.length); crcForCrc1 = (int) pjc.getValue() ^ initial; } return crcForCrc1 ^ crc2; }
java
{ "resource": "" }
q160863
JobEndNotifier.localRunnerNotification
train
public static void localRunnerNotification(JobConf conf, JobStatus status) { JobEndStatusInfo notification = createNotification(conf, status); if (notification != null) { while (notification.configureForRetry()) { try { int code = httpNotification(notification.getUri()); if (code != 200) { throw new IOException("Invalid response status code: " + code); } else { break; } } catch (IOException ioex) { LOG.error("Notification error [" + notification.getUri() + "]", ioex); } catch (Exception ex) { LOG.error("Notification error [" + notification.getUri() + "]", ex); } try { synchronized (Thread.currentThread()) { Thread.currentThread().sleep(notification.getRetryInterval()); } } catch (InterruptedException iex) { LOG.error("Notification retry error [" + notification + "]", iex); } } } }
java
{ "resource": "" }
q160864
ResourceTypeProperties.canBePreempted
train
public static boolean canBePreempted(ResourceType type) { // Preemption is not allowed for JOBTRACKER grants. switch (type) { case MAP: return true; case REDUCE: return true; case JOBTRACKER: return false; default: throw new RuntimeException("Undefined Preemption behavior for " + type); } }
java
{ "resource": "" }
q160865
ResourceTypeProperties.neededLocalityLevels
train
public static List<LocalityLevel> neededLocalityLevels(ResourceType type) { List<LocalityLevel> l = new ArrayList<LocalityLevel>(); switch (type) { case MAP: l.add(LocalityLevel.NODE); l.add(LocalityLevel.RACK); l.add(LocalityLevel.ANY); break; case REDUCE: l.add(LocalityLevel.ANY); break; case JOBTRACKER: l.add(LocalityLevel.ANY); break; default: throw new RuntimeException("Undefined locality behavior for " + type); } return l; }
java
{ "resource": "" }
q160866
OfflineImageViewer.findImageVersion
train
private int findImageVersion(DataInputStream in) throws IOException { in.mark(42); // arbitrary amount, resetting immediately int version = in.readInt(); in.reset(); return version; }
java
{ "resource": "" }
q160867
FaultTolerantBlockPlacementPolicy.setBadHostsAndRacks
train
public static void setBadHostsAndRacks(Set<String> racks, Set<String> hosts) { badRacks = racks; badHosts = hosts; }
java
{ "resource": "" }
q160868
FaultTolerantBlockPlacementPolicy.initParityConfigs
train
private void initParityConfigs() { Set<String> acceptedCodecIds = new HashSet<String>(); for (String s : conf.get("dfs.f4.accepted.codecs", "rs,xor").split(",")) { acceptedCodecIds.add(s); } for (Codec c : Codec.getCodecs()) { if (acceptedCodecIds.contains(c.id)) { FSNamesystem.LOG.info("F4: Parity info." + " Id: " + c.id + " Parity Length: " + c.parityLength + " Parity Stripe Length: " + c.stripeLength + " Parity directory: " + c.parityDirectory + " Parity temp directory: " + c.tmpParityDirectory); acceptedCodecs.add(c); if (c.stripeLength > this.stripeLen) { // Use the max stripe length this.stripeLen = c.stripeLength; } } } FSNamesystem.LOG.info("F4: Initialized stripe len to: " + this.stripeLen); }
java
{ "resource": "" }
q160869
FaultTolerantBlockPlacementPolicy.getRackToHostsMapForStripe
train
private HashMap<String, HashSet<Node>> getRackToHostsMapForStripe( String srcFileName, String parityFileName, int stripeLen, int parityLen, int stripeIndex) throws IOException { HashMap<String, HashSet<Node>> rackToHosts = new HashMap<String, HashSet<Node>>(); if (srcFileName != null) { rackToHosts = getRackToHostsMapForStripe(srcFileName, stripeIndex, stripeLen); } if (parityFileName != null) { HashMap<String, HashSet<Node>> rackToHostsForParity = getRackToHostsMapForStripe(parityFileName, stripeIndex, parityLen); for (Map.Entry<String, HashSet<Node>> e : rackToHostsForParity.entrySet()) { HashSet<Node> nodes = rackToHosts.get(e.getKey()); if (nodes == null) { nodes = new HashSet<Node>(); rackToHosts.put(e.getKey(), nodes); } for (Node n : e.getValue()) { nodes.add(n); } } } for (Map.Entry<String, HashSet<Node>> e : rackToHosts.entrySet()) { if (e.getValue().size() > 1) { FSNamesystem.LOG.warn("F4: Rack " + e.getKey() + " being overused for stripe: " + stripeIndex); } } return rackToHosts; }
java
{ "resource": "" }
q160870
FaultTolerantBlockPlacementPolicy.getGoodNode
train
private boolean getGoodNode( HashMap<String, HashSet<Node>> candidateNodesByRacks, boolean considerLoad, long blockSize, List<DatanodeDescriptor> results) { List<Map.Entry<String, HashSet<Node>>> sorted = new ArrayList<Map.Entry<String, HashSet<Node>>>(); for (Map.Entry<String, HashSet<Node>> entry : candidateNodesByRacks.entrySet()) { sorted.add(entry); } Collections.sort(sorted, new RackComparator(blockSize)); int count = sorted.size() / 4; Collections.shuffle(sorted.subList(0, count)); for (Map.Entry<String, HashSet<Node>> e : sorted) { if (getGoodNode(e.getValue(), considerLoad, blockSize, results)) { return true; } } return false; }
java
{ "resource": "" }
q160871
FaultTolerantBlockPlacementPolicy.getGoodNode
train
private boolean getGoodNode(Set<Node> candidateNodes, boolean considerLoad, long blockSize, List<DatanodeDescriptor> results) { List<DatanodeDescriptor> sorted = new ArrayList<DatanodeDescriptor>(); for (Node n : candidateNodes) { sorted.add((DatanodeDescriptor)n); } final long blocksize = blockSize; Collections.sort(sorted, new Comparator<DatanodeDescriptor>() { public int compare(DatanodeDescriptor n1, DatanodeDescriptor n2) { long ret = (n2.getRemaining() - (n2.getBlocksScheduled() * blocksize)) - (n1.getRemaining() - (n1.getBlocksScheduled() * blocksize)); return ret == 0 ? 0 : (ret > 0) ? 1 : -1; } }); // Also, add some randomness. We are doing so because it seems // that if there are many copies scheduled at the same time, namenode // does not have the uptodate information. So, we need to add some // randomness so that there is not a lot of copies targeted to // the same node, which will overload the hosts and may lead to // timeouts. int count = sorted.size() / 2; Collections.shuffle(sorted.subList(0, count)); for (DatanodeDescriptor n : sorted) { if (this.isGoodTarget((DatanodeDescriptor)n, blocksize, 1, // MaxTargerPerLoc (per rack) considerLoad, results)) { results.add((DatanodeDescriptor)n); return true; } } return false; }
java
{ "resource": "" }
q160872
AvatarStorageSetup.checkImageStorage
train
private static String checkImageStorage(URI sharedImage, URI sharedEdits) { if (sharedImage.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { // shared image is stored in file storage return ""; } else if (sharedImage.getScheme().equals( QuorumJournalManager.QJM_URI_SCHEME) && sharedImage.equals(sharedEdits)) { // image is stored in qjm together with edits return ""; } return "Shared image uri: " + sharedImage + " must be either file storage" + " or be equal to shared edits storage " + sharedEdits + ". "; }
java
{ "resource": "" }
q160873
AvatarStorageSetup.checkFileURIScheme
train
private static String checkFileURIScheme(Collection<URI> uris) { for (URI uri : uris) if (uri.getScheme().compareTo(JournalType.FILE.name().toLowerCase()) != 0) return "The specified path is not a file." + "Avatar supports file non-shared storage only... "; return ""; }
java
{ "resource": "" }
q160874
FileImageManager.getCheckpointOutputStream
train
@Override public OutputStream getCheckpointOutputStream(long imageTxId) throws IOException { String fileName = NNStorage.getCheckpointImageFileName(imageTxId); return new FileOutputStream(new File(sd.getCurrentDir(), fileName)); }
java
{ "resource": "" }
q160875
FileImageManager.renameCheckpointInDir
train
private static void renameCheckpointInDir(StorageDirectory sd, long txid) throws IOException { File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); File curFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid); // renameTo fails on Windows if the destination file // already exists. LOG.info("Renaming " + ckpt.getAbsolutePath() + " to " + curFile.getAbsolutePath()); if (!ckpt.renameTo(curFile)) { if (!curFile.delete() || !ckpt.renameTo(curFile)) { throw new IOException("renaming " + ckpt.getAbsolutePath() + " to " + curFile.getAbsolutePath() + " FAILED"); } } }
java
{ "resource": "" }
q160876
FileImageManager.reportError
train
private void reportError(StorageDirectory sd) { if (storage instanceof NNStorage) { // pass null, since we handle the disable here ((NNStorage)storage).reportErrorsOnDirectory(sd, null); } else { LOG.error("Failed direcory: " + sd.getCurrentDir()); } }
java
{ "resource": "" }
q160877
PoolInfoMetrics.getResourceMetadata
train
public ResourceMetadata getResourceMetadata() { if (poolInfo.getPoolName() == null || !counters.containsKey(MetricName.MIN) || !counters.containsKey(MetricName.MAX) || !counters.containsKey(MetricName.GRANTED) || !counters.containsKey(MetricName.REQUESTED)) { return null; } return new ResourceMetadata( PoolInfo.createStringFromPoolInfo(poolInfo), counters.get(MetricName.MIN).intValue(), counters.get(MetricName.MAX).intValue(), counters.get(MetricName.GRANTED).intValue(), counters.get(MetricName.REQUESTED).intValue()); }
java
{ "resource": "" }
q160878
PoolInfoMetrics.updateMetricsRecord
train
public void updateMetricsRecord() { for (Map.Entry<MetricName, Long> entry: counters.entrySet()) { String name = (entry.getKey() + "_" + type).toLowerCase(); record.setMetric(name, entry.getValue()); } record.update(); }
java
{ "resource": "" }
q160879
Chain.configure
train
public void configure(JobConf jobConf) { String prefix = getPrefix(isMap); chainJobConf = jobConf; SerializationFactory serializationFactory = new SerializationFactory(chainJobConf); int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0); for (int i = 0; i < index; i++) { Class<? extends Mapper> klass = jobConf.getClass(prefix + CHAIN_MAPPER_CLASS + i, null, Mapper.class); JobConf mConf = getChainElementConf(jobConf, prefix + CHAIN_MAPPER_CONFIG + i); Mapper mapper = ReflectionUtils.newInstance(klass, mConf); mappers.add(mapper); if (mConf.getBoolean(MAPPER_BY_VALUE, true)) { mappersKeySerialization.add(serializationFactory.getSerialization( mConf.getClass(MAPPER_OUTPUT_KEY_CLASS, null))); mappersValueSerialization.add(serializationFactory.getSerialization( mConf.getClass(MAPPER_OUTPUT_VALUE_CLASS, null))); } else { mappersKeySerialization.add(null); mappersValueSerialization.add(null); } } Class<? extends Reducer> klass = jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null, Reducer.class); if (klass != null) { JobConf rConf = getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG); reducer = ReflectionUtils.newInstance(klass, rConf); if (rConf.getBoolean(REDUCER_BY_VALUE, true)) { reducerKeySerialization = serializationFactory .getSerialization(rConf.getClass(REDUCER_OUTPUT_KEY_CLASS, null)); reducerValueSerialization = serializationFactory .getSerialization(rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS, null)); } else { reducerKeySerialization = null; reducerValueSerialization = null; } } }
java
{ "resource": "" }
q160880
Chain.close
train
public void close() throws IOException { for (Mapper map : mappers) { map.close(); } if (reducer != null) { reducer.close(); } }
java
{ "resource": "" }
q160881
NamenodeFsck.fsck
train
public void fsck() throws IOException { InjectionHandler.processEvent(InjectionEvent.NAMENODE_FSCK_START); try { FileStatus[] files = nn.namesystem.dir.getListing(path); FsckResult res = new FsckResult(); if (!this.showFiles && !this.showBlocks && !this.showLocations && !this.showRacks) { res.totalRacks = nn.getNetworkTopology().getNumOfRacks(); res.totalDatanodes = nn.namesystem .getNumberOfDatanodes(DatanodeReportType.LIVE); } res.setReplication((short) conf.getInt("dfs.replication", 3)); if (files != null) { if (showCorruptFileBlocks && showOpenFiles) { listCorruptOpenFiles(); return; } if (showCorruptFileBlocks) { listCorruptFileBlocks(); return; } for (int i = 0; i < files.length; i++) { check(files[i], res); } out.println(res); // DFSck client scans for the string HEALTHY/CORRUPT to check the status // of file system and return appropriate code. Changing the output // string might break testcases. if (res.isHealthy()) { out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS); } else { out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS); } } else { out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS); } } catch (Throwable e) { String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS; LOG.warn(errMsg, e); out.println(e.getMessage()); out.print("\n\n" + errMsg); } finally { out.close(); } }
java
{ "resource": "" }
q160882
CoronaJTState.getPrettyReport
train
public String getPrettyReport(JobID jobId) { Map<TaskAttemptID, TaskLaunch> lastLaunch = new HashMap<TaskAttemptID, CoronaStateUpdate.TaskLaunch>(); Map<TaskAttemptID, TaskStatus.State> lastKnownStatus = new HashMap<TaskAttemptID, TaskStatus.State>(); JTFailoverMetrics jtFailoverMetrics = new JTFailoverMetrics(); for (CoronaStateUpdate update : updates) { if (update.getTaskLaunch() != null) { TaskLaunch launch = update.getTaskLaunch(); lastLaunch.put(launch.getTaskId(), launch); } else if (update.getTaskStatus() != null) { TaskStatus status = update.getTaskStatus(); lastKnownStatus.put(status.getTaskID(), status.getRunState()); jtFailoverMetrics.update(status); } } StringBuilder result = new StringBuilder(); result.append("CoronaJTState report"); if (jobId != null) { result.append(" for job ").append(jobId); } for (CoronaStateUpdate update : updates) { TaskLaunch launch = update.getTaskLaunch(); if (launch != null) { result.append("\n").append(launch).append(" last known "); result.append(lastKnownStatus.get(launch.getTaskId())); } } if (sessionId != null && !sessionId.isEmpty()) { result.append("\n Session id ").append(sessionId); } result.append("\nThis remoteJobTracker failover totally saved: "); result.append("\nmappers ").append(jtFailoverMetrics.savedMappers). append(" map cpu ").append(jtFailoverMetrics.savedMapCPU). append(" map wallclock ").append(jtFailoverMetrics.savedMapWallclock); result.append("\nreducers ").append(jtFailoverMetrics.savedReducers). append(" reduce cpu ").append(jtFailoverMetrics.savedReduceCPU). append(" reduce wallclock ").append(jtFailoverMetrics.savedReduceWallclock); return result.toString(); }
java
{ "resource": "" }
q160883
Task.getFileSystemCounterNames
train
protected static String[] getFileSystemCounterNames(String uriScheme) { String scheme = uriScheme.toUpperCase(); return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN", scheme+"_FILES_CREATED", scheme + "_BYTES_READ_LOCAL", scheme + "_BYTES_READ_RACK", scheme + "_READ_EXCEPTIONS", scheme + "_WRITE_EXCEPTIONS"}; }
java
{ "resource": "" }
q160884
Task.reportNextRecordRange
train
protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical, long nextRecIndex) throws IOException{ //currentRecStartIndex is the start index which has not yet been finished //and is still in task's stomach. long len = nextRecIndex - currentRecStartIndex +1; SortedRanges.Range range = new SortedRanges.Range(currentRecStartIndex, len); taskStatus.setNextRecordRange(range); LOG.debug("sending reportNextRecordRange " + range); umbilical.reportNextRecordRange(taskId, range); }
java
{ "resource": "" }
q160885
Task.updateGCcounters
train
void updateGCcounters(){ long gccount = 0; long gctime = 0; for(GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) { long count = gc.getCollectionCount(); if(count >= 0) { gccount += count; } long time = gc.getCollectionTime(); if(time >= 0) { gctime += time; } } Iterator beans = ManagementFactory.getMemoryPoolMXBeans().iterator(); long aftergc = 0; long maxaftergc = 0; while (beans.hasNext()){ MemoryPoolMXBean bean = (MemoryPoolMXBean) beans.next(); String beanname = bean.getName(); if(!beanname.toUpperCase().contains("OLD GEN")) continue; MemoryUsage mu = bean.getCollectionUsage(); if(mu == null) continue; aftergc = mu.getUsed(); if(aftergc > maxaftergc) { maxaftergc = aftergc; } } counters.findCounter(GC_COUNTER_GROUP,"Total number of GC") .setValue(gccount); counters.findCounter(GC_COUNTER_GROUP,"Total time of GC in milliseconds") .setValue(gctime); counters.findCounter(GC_COUNTER_GROUP,"Heap size after last GC in bytes") .setValue(maxaftergc); long currentMax = counters.findCounter(GC_COUNTER_GROUP,"Max heap size after GC in bytes") .getValue(); if(maxaftergc>currentMax){ counters.findCounter(GC_COUNTER_GROUP,"Max heap size after GC in bytes") .setValue(maxaftergc); } }
java
{ "resource": "" }
q160886
Task.updateResourceCounters
train
void updateResourceCounters() { if (resourceCalculator == null) { return; } ProcResourceValues res = resourceCalculator.getProcResourceValues(); long cpuTime = res.getCumulativeCpuTime(); long pMem = res.getPhysicalMemorySize(); long vMem = res.getVirtualMemorySize(); long cpuJvmTime = this.jmxThreadInfoTracker.getCumulativeCPUTime(); // Remove the CPU time consumed previously by JVM reuse cpuTime -= initCpuCumulativeTime; cpuJvmTime -= this.initJvmCpuCumulativeTime; counters.findCounter(Counter.CPU_MILLISECONDS).setValue(cpuTime); counters.findCounter(Counter.PHYSICAL_MEMORY_BYTES).setValue(pMem); counters.findCounter(Counter.VIRTUAL_MEMORY_BYTES).setValue(vMem); counters.findCounter(Counter.CPU_MILLISECONDS_JVM).setValue(cpuJvmTime); if(isMapTask()) { //Mapper Task counters.findCounter(MapCounter.MAP_CPU_MILLISECONDS).setValue(cpuTime); } else { counters.findCounter(ReduceCounter.REDUCE_CPU_MILLISECONDS).setValue(cpuTime); } }
java
{ "resource": "" }
q160887
Task.loadStaticResolutions
train
public static void loadStaticResolutions(Configuration conf) { String hostToResolved[] = conf.getStrings("hadoop.net.static.resolutions"); if (hostToResolved != null) { for (String str : hostToResolved) { String name = str.substring(0, str.indexOf('=')); String resolvedName = str.substring(str.indexOf('=') + 1); NetUtils.addStaticResolution(name, resolvedName); } } }
java
{ "resource": "" }
q160888
Task.saveStaticResolutions
train
public static boolean saveStaticResolutions(Configuration conf) { List<String[]> staticResolutions = NetUtils.getAllStaticResolutions(); if (staticResolutions != null && staticResolutions.size() > 0) { StringBuffer str = new StringBuffer(); for (int i = 0; i < staticResolutions.size(); i++) { String[] hostToResolved = staticResolutions.get(i); str.append(hostToResolved[0]+"="+hostToResolved[1]); if (i != staticResolutions.size() - 1) { str.append(','); } } conf.set("hadoop.net.static.resolutions", str.toString()); return true; } return false; }
java
{ "resource": "" }
q160889
ResourceTracker.getResourceUsage
train
public ResourceUsage getResourceUsage() { int totalMapperGrants = 0; int totalReducerGrants = 0; synchronized (lockObject) { for (Map.Entry<Integer, ResourceGrant> entry : grantedResources.entrySet()) { switch(entry.getValue().getType()) { case MAP: ++totalMapperGrants; break; case REDUCE: ++totalReducerGrants; break; case JOBTRACKER: // Ignore for now break; default: throw new RuntimeException("Illegal type " + entry.getValue().getType()); } } } return new ResourceUsage(totalMapperGrants, totalReducerGrants); }
java
{ "resource": "" }
q160890
ResourceTracker.getWantedResources
train
public List<ResourceRequest> getWantedResources() { List<ResourceRequest> wanted = new ArrayList<ResourceRequest>(); synchronized(lockObject) { for (Integer requestId: setDifference(requestMap.keySet(), requestedResources.keySet())) { ResourceRequest req = requestMap.get(requestId); LOG.info("Filing request for resource " + requestId); requestedResources.put(requestId, req); wanted.add(req); } } return wanted; }
java
{ "resource": "" }
q160891
ResourceTracker.getResourcesToRelease
train
public List<ResourceRequest> getResourcesToRelease() { List<ResourceRequest> release = new ArrayList<ResourceRequest>(); synchronized(lockObject) { for (Integer requestId: setDifference(requestedResources.keySet(), requestMap.keySet())) { // We update the data structures right away. This assumes that the // caller will be able to release the resources. ResourceRequest req = requestedResources.remove(requestId); if (req != null) { release.add(req); LOG.info("Filing release for requestId: " + req.getId()); } } } return release; }
java
{ "resource": "" }
q160892
ResourceTracker.releaseResource
train
public void releaseResource(int resourceId) { synchronized (lockObject) { ResourceRequest req = requestedResources.get(resourceId); removeRequestUnprotected(req); } }
java
{ "resource": "" }
q160893
ResourceTracker.addNewGrants
train
public void addNewGrants(List<ResourceGrant> grants) { int numGranted = 0; int numAvailable = 0; synchronized(lockObject) { for (ResourceGrant grant: grants) { Integer requestId = grant.getId(); if (!requestedResources.containsKey(requestId) || !requestMap.containsKey(requestId)) { LOG.info("Request for grant " + grant.getId() + " no longer exists"); continue; } assert !grantedResources.containsKey(grant.getId()) : "Grant " + grant.getId() + " has already been processed."; updateTrackerAddressUnprotected(grant); addGrantedResourceUnprotected(grant); } updateGrantStatsUnprotected(); numGranted = grantedResources.size(); numAvailable = availableResources.size(); lockObject.notify(); } LOG.info("Number of available grants: " + numAvailable + " out of " + numGranted); }
java
{ "resource": "" }
q160894
ResourceTracker.updateTrackerAddr
train
public void updateTrackerAddr(String trackerName, InetAddress addr) { synchronized (lockObject) { trackerAddress.put(trackerName, addr); } }
java
{ "resource": "" }
q160895
Server.getRemoteIp
train
public static InetAddress getRemoteIp() { Call call = CurCall.get(); if (call != null) { return call.connection.socket.getInetAddress(); } return null; }
java
{ "resource": "" }
q160896
Server.getCurrentUGI
train
public static UserGroupInformation getCurrentUGI() { try { // Check original caller's UGI in case call went through proxy UserGroupInformation origUGI = OrigUGI.get(); if (origUGI != null) { return origUGI; } // If original caller's UGI is not set then we get the UGI of connecting client Call call = CurCall.get(); if (call != null) { return call.connection.header.getUgi(); } } catch (Exception e) { } return null; }
java
{ "resource": "" }
q160897
Server.delayResponse
train
public static long delayResponse() { Call call = CurCall.get(); long res = 0; if (call != null) { call.delayResponse(); res = delayedRpcId.getAndIncrement(); delayedCalls.put(res, call); } return res; }
java
{ "resource": "" }
q160898
Server.getRemoteAddress
train
public static String getRemoteAddress() { InetAddress addr = getRemoteIp(); return (addr == null) ? null : addr.getHostAddress(); }
java
{ "resource": "" }
q160899
Server.bind
train
public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. if ("Unresolved address".equals(e.getMessage())) { throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } else { throw e; } } }
java
{ "resource": "" }