_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160400
CompletedJobStatusStore.readCounters
train
/**
 * Reads the counters persisted for the given job, if the store is active.
 *
 * @param jobId job whose counters are requested
 * @return the job's Counters, or null when unavailable or on read error
 */
public Counters readCounters(JobID jobId) {
  Counters counters = null;
  if (active) {
    try {
      FSDataInputStream dataIn = getJobInfoFile(jobId);
      if (dataIn != null) {
        try {
          // Skip the status and profile records that precede the counters.
          readJobStatus(dataIn);
          readJobProfile(dataIn);
          counters = readCounters(dataIn);
        } finally {
          // Bug fix: the original leaked the stream if an intermediate
          // read threw; close it unconditionally.
          dataIn.close();
        }
      }
    } catch (IOException ex) {
      LOG.warn("Could not read [" + jobId + "] job counters : " + ex, ex);
    }
  }
  return counters;
}
java
{ "resource": "" }
q160401
CompletedJobStatusStore.readJobTaskCompletionEvents
train
/**
 * Reads the persisted task-completion events for a job, if the store is active.
 *
 * @param jobId job whose events are requested
 * @param fromEventId index of the first event to return
 * @param maxEvents maximum number of events to return
 * @return the events, or TaskCompletionEvent.EMPTY_ARRAY when unavailable
 */
public TaskCompletionEvent[] readJobTaskCompletionEvents(JobID jobId,
    int fromEventId, int maxEvents) {
  TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY;
  if (active) {
    try {
      FSDataInputStream dataIn = getJobInfoFile(jobId);
      if (dataIn != null) {
        try {
          // Skip the records that precede the events in the file layout.
          readJobStatus(dataIn);
          readJobProfile(dataIn);
          readCounters(dataIn);
          events = readEvents(dataIn, fromEventId, maxEvents);
        } finally {
          // Bug fix: the original leaked the stream if an intermediate
          // read threw; close it unconditionally.
          dataIn.close();
        }
      }
    } catch (IOException ex) {
      LOG.warn("Could not read [" + jobId + "] job events : " + ex, ex);
    }
  }
  return events;
}
java
{ "resource": "" }
q160402
UTF8ByteArrayUtils.findBytes
train
/**
 * Finds the first occurrence of the byte pattern {@code b} in {@code utf}
 * within the half-open range [start, end).
 *
 * @return index where the match begins, or -1 when no match exists
 */
public static int findBytes(byte[] utf, int start, int end, byte[] b) {
  final int lastStart = end - b.length;
  for (int pos = start; pos <= lastStart; pos++) {
    int k = 0;
    while (k < b.length && utf[pos + k] == b[k]) {
      k++;
    }
    if (k == b.length) {
      return pos;
    }
  }
  return -1;
}
java
{ "resource": "" }
q160403
HadoopJob.getStatus
train
/**
 * Builds a one-line human-readable summary of map/reduce completion,
 * e.g. "Maps : 3/10 (0.3) Reduces : 0/2 (0.0)".
 */
public String getStatus() {
  // Idiom fix: use an unsynchronized StringBuilder and append pieces
  // directly instead of concatenating strings inside append() calls.
  StringBuilder s = new StringBuilder();
  s.append("Maps : ").append(completedMaps).append('/').append(totalMaps);
  s.append(" (").append(mapProgress).append(')');
  s.append(" Reduces : ").append(completedReduces).append('/').append(totalReduces);
  s.append(" (").append(reduceProgress).append(')');
  return s.toString();
}
java
{ "resource": "" }
q160404
HadoopJob.update
train
// Refreshes this job's cached state (counters, completion flags, progress)
// from the RunningJob handle, then derives completed task counts.
void update(JobStatus status) {
  this.status = status;
  try {
    this.counters = running.getCounters();
    this.completed = running.isComplete();
    this.successful = running.isSuccessful();
    this.mapProgress = running.mapProgress();
    this.reduceProgress = running.reduceProgress();
    // running.getTaskCompletionEvents(fromEvent);
  } catch (IOException ioe) {
    ioe.printStackTrace();
  }
  // Completed counts are approximated from the progress fractions.
  this.completedMaps = (int) (this.totalMaps * this.mapProgress);
  this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
}
java
{ "resource": "" }
q160405
FlushableLogger.setFlush
train
// Toggles immediate-flush on every log4j FileAppender reachable from the
// current logger repository. Each appender is touched at most once.
private synchronized void setFlush(boolean immediateFlush) {
  try {
    Set<FileAppender> flushedFileAppenders = new HashSet<FileAppender>();
    Enumeration<?> currentLoggers = LogManager.getLoggerRepository()
        .getCurrentLoggers();
    while (currentLoggers.hasMoreElements()) {
      Object nextLogger = currentLoggers.nextElement();
      if (nextLogger instanceof Logger) {
        Logger currentLogger = (Logger) nextLogger;
        // NOTE(review): appenders are read from each logger's *parent*,
        // presumably because appenders are attached there — confirm.
        Enumeration<?> allAppenders = currentLogger.getParent()
            .getAllAppenders();
        while (allAppenders.hasMoreElements()) {
          Object nextElement = allAppenders.nextElement();
          if (nextElement instanceof FileAppender) {
            FileAppender fileAppender = (FileAppender) nextElement;
            if (!flushedFileAppenders.contains(fileAppender)) {
              flushedFileAppenders.add(fileAppender);
              fileAppender.setImmediateFlush(immediateFlush);
            }
          }
        }
      }
    }
  } catch (Throwable e) {
    // Catch Throwable: toggling log flushing must never crash the process.
    LOG.error("Failed flushing logs", e);
  }
}
java
{ "resource": "" }
q160406
PersistentState.readState
train
public static void readState(String fname) { filename = fname; try { persData.load(new FileInputStream(filename)); } catch (FileNotFoundException e1) { // ignore } catch (IOException e) { e.printStackTrace(); } }
java
{ "resource": "" }
q160407
PersistentState.getState
train
/**
 * Returns the saved ParseState for the given file name, or a default state
 * (null first line, offset 0) when the stored entry is missing or malformed.
 */
public static ParseState getState(String fname) {
  String stored = persData.getProperty(fname, "null" + SEPARATOR + "0");
  String[] fields = stored.split(SEPARATOR, 2);
  String firstLine = null;
  long offset = 0;
  if (fields.length < 2) {
    System.err.println("Malformed persistent state data found");
    Environment.logInfo("Malformed persistent state data found");
  } else {
    // "null" is the sentinel written for an absent first line.
    if (!fields[0].equals("null")) {
      firstLine = fields[0];
    }
    offset = Long.parseLong(fields[1]);
  }
  return new ParseState(fname, firstLine, offset);
}
java
{ "resource": "" }
q160408
PersistentState.setState
train
/**
 * Records the given parse state in the persistent properties map.
 *
 * @param state state to save; a null state is logged and ignored
 */
public static void setState(ParseState state) {
  if (state == null) {
    System.err.println("Null state found");
    Environment.logInfo("Null state found");
    // Bug fix: the original fell through and dereferenced the null state.
    return;
  }
  persData.setProperty(state.filename, state.firstLine + SEPARATOR + state.offset);
}
java
{ "resource": "" }
q160409
PersistentState.updateState
train
/**
 * Updates the persisted state for a file: replaces the first line only when
 * a non-null value is supplied, and always replaces the offset.
 */
public static void updateState(String filename, String firstLine, long offset) {
  ParseState state = getState(filename);
  if (firstLine != null) {
    state.firstLine = firstLine;
  }
  state.offset = offset;
  setState(state);
}
java
{ "resource": "" }
q160410
PersistentState.writeState
train
/**
 * Persists the state properties to the given file, stamping the current
 * time in the file's comment header.
 *
 * @param fname destination file path
 */
public static void writeState(String fname) {
  // Bug fix: the original never closed the FileOutputStream.
  try (FileOutputStream out = new FileOutputStream(fname)) {
    persData.store(out, Calendar.getInstance().getTime().toString());
  } catch (FileNotFoundException e1) {
    e1.printStackTrace();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
java
{ "resource": "" }
q160411
DBInputFormat.getCountQuery
train
/**
 * Returns the SQL used to count input rows: the user-configured count query
 * when present, otherwise "SELECT COUNT(*) FROM &lt;table&gt; [WHERE &lt;conditions&gt;]".
 */
protected String getCountQuery() {
  if (dbConf.getInputCountQuery() != null) {
    return dbConf.getInputCountQuery();
  }
  // Idiom fix: append pieces directly instead of concatenating strings
  // inside StringBuilder.append().
  StringBuilder query = new StringBuilder();
  query.append("SELECT COUNT(*) FROM ").append(tableName);
  if (conditions != null && conditions.length() > 0) {
    query.append(" WHERE ").append(conditions);
  }
  return query.toString();
}
java
{ "resource": "" }
q160412
DFSPath.delete
train
// Deletes this path recursively from the DFS. Failures are shown to the
// user in a warning dialog rather than propagated to the caller.
public void delete() {
  try {
    getDFS().delete(this.path, true);
  } catch (IOException e) {
    e.printStackTrace();
    MessageDialog.openWarning(null, "Delete file",
        "Unable to delete file \"" + this.path + "\"\n" + e);
  }
}
java
{ "resource": "" }
q160413
DFSPath.getDFS
train
// Lazily resolves and caches the DistributedFileSystem for this location.
// Shows an error dialog and throws when the location's FS is not a DFS.
DistributedFileSystem getDFS() throws IOException {
  if (this.dfs == null) {
    FileSystem fs = location.getDFS();
    if (!(fs instanceof DistributedFileSystem)) {
      ErrorMessageDialog.display("DFS Browser",
          "The DFS Browser cannot browse anything else "
              + "but a Distributed File System!");
      throw new IOException("DFS Browser expects a DistributedFileSystem!");
    }
    this.dfs = (DistributedFileSystem) fs;
  }
  return this.dfs;
}
java
{ "resource": "" }
q160414
CapBasedLoadManager.getCap
train
// Caps a tracker's task count based on overall cluster load:
// min(localMaxTasks, ceil((maxDiff + runnable/slots) * localMaxTasks)).
int getCap(int totalRunnableTasks, int localMaxTasks, int totalSlots) {
  // load = configured slack (maxDiff) plus cluster-wide utilization.
  double load = maxDiff + ((double) totalRunnableTasks) / totalSlots;
  // Never exceed the tracker's own maximum.
  int cap = (int) Math.min(localMaxTasks, Math.ceil(load * localMaxTasks));
  if (LOG.isDebugEnabled()) {
    LOG.debug("load:" + load + " maxDiff:" + maxDiff + " totalRunnable:"
        + totalRunnableTasks + " totalSlots:" + totalSlots + " localMaxTasks:"
        + localMaxTasks + " cap:" + cap);
  }
  return cap;
}
java
{ "resource": "" }
q160415
ServerLogReaderTransactional.tryReloadingEditLog
train
// Recovery path after a read error: back off, re-detect segments, reopen
// the current segment's stream, and seek back to the last good position.
private void tryReloadingEditLog() throws IOException {
  LOG.info("Segment - trying to reload edit log segment");
  // sleep on error
  sleep(errorSleepTimeout);
  // check if a new segment exists
  checkProgress();
  // reopen current segment
  setupIngestStreamWithRetries(currentSegmentTxId);
  // set the position to last good position
  refreshStreamPosition();
}
java
{ "resource": "" }
q160416
ServerLogReaderTransactional.updateState
train
// Records that op was consumed: optionally validates its txid continuity,
// advances stream bookkeeping and metrics, and handles segment transitions
// (END_LOG_SEGMENT forces the next read to open a fresh stream).
private void updateState(FSEditLogOp op, boolean checkTxnId) throws IOException {
  InjectionHandler.processEvent(InjectionEvent.SERVERLOGREADER_UPDATE, op);
  if (checkTxnId) {
    mostRecentlyReadTransactionTxId = ServerLogReaderUtil.checkTransactionId(
        mostRecentlyReadTransactionTxId, op);
  }
  updateStreamPosition();
  // read a valid operation
  core.getMetrics().readOperations.inc();
  mostRecentlyReadTransactionTime = now();
  // current log segment ends normally
  if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
    LOG.info("Segment - ending log segment start txid: " + currentSegmentTxId
        + ", end txid: " + op.getTransactionId());
    // move forward with next segment
    currentSegmentTxId = op.getTransactionId() + 1;
    // set the stream to null so the next getNotification()
    // will recreate it
    currentEditLogInputStream = null;
    // indicate that a new stream will be opened
    currentEditLogInputStreamPosition = -1;
  } else if (op.opCode == FSEditLogOpCodes.OP_START_LOG_SEGMENT) {
    LOG.info("Segment - starting log segment start txid: " + currentSegmentTxId);
  }
}
java
{ "resource": "" }
q160417
ServerLogReaderTransactional.refreshStreamPosition
train
private void refreshStreamPosition() throws IOException { if (currentEditLogInputStreamPosition != -1) { // stream was reopened currentEditLogInputStream.refresh(currentEditLogInputStreamPosition, mostRecentlyReadTransactionTxId); } else { // freshly opened stream currentEditLogInputStreamPosition = currentEditLogInputStream.getPosition(); } }
java
{ "resource": "" }
q160418
ServerLogReaderTransactional.refreshInputStream
train
// Ensures currentEditLogInputStream is open and positioned correctly:
// re-opens it after a segment switch, and seeks back when resuming a
// previously consumed stream.
private void refreshInputStream() throws IOException {
  // if stream is null, we probably switched to a new
  // segment.
  if (currentEditLogInputStream == null) {
    LOG.info("Segment - setup input stream for txid: " + currentSegmentTxId);
    setupIngestStreamWithRetries(currentSegmentTxId);
    if (currentEditLogInputStreamPosition == -1) {
      // we are opening a fresh segment
      currentEditLogInputStreamPosition = currentEditLogInputStream.getPosition();
    }
  }
  // if we are re-opening stream previously consumed
  // set correct position
  if (currentEditLogInputStreamPosition != -1) {
    currentEditLogInputStream.refresh(currentEditLogInputStreamPosition,
        mostRecentlyReadTransactionTxId);
  }
}
java
{ "resource": "" }
q160419
ServerLogReaderTransactional.initialize
train
// Bootstraps the reader: detect the primary, find the oldest log segment,
// and open its stream. The whole sequence is retried up to 3 times; the
// last failure is rethrown.
protected void initialize() throws IOException {
  for (int i = 0; i < 3; i++) {
    try {
      LOG.info("Detecting current primary node - attempt " + i);
      detectJournalManager();
      LOG.info("Finding oldest segment txid - attempt " + i);
      currentSegmentTxId = findOldestLogSegmentTxid();
      LOG.info("Setting up input stream for txid: " + currentSegmentTxId
          + " - attempt " + i);
      setupIngestStreamWithRetries(currentSegmentTxId);
      return;
    } catch (IOException e) {
      LOG.warn("Initialization exception", e);
      if (i == 2) {
        // final attempt failed: surface the error
        LOG.error("Initialization failed.");
        throw e;
      }
    }
  }
}
java
{ "resource": "" }
q160420
ServerLogReaderTransactional.setupIngestStreamWithRetries
train
// Opens the edit stream for txid, retrying up to inputStreamRetries times
// with a 1-second pause between attempts; the last failure is wrapped
// with context and rethrown (preserving the cause).
private void setupIngestStreamWithRetries(long txid) throws IOException {
  for (int i = 0; i < inputStreamRetries; i++) {
    try {
      setupCurrentEditStream(txid);
      return;
    } catch (IOException e) {
      if (i == inputStreamRetries - 1) {
        throw new IOException("Cannot obtain stream for txid: " + txid, e);
      }
      LOG.info("Error :", e);
    }
    sleep(1000);
    LOG.info("Retrying to get edit input stream for txid: " + txid
        + ", tried: " + (i + 1) + " times");
  }
}
java
{ "resource": "" }
q160421
ServerLogReaderTransactional.setupCurrentEditStream
train
// Opens a fresh input stream for the segment starting at txid and resets
// per-segment bookkeeping.
private void setupCurrentEditStream(long txid) throws IOException {
  // get new stream
  currentEditLogInputStream = JournalSet.getInputStream(remoteJournalManager, txid);
  // we just started a new log segment
  currentSegmentTxId = txid;
  // indicate that we successfully reopened the stream
  mostRecentlyReadTransactionTime = now();
}
java
{ "resource": "" }
q160422
ServerLogReaderTransactional.segmentExists
train
/**
 * Returns true when the journal manifest contains a segment that starts
 * exactly at the given transaction id.
 */
boolean segmentExists(long txid) throws IOException {
  boolean found = false;
  for (RemoteEditLog segment : getManifest()) {
    if (segment.getStartTxId() == txid) {
      found = true;
      break;
    }
  }
  return found;
}
java
{ "resource": "" }
q160423
ServerLogReaderTransactional.getManifest
train
/**
 * Fetches the full segment manifest from the remote journal manager.
 * Throws when the manifest is missing or empty.
 */
List<RemoteEditLog> getManifest() throws IOException {
  // -1 requests the manifest from the beginning of the log.
  RemoteEditLogManifest manifest = remoteJournalManager.getEditLogManifest(-1);
  boolean empty = (manifest == null) || (manifest.getLogs().size() == 0);
  if (empty) {
    throw new IOException("Cannot obtain the list of log segments");
  }
  return manifest.getLogs();
}
java
{ "resource": "" }
q160424
ServerLogReaderTransactional.sleep
train
// Sleeps for ms milliseconds, converting interruption into an IOException
// after restoring the thread's interrupt flag.
protected void sleep(long ms) throws IOException {
  try {
    Thread.sleep(ms);
  } catch (InterruptedException e) {
    LOG.error("Interrupted when sleeping", e);
    // restore the interrupt status before surfacing the failure
    Thread.currentThread().interrupt();
    throw new IOException("Received interruption");
  }
}
java
{ "resource": "" }
q160425
SequenceFile.getCompressionType
train
/**
 * Reads the configured seqfile compression type.
 * Falls back to the historical default, RECORD, when unset.
 *
 * @deprecated retained for compatibility with old configurations
 */
@Deprecated
static public CompressionType getCompressionType(Configuration job) {
  final String name = job.get("io.seqfile.compression.type");
  if (name == null) {
    return CompressionType.RECORD;
  }
  return CompressionType.valueOf(name);
}
java
{ "resource": "" }
q160426
SequenceFile.setCompressionType
train
// Persists the compression type under its enum name so that
// getCompressionType can valueOf() it back.
@Deprecated static public void setCompressionType(Configuration job, CompressionType val) { job.set("io.seqfile.compression.type", val.toString()); }
java
{ "resource": "" }
q160427
NameCache.put
train
// Returns the cached (canonical) copy of name when present, bumping the
// lookup counter. During initialization, uncached names have their use
// counts tracked; a name reaching useThreshold is promoted into the cache
// and its tracked instance is returned. Returns null on first sighting or
// once initialization has completed.
K put(final K name) {
  K internal = cache.get(name);
  if (internal != null) {
    lookups++;
    return internal;
  }
  // Track the usage count only during initialization
  if (!initialized) {
    UseCount useCount = transientMap.get(name);
    if (useCount != null) {
      useCount.increment();
      if (useCount.get() >= useThreshold) {
        promote(name);
      }
      return useCount.value;
    }
    useCount = new UseCount(name);
    transientMap.put(name, useCount);
  }
  return null;
}
java
{ "resource": "" }
q160428
NameCache.initialized
train
void initialized() { LOG.info("initialized with " + size() + " entries " + lookups + " lookups"); this.initialized = true; transientMap.clear(); transientMap = null; }
java
{ "resource": "" }
q160429
NameCache.promote
train
// Moves a name from the transient tracking map into the permanent cache;
// the threshold's worth of tracked uses is folded into the lookup count.
private void promote(final K name) {
  transientMap.remove(name);
  cache.put(name, name);
  lookups += useThreshold;
}
java
{ "resource": "" }
q160430
PriorityQueue.initialize
train
@SuppressWarnings("unchecked") protected final void initialize(int maxSize) { size = 0; int heapSize = maxSize + 1; heap = (T[]) new Object[heapSize]; this.maxSize = maxSize; }
java
{ "resource": "" }
q160431
RemoteEditLogManifest.checkState
train
/**
 * Validates the manifest: logs must be non-null, and when the manifest is
 * marked contiguous, each segment must start strictly after the previous
 * segment ends.
 */
private void checkState() {
  Preconditions.checkNotNull(logs);
  if (!contiguous) {
    return;
  }
  RemoteEditLog previous = null;
  for (RemoteEditLog current : logs) {
    if (previous != null && current.getStartTxId() <= previous.getEndTxId()) {
      throw new IllegalStateException("Invalid log manifest:" + this);
    }
    previous = current;
  }
}
java
{ "resource": "" }
q160432
ReduceTaskStatus.getSortProcessingRate
train
@Override public double getSortProcessingRate(long currentTime) { long timeSpentSorting = 0; float progress = 0; Phase phase = getPhase(); long sortFinishTime = getSortFinishTime(); long shuffleFinishTime = getShuffleFinishTime(); if (phase == Phase.SHUFFLE ) { return 0; } else if (getPhase() == Phase.SORT) { if (shuffleFinishTime < currentTime) { LOG.error("Shuffle finish time is " + shuffleFinishTime + " which is < current time " + currentTime + " in " + this.getTaskID()); } timeSpentSorting = currentTime - shuffleFinishTime; progress = getProgress() - (float)1.0/3; if (progress < 0) { LOG.error("Shuffle progress calculated to be " + progress + " in task status for " + this.getTaskID() + ". Settings to 0"); progress = 0; } } else if (getPhase() == Phase.REDUCE) { // when it is reduce phase, use 33%/(sort finish time - shuffle // finish time as the progress rate. Using percentages instead of bytes // as it is tricky progress = (float)1.0/3; if (shuffleFinishTime <= sortFinishTime) { LOG.error("Shuffle finish fime is " + shuffleFinishTime + " which is <= sort finish time " + sortFinishTime + " in " + this.getTaskID()); return 0; } timeSpentSorting = sortFinishTime - shuffleFinishTime; } sortProcessingRate = progress/timeSpentSorting; return sortProcessingRate; }
java
{ "resource": "" }
q160433
ReduceTaskStatus.getReduceProcessingRate
train
/**
 * Estimates the reduce-phase processing rate in bytes per millisecond.
 * Returns 0 outside the reduce phase or on inconsistent timing.
 */
@Override
public double getReduceProcessingRate(long currentTime) {
  Phase phase = getPhase();
  if (phase != Phase.REDUCE) {
    return 0;
  }
  @SuppressWarnings("deprecation")
  long bytesProcessed = super.getCounters().findCounter(
      Task.Counter.REDUCE_INPUT_BYTES).getCounter();
  long sortFinishTime = getSortFinishTime();
  if (sortFinishTime >= currentTime) {
    LOG.error("Sort finish time is " + sortFinishTime
        + " which is >= current time " + currentTime + " in " + this.getTaskID());
    return 0;
  }
  long timeSpentInReduce = currentTime - sortFinishTime;
  // Bug fix: the original performed long/long integer division, which
  // truncated the rate to a whole number of bytes per millisecond.
  reduceProcessingRate = (double) bytesProcessed / timeSpentInReduce;
  return reduceProcessingRate;
}
java
{ "resource": "" }
q160434
SessionManager.readSessions
train
// Deserializes the "sessions" JSON object: each field name is a session id
// mapped to a serialized Session, which is registered in the sessions map.
private void readSessions(CoronaSerializer coronaSerializer) throws IOException {
  coronaSerializer.readField("sessions");
  // Expecting the START_OBJECT token for sessions
  coronaSerializer.readStartObjectToken("sessions");
  JsonToken current = coronaSerializer.nextToken();
  while (current != JsonToken.END_OBJECT) {
    String sessionId = coronaSerializer.getFieldName();
    Session session = new Session(clusterManager.conf.getCMHeartbeatDelayMax(),
        coronaSerializer);
    sessions.put(sessionId, session);
    current = coronaSerializer.nextToken();
  }
  // Done with reading the END_OBJECT token for sessions
}
java
{ "resource": "" }
q160435
SessionManager.restoreAfterSafeModeRestart
train
// After a safe-mode restart, re-registers every session's resource
// requests with the NodeManager and re-adds each session to the scheduler,
// then refreshes the running-session metric. No-op outside safe mode.
public void restoreAfterSafeModeRestart() {
  if (!clusterManager.safeMode) {
    return;
  }
  for (Session session : sessions.values()) {
    for (ResourceRequestInfo resourceRequestInfo : session.idToRequest.values()) {
      // The helper method to restore the ResourceRequestInfo instances
      // is placed in NodeManager because it makes use of other members
      // of NodeManager
      clusterManager.nodeManager.restoreResourceRequestInfo(resourceRequestInfo);
    }
    session.restoreAfterSafeModeRestart();
    clusterManager.getScheduler().addSession(session.getSessionId(), session);
  }
  clusterManager.getMetrics().setNumRunningSessions(sessions.size());
}
java
{ "resource": "" }
q160436
SessionManager.write
train
public void write(JsonGenerator jsonGenerator) throws IOException { jsonGenerator.writeStartObject(); // retiredSessions and numRetiredSessions need not be persisted // sessionCounter can be set to 0, when the SessionManager is instantiated // sessions begins jsonGenerator.writeFieldName("sessions"); jsonGenerator.writeStartObject(); for (String sessionId : sessions.keySet()) { jsonGenerator.writeFieldName(sessionId); sessions.get(sessionId).write(jsonGenerator); } jsonGenerator.writeEndObject(); // sessions ends jsonGenerator.writeNumberField("sessionCounter", sessionCounter.longValue()); jsonGenerator.writeEndObject(); // We can rebuild runnableSessions // No need to write startTime and numRetiredSessions }
java
{ "resource": "" }
q160437
SessionManager.getTypePoolInfoAveFirstWaitMs
train
// Computes, per pool, the average "first wait" milliseconds across all
// live (non-deleted) sessions for the given resource type.
public Map<PoolInfo, Long> getTypePoolInfoAveFirstWaitMs(ResourceType type) {
  Map<PoolInfo, WaitCount> poolInfoWaitCount = new HashMap<PoolInfo, WaitCount>();
  for (Session session : sessions.values()) {
    // lock each session while reading its wait/pool state
    synchronized (session) {
      if (!session.isDeleted()) {
        Long wait = session.getTypeFirstWaitMs(type);
        if (wait == null) {
          continue;
        }
        WaitCount waitCount = poolInfoWaitCount.get(session.getPoolInfo());
        if (waitCount == null) {
          poolInfoWaitCount.put(session.getPoolInfo(), new WaitCount(wait));
        } else {
          waitCount.addWaitMsecs(wait);
        }
      }
    }
  }
  // Reduce the accumulated (sum, count) pairs to per-pool averages.
  Map<PoolInfo, Long> poolInfoWaitMs =
      new HashMap<PoolInfo, Long>(poolInfoWaitCount.size());
  for (Map.Entry<PoolInfo, WaitCount> entry : poolInfoWaitCount.entrySet()) {
    poolInfoWaitMs.put(entry.getKey(), entry.getValue().getAverageWait());
  }
  return poolInfoWaitMs;
}
java
{ "resource": "" }
q160438
StreamFile.getDFSClient
train
// Builds a DFSClient that acts as the user identified by the request, by
// stamping the request's UGI into a copy of the master configuration.
protected DFSClient getDFSClient(HttpServletRequest request)
    throws IOException, InterruptedException {
  Configuration conf = new Configuration(masterConf);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
  return JspHelper.getDFSClient(request, conf);
}
java
{ "resource": "" }
q160439
StreamFile.getDatanodes
train
/**
 * Parses the "candidates" request parameter (space-separated name:port
 * pairs) into DatanodeID objects.
 *
 * @return the parsed datanodes, or null when the parameter is absent or empty
 * @throws IOException when an entry is not a valid name:port pair
 */
private DatanodeID[] getDatanodes(HttpServletRequest request) throws IOException {
  final String datanodes = request.getParameter("candidates");
  if (datanodes == null) {
    return null;
  }
  final String[] datanodeStrs = datanodes.split(" ");
  if (datanodeStrs.length == 0) {
    return null;
  }
  final DatanodeID[] dnIDs = new DatanodeID[datanodeStrs.length];
  for (int i = 0; i < dnIDs.length; i++) {
    int colon = datanodeStrs[i].indexOf(":");
    if (colon < 0) {
      throw new IOException("Invalid datanode name " + datanodeStrs[i]
          + ", expecting name:port pair");
    }
    // (removed the dead pre-assignment of hostName present in the original)
    String hostName = datanodeStrs[i].substring(0, colon);
    int infoPort;
    try {
      infoPort = Integer.parseInt(datanodeStrs[i].substring(colon + 1));
    } catch (NumberFormatException ne) {
      throw new IOException("Invalid datanode name " + datanodeStrs[i]
          + ", expecting name:port pair", ne);
    }
    // NOTE(review): remaining constructor args (null, -1) are assumed to be
    // "unknown" placeholders for storage id / ipc port — confirm.
    dnIDs[i] = new DatanodeID(hostName, null, infoPort, -1);
  }
  return dnIDs;
}
java
{ "resource": "" }
q160440
FSEditLogLoader.loadFSEdits
train
// Loads edits from the stream, remembering lastAppliedTxId, and logs a
// summary (file size, edit count, elapsed seconds). Returns the number of
// edits applied.
int loadFSEdits(EditLogInputStream edits, long lastAppliedTxId) throws IOException {
  long startTime = now();
  this.lastAppliedTxId = lastAppliedTxId;
  int numEdits = loadFSEdits(edits, true);
  FSImage.LOG.info("Edits file " + edits.toString() + " of size: " + edits.length()
      + ", # of edits: " + numEdits + " loaded in: " + (now() - startTime) / 1000
      + " seconds.");
  return numEdits;
}
java
{ "resource": "" }
q160441
FSEditLogLoader.checkFail
train
/**
 * Handles a transaction-id mismatch: aborts with an IOException when the
 * namesystem is configured to fail, otherwise prompts for manual recovery.
 */
private void checkFail(String errorMsg) throws IOException {
  if (!fsNamesys.failOnTxIdMismatch()) {
    MetaRecoveryContext.editLogLoaderPrompt(errorMsg);
    return;
  }
  FSEditLog.LOG.error(errorMsg);
  throw new IOException(errorMsg);
}
java
{ "resource": "" }
q160442
FSEditLogLoader.validateEditLog
train
// Scans an edit log stream to determine its valid extent: the last
// readable position, the first and last transaction ids seen, and a count
// of valid ops. A failed readOp triggers a resync and scanning continues;
// only an error outside readOp (or end of stream) stops the scan.
public static EditLogValidation validateEditLog(EditLogInputStream in) {
  long lastPos = 0;
  long firstTxId = HdfsConstants.INVALID_TXID;
  long lastTxId = HdfsConstants.INVALID_TXID;
  long numValid = 0;
  try {
    FSEditLogOp op = null;
    while (true) {
      lastPos = in.getPosition();
      try {
        if ((op = in.readOp()) == null) {
          break;
        }
      } catch (Throwable t) {
        FSImage.LOG.warn("Caught exception after reading " + numValid
            + " ops from " + in + " while determining its valid length."
            + "Position was " + lastPos, t);
        in.resync();
        FSImage.LOG.info("After resync, position is " + in.getPosition());
        continue;
      }
      if (firstTxId == HdfsConstants.INVALID_TXID) {
        firstTxId = op.getTransactionId();
      }
      if (lastTxId == HdfsConstants.INVALID_TXID || op.txid > lastTxId) {
        lastTxId = op.getTransactionId();
      } else {
        // out-of-order txid: logged, but the op still counts as valid
        FSImage.LOG.error("Out of order txid found. Found " + op.txid
            + ", expected " + (lastTxId + 1));
      }
      numValid++;
    }
  } catch (Throwable t) {
    // Catch Throwable and not just IOE, since bad edits may generate
    // NumberFormatExceptions, AssertionErrors, OutOfMemoryErrors, etc.
    FSImage.LOG.debug("Caught exception after reading " + numValid
        + " ops from " + in + " while determining its valid length.", t);
  }
  return new EditLogValidation(lastPos, firstTxId, lastTxId, false);
}
java
{ "resource": "" }
q160443
CoronaJobInProgress.initTasks
train
// Creates this job's map/reduce tasks: uses the cached splits when
// available, otherwise reads them from the job.split file next to the job
// file; finally records the job as initialized in the history log.
public void initTasks() throws IOException {
  // log job info
  jobHistory.logSubmitted(jobFile.toString(), this.startTime, this.jobTrackerId);
  // log the job priority
  JobClient.RawSplit[] splits = null;
  splits = JobClient.getAndRemoveCachedSplits(jobId);
  if (splits == null) {
    FileSystem fs = jobFile.getFileSystem(jobConf);
    Path splitFile = new Path(jobFile.getParent(), "job.split");
    LOG.info("Reading splits from " + splitFile);
    DataInputStream splitFileIn = fs.open(splitFile);
    try {
      splits = JobClient.readSplitFile(splitFileIn);
    } finally {
      splitFileIn.close();
    }
  }
  initTasksFromSplits(splits);
  jobHistory.logInited(this.launchTime, numMapTasks, numReduceTasks);
}
java
{ "resource": "" }
q160444
CoronaJobInProgress.removeMatchingTipUnprotectedUnconditional
train
/**
 * Removes from taskList the first entry whose TIP id matches intendedTip.
 *
 * @return the removed TaskInProgress, or null when no entry matched
 */
private TaskInProgress removeMatchingTipUnprotectedUnconditional(
    List<TaskInProgress> taskList, TaskInProgress intendedTip) {
  final Iterator<TaskInProgress> it = taskList.iterator();
  while (it.hasNext()) {
    TaskInProgress candidate = it.next();
    if (candidate.getTIPId().equals(intendedTip.getTIPId())) {
      it.remove();
      return candidate;
    }
  }
  return null;
}
java
{ "resource": "" }
q160445
CoronaJobInProgress.forceNewReduceTaskForTip
train
// Forces the given reduce TIP to run on the specified tracker, bypassing
// normal scheduling. Returns a pending cleanup task if one exists,
// otherwise the launched Task, or null when the TIP yields nothing to run.
public Task forceNewReduceTaskForTip(String taskTrackerName, String hostName,
    TaskInProgress forcedTip) {
  synchronized (lockObject) {
    Task result = obtainTaskCleanupTask(taskTrackerName, forcedTip);
    if (result != null) {
      return result;
    }
    // NOTE(review): this removes the TIP from nonRunningMaps in a
    // reduce-task path — verify it isn't meant to be nonRunningReduces.
    removeMatchingTipUnprotectedUnconditional(nonRunningMaps, forcedTip);
    LOG.info("Running task " + forcedTip.getTIPId() + " on " + taskTrackerName
        + "(" + hostName + ")");
    scheduleReduceUnprotected(forcedTip);
    result = forcedTip.getTaskToRun(taskTrackerName);
    if (result != null) {
      addRunningTaskToTIPUnprotected(forcedTip, result.getTaskID(),
          taskTrackerName, hostName, true);
      // Handle cleanup task
      setJobCleanupTaskState(result);
    }
    return result;
  }
}
java
{ "resource": "" }
q160446
CoronaJobInProgress.canTrackerBeUsed
train
// A tracker may run this TIP as long as the TIP has not already failed on
// the tracker's host. The taskTracker name parameter is currently unused;
// only the host is checked.
public boolean canTrackerBeUsed(
    String taskTracker, String trackerHost, TaskInProgress tip) {
  synchronized (lockObject) {
    return !tip.hasFailedOnMachine(trackerHost);
  }
}
java
{ "resource": "" }
q160447
CoronaJobInProgress.setJobCleanupTaskState
train
/**
 * For a job-cleanup task, records the job's terminal state so the cleanup
 * task knows whether the job failed, was killed, or succeeded. No-op for
 * other task kinds.
 */
private void setJobCleanupTaskState(Task task) {
  if (!task.isJobCleanupTask()) {
    return;
  }
  final org.apache.hadoop.mapreduce.JobStatus.State state;
  if (jobFailed) {
    state = org.apache.hadoop.mapreduce.JobStatus.State.FAILED;
  } else if (jobKilled) {
    state = org.apache.hadoop.mapreduce.JobStatus.State.KILLED;
  } else {
    state = org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED;
  }
  task.setJobCleanupTaskState(state);
}
java
{ "resource": "" }
q160448
JournalConfigHelper.getJournalHttpHosts
train
/**
 * Returns the configured journal-node hosts as host:port HTTP addresses,
 * appending the default HTTP port to entries that lack an explicit one.
 */
public static String[] getJournalHttpHosts(Configuration conf) {
  Collection<String> hosts =
      conf.getStringCollection(JournalConfigKeys.DFS_JOURNALNODE_HOSTS);
  int defaultHttpPort = JournalConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
  String[] httpAddresses = new String[hosts.size()];
  int next = 0;
  for (String host : hosts) {
    String address = host;
    if (!address.contains(":")) {
      address = address + ":" + defaultHttpPort;
    }
    httpAddresses[next++] = address;
  }
  return httpAddresses;
}
java
{ "resource": "" }
q160449
JournalConfigHelper.getAddress
train
/**
 * Resolves the journal-node HTTP socket address from configuration,
 * falling back to the default address and port.
 */
public static InetSocketAddress getAddress(Configuration conf) {
  final String configured = conf.get(
      JournalConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
      JournalConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
  return NetUtils.createSocketAddr(configured,
      JournalConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT);
}
java
{ "resource": "" }
q160450
Node.addChild
train
// Attaches child to this node, rejecting children already parented under
// another node. Returns whether the child was actually added.
public synchronized boolean addChild(Node child) {
  if (child.parent != null) {
    throw new IllegalArgumentException(
        "The child is already under another node:" + child.parent);
  }
  checkChildren();
  boolean retval = children.add(child);
  // only claim parenthood when the collection actually accepted the child
  if (retval) child.parent = this;
  return retval;
}
java
{ "resource": "" }
q160451
HourGlass.updateShares
train
static private void updateShares(Cluster clusters[]) { assert(clusters.length == 2); if (clusters[0].runnableMaps == 0 && clusters[0].runnableMaps == 0 && clusters[1].runnableReduces == 0 && clusters[1].runnableReduces == 0) { // Do nothing if both clusters are empty return; } // Update target task shares using runnable tasks and weight if (!(clusters[0].runnableMaps == 0 && clusters[1].runnableMaps == 0)) { clusters[0].targetMapShare = clusters[0].runnableMaps * clusters[0].weight / (clusters[0].runnableMaps * clusters[0].weight + clusters[1].runnableMaps * clusters[1].weight); clusters[1].targetMapShare = 1 - clusters[0].targetMapShare; } if (!(clusters[0].runnableReduces == 0 && clusters[1].runnableReduces == 0)) { clusters[0].targetReduceShare = clusters[0].runnableReduces * clusters[0].weight / (clusters[0].runnableReduces * clusters[0].weight + clusters[1].runnableReduces * clusters[1].weight); clusters[1].targetReduceShare = 1 - clusters[0].targetReduceShare; } for (int i = 0; i < 2; ++i) { LOG.info(String.format("Update Shares. " + "cluster%s:%s runnableMaps:%s runnableReduces:%s " + "weight:%s targetMapShare:%s targetReduceShare:%s", i, clusters[i].address, clusters[i].weight, clusters[i].runnableMaps, clusters[i].runnableReduces, clusters[i].targetMapShare, clusters[i].targetReduceShare)); } }
java
{ "resource": "" }
q160452
HourGlass.run
train
// Main balancing loop: polls every updateInterval/10 ms and rebalances
// slots at most once per updateInterval. Exceptions are logged and the
// loop keeps running until the `running` flag is cleared.
@Override public void run() {
  long lastUpdate = -1L;
  // Start balancing the clusters
  while (running) {
    try {
      Thread.sleep(updateInterval / 10);
      long now = JobTracker.getClock().getTime();
      if (now - lastUpdate > updateInterval) {
        lastUpdate = now;
        doMoveSlots(clusters);
      }
    } catch (Exception e) {
      LOG.error("Exception while balancing cluster.", e);
    }
  }
}
java
{ "resource": "" }
q160453
HourGlass.getTotalSlots
train
/**
 * Returns the effective slot count for a tracker: the smaller of the
 * CPU-count-based default and the tracker's own configured maximum.
 */
private int getTotalSlots(TaskTrackerStatus status, TaskType type) {
  final boolean isMap = (type == TaskType.MAP);
  Map<Integer, Integer> cpuToMaxSlots =
      isMap ? defaultCpuToMaxMapSlots : defaultCpuToMaxReduceSlots;
  int cpus = status.getResourceStatus().getNumProcessors();
  Integer slots = cpuToMaxSlots.get(cpus);
  if (slots == null) {
    // no per-CPU-count override configured: fall back to the global default
    slots = isMap ? defaultMaxMapSlots : defaultMaxReduceSlots;
  }
  int trackerSlots = isMap ? status.getMaxMapSlots() : status.getMaxReduceSlots();
  return Math.min(slots, trackerSlots);
}
java
{ "resource": "" }
q160454
HourGlass.createClient
train
// Creates an RPC proxy to the FairScheduler at the given host:port target,
// authenticated as the current user.
private static FairSchedulerProtocol createClient(
    String target, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(target);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
  LOG.info("Connecting to " + addr);
  return (FairSchedulerProtocol) RPC.getProxy(FairSchedulerProtocol.class,
      FairSchedulerProtocol.versionID, addr, ugi, conf,
      NetUtils.getSocketFactory(conf, FairSchedulerProtocol.class));
}
java
{ "resource": "" }
q160455
HourGlass.main
train
// Entry point: constructs an HourGlass balancer with default configuration
// and runs it; any fatal error is logged and the process exits with -1.
public static void main(String argv[]) {
  StringUtils.startupShutdownMessage(HourGlass.class, argv, LOG);
  try {
    HourGlass hourGlass = new HourGlass(new Configuration());
    hourGlass.run();
  } catch (Throwable e) {
    LOG.fatal(StringUtils.stringifyException(e));
    System.exit(-1);
  }
}
java
{ "resource": "" }
q160456
ConfigManager.reloadConfigsIfNecessary
train
// Reloads the config file when (a) the reload interval has elapsed,
// (b) the file changed since the last successful reload, and (c) the
// change is at least RELOAD_WAIT old (avoids reading a partial write).
// Returns true only when a reload actually happened.
public synchronized boolean reloadConfigsIfNecessary() {
  long time = RaidNode.now();
  if (time > lastReloadAttempt + reloadInterval) {
    lastReloadAttempt = time;
    try {
      File file = new File(configFileName);
      long lastModified = file.lastModified();
      if (lastModified > lastSuccessfulReload && time > lastModified + RELOAD_WAIT) {
        reloadConfigs();
        lastSuccessfulReload = time;
        lastReloadAttemptFailed = false;
        return true;
      }
    } catch (Exception e) {
      // Log only the first consecutive failure to avoid log spam.
      if (!lastReloadAttemptFailed) {
        LOG.error("Failed to reload config file - "
            + "will use existing configuration.", e);
      }
      lastReloadAttemptFailed = true;
    }
  }
  return false;
}
java
{ "resource": "" }
q160457
ConfigManager.stopReload
train
/**
 * Stops the background reload thread and waits for it to exit.
 * Safe to call when no reload thread is running.
 */
void stopReload() throws InterruptedException {
  if (reloadThread == null) {
    return;
  }
  running = false;
  reloadThread.interrupt();
  reloadThread.join();
  reloadThread = null;
}
java
{ "resource": "" }
q160458
ConfigManager.getPolicy
train
/** Looks up a policy by name; returns null when no policy matches. */
PolicyInfo getPolicy(String policyName) {
  PolicyInfo match = null;
  for (PolicyInfo candidate : allPolicies) {
    if (policyName.equals(candidate.getName())) {
      match = candidate;
      break;
    }
  }
  return match;
}
java
{ "resource": "" }
q160459
RaidNode.stop
train
// Idempotent shutdown: stops the RPC server, interrupts all worker
// threads, flags the monitors to stop, shuts down the HTTP info server,
// and unregisters the MBean.
public void stop() {
  if (stopRequested) {
    return;
  }
  stopRequested = true;
  running = false;
  if (server != null) server.stop();
  if (triggerThread != null) {
    triggerThread.interrupt();
    triggerMonitor = null;
  }
  if (urfThread != null) {
    urfThread.interrupt();
    urfProcessor = null;
  }
  if (blockIntegrityMonitor != null) blockIntegrityMonitor.running = false;
  if (blockFixerThread != null) blockFixerThread.interrupt();
  if (blockCopierThread != null) blockCopierThread.interrupt();
  if (corruptFileCounterThread != null) corruptFileCounterThread.interrupt();
  if (purgeMonitor != null) purgeMonitor.running = false;
  if (purgeThread != null) purgeThread.interrupt();
  if (placementMonitor != null) placementMonitor.stop();
  if (statsCollector != null) statsCollector.stop();
  if (statsCollectorThread != null) statsCollectorThread.interrupt();
  if (infoServer != null) {
    try {
      infoServer.stop();
    } catch (Exception e) {
      // best effort: shutdown should proceed even if the HTTP server fails
      LOG.warn("Exception shutting down " + RaidNode.class, e);
    }
  }
  this.unregisterMBean();
}
java
{ "resource": "" }
q160460
RaidNode.getCorruptFilesCounterMap
train
// Delegates to the CorruptionWorker inside the block integrity monitor
// (assumes the corruption monitor is a CorruptionWorker; the cast would
// otherwise throw ClassCastException).
public Map<String, Map<CorruptFileStatus, Long>> getCorruptFilesCounterMap() { return ((CorruptionWorker)blockIntegrityMonitor.getCorruptionMonitor()). getCorruptFilesCounterMap(); }
java
{ "resource": "" }
q160461
RaidNode.determinePolicy
train
// Returns the first policy that (a) should raid and (b) targets the given codec id;
// null when no enabled policy matches.
public PolicyInfo determinePolicy(Codec codec) { for (PolicyInfo info : configMgr.getAllPolicies()) { if (!info.getShouldRaid()) { continue; } if (info.getCodecId().equals(codec.id)) { return info; } } return null; }
java
{ "resource": "" }
q160462
RaidNode.doRaid
train
// Splits the file into encoding candidates and raids each one; returns true
// if at least one candidate was raided successfully. Note: once succeed is true,
// short-circuit || skips raiding the remaining candidates.
public static boolean doRaid(Configuration conf, FileStatus stat, Path destPath, Codec codec, Statistics statistics, Progressable reporter, boolean doSimulate, int targetRepl, int metaRepl) throws IOException { boolean succeed = false; for (EncodingCandidate ec : RaidNode.splitPaths(conf, codec, stat)) { succeed = succeed || doRaid(conf, ec, destPath, codec, statistics, reporter, doSimulate, targetRepl, metaRepl); } return succeed; }
java
{ "resource": "" }
q160463
RaidNode.raidedByOtherHighPriCodec
train
// True when a parity file already exists for this path under any codec with
// strictly higher priority (directories only match dir-raid codecs).
// Fires an injection event when such a parity is found, for test hooks.
public static boolean raidedByOtherHighPriCodec(Configuration conf, FileStatus stat, Codec codec) throws IOException { for (Codec tcodec : Codec.getCodecs()) { if (tcodec.priority > codec.priority) { if (stat.isDir() && !tcodec.isDirRaid) { // A directory could not be raided by a file level codec. continue; } // check if high priority parity file exists. if (ParityFilePair.parityExists(stat, tcodec, conf)) { InjectionHandler.processEvent(InjectionEvent.RAID_ENCODING_SKIP_PATH); return true; } } } return false; }
java
{ "resource": "" }
q160464
RaidNode.doDirRaid
train
// Directory-level raid: lists the directory's raid-eligible files, skips dirs with
// <= 2 total blocks, generates the parity file, then (unless simulating) drops each
// source file's replication to targetRepl. Updates Statistics counters for processed,
// remaining and meta blocks/bytes. Returns NOACTION / FAILURE / SUCCESS.
// NOTE(review): on a setReplication failure, files already reduced earlier in the
// loop are left at targetRepl — partial state; confirm this is intended.
private static LOGRESULTS doDirRaid(Configuration conf, EncodingCandidate ec, Path destPath, Codec codec, Statistics statistics, Progressable reporter, boolean doSimulate, int targetRepl, int metaRepl) throws IOException { FileStatus stat = ec.srcStat; Path p = stat.getPath(); FileSystem srcFs = p.getFileSystem(conf); List<FileStatus> lfs = RaidNode.listDirectoryRaidFileStatus(conf, srcFs, p); if (lfs == null) { return LOGRESULTS.NOACTION; } // add up the total number of blocks in the directory long blockNum = DirectoryStripeReader.getBlockNum(lfs); // if the directory has fewer than 2 blocks, then nothing to do if (blockNum <= 2) { return LOGRESULTS.NOACTION; } // add up the raw disk space occupied by this directory long diskSpace = 0; // we use the maximum replication int srcRepl = 0; for (FileStatus fsStat: lfs) { diskSpace += (fsStat.getLen() * fsStat.getReplication()); if (fsStat.getReplication() > srcRepl) { srcRepl = fsStat.getReplication(); } } long parityBlockSize = DirectoryStripeReader.getParityBlockSize(conf, lfs); statistics.numProcessedBlocks += blockNum; statistics.processedSize += diskSpace; boolean parityGenerated = false; // generate parity file try { parityGenerated = generateParityFile(conf, ec, targetRepl, reporter, srcFs, destPath, codec, blockNum, srcRepl, metaRepl, parityBlockSize, lfs); } catch (InterruptedException e) { throw new IOException (e); } if (!parityGenerated) return LOGRESULTS.NOACTION; if (!doSimulate) { for (FileStatus fsStat: lfs) { if (srcFs.setReplication(fsStat.getPath(), (short)targetRepl) == false) { LOG.info("Error in reducing replication of " + fsStat.getPath() + " to " + targetRepl); statistics.remainingSize += diskSpace; return LOGRESULTS.FAILURE; } }; } diskSpace = 0; for (FileStatus fsStat: lfs) { diskSpace += (fsStat.getLen() * targetRepl); } statistics.remainingSize += diskSpace; // the metafile will have this many number of blocks long numMeta = blockNum / codec.stripeLength; if (blockNum % codec.stripeLength !=
0) { numMeta++; } // we create numMeta blocks. This metablock has metaRepl # replicas. // the last block of the metafile might not be completely filled up, but we // ignore that for now. statistics.numMetaBlocks += (numMeta * metaRepl); statistics.metaSize += (numMeta * metaRepl * parityBlockSize); return LOGRESULTS.SUCCESS; }
java
{ "resource": "" }
q160465
RaidNode.doFileRaid
train
// File-level raid: skips files with <= 2 blocks, generates the parity file, then
// (unless simulating) drops the source file's replication to targetRepl. Updates
// Statistics counters for processed, remaining and meta blocks/bytes.
// Returns NOACTION / FAILURE / SUCCESS.
private static LOGRESULTS doFileRaid(Configuration conf, EncodingCandidate ec, Path destPath, Codec codec, Statistics statistics, Progressable reporter, boolean doSimulate, int targetRepl, int metaRepl) throws IOException, InterruptedException { FileStatus stat = ec.srcStat; Path p = stat.getPath(); FileSystem srcFs = p.getFileSystem(conf); // extract block locations from File system BlockLocation[] locations = srcFs.getFileBlockLocations(stat, 0, stat.getLen()); // if the file has fewer than 2 blocks, then nothing to do if (locations.length <= 2) { return LOGRESULTS.NOACTION; } // add up the raw disk space occupied by this file long diskSpace = 0; for (BlockLocation l: locations) { diskSpace += (l.getLength() * stat.getReplication()); } statistics.numProcessedBlocks += locations.length; statistics.processedSize += diskSpace; // generate parity file boolean parityGenerated = generateParityFile(conf, ec, targetRepl, reporter, srcFs, destPath, codec, locations.length, stat.getReplication(), metaRepl, stat.getBlockSize(), null); if (!parityGenerated) { return LOGRESULTS.NOACTION; } if (!doSimulate) { if (srcFs.setReplication(p, (short)targetRepl) == false) { LOG.info("Error in reducing replication of " + p + " to " + targetRepl); statistics.remainingSize += diskSpace; return LOGRESULTS.FAILURE; }; } diskSpace = 0; for (BlockLocation l: locations) { diskSpace += (l.getLength() * targetRepl); } statistics.remainingSize += diskSpace; // the metafile will have this many number of blocks int numMeta = locations.length / codec.stripeLength; if (locations.length % codec.stripeLength != 0) { numMeta++; } // we create numMeta for every file. This metablock has metaRepl # replicas. // the last block of the metafile might not be completely filled up, but we // ignore that for now. statistics.numMetaBlocks += (numMeta * metaRepl); statistics.metaSize += (numMeta * metaRepl * stat.getBlockSize()); return LOGRESULTS.SUCCESS; }
java
{ "resource": "" }
q160466
RaidNode.createRaidNode
train
// Reflectively instantiates the RaidNode implementation named by
// RAIDNODE_CLASSNAME_KEY (default DistRaidNode) via its (Configuration)
// constructor; every reflection failure is rewrapped as ClassNotFoundException.
public static RaidNode createRaidNode(Configuration conf) throws ClassNotFoundException { try { // default to distributed raid node Class<?> raidNodeClass = conf.getClass(RAIDNODE_CLASSNAME_KEY, DistRaidNode.class); if (!RaidNode.class.isAssignableFrom(raidNodeClass)) { throw new ClassNotFoundException("not an implementation of RaidNode"); } Constructor<?> constructor = raidNodeClass.getConstructor(new Class[] {Configuration.class} ); return (RaidNode) constructor.newInstance(conf); } catch (NoSuchMethodException e) { throw new ClassNotFoundException("cannot construct raidnode", e); } catch (InstantiationException e) { throw new ClassNotFoundException("cannot construct raidnode", e); } catch (IllegalAccessException e) { throw new ClassNotFoundException("cannot construct raidnode", e); } catch (InvocationTargetException e) { throw new ClassNotFoundException("cannot construct raidnode", e); } }
java
{ "resource": "" }
q160467
RaidNode.createRaidNode
train
// CLI entry: parses startup options from argv (printing usage and returning null
// on bad arguments), records the option in conf, and builds the RaidNode.
public static RaidNode createRaidNode(String argv[], Configuration conf) throws IOException, ClassNotFoundException { if (conf == null) { conf = new Configuration(); } StartupOption startOpt = parseArguments(argv); if (startOpt == null) { printUsage(); return null; } setStartupOption(conf, startOpt); RaidNode node = createRaidNode(conf); return node; }
java
{ "resource": "" }
q160468
RaidNode.getJobID
train
// Returns the configured "mapred.job.id", lazily generating and caching a
// timestamp-based "localRaid..." id in conf when none is set.
public static String getJobID(Configuration conf) { String jobId = conf.get("mapred.job.id", null); if (jobId == null) { jobId = "localRaid" + df.format(new Date()); conf.set("mapred.job.id", jobId); } return jobId; }
java
{ "resource": "" }
q160469
AvatarDFSck.adjustConf
train
// Strips the avatar service-name argument out of argv; when one was present,
// validates it and rewires conf (service-specific keys + default URI) for that
// service. Returns argv without the service-name argument.
public static String[] adjustConf(String[] argv, Configuration conf) { String[] serviceId = new String[] { "" }; String[] filteredArgv = DFSUtil.getServiceName(argv, serviceId); if (!serviceId[0].equals("")) { NameNode.checkServiceName(conf, serviceId[0]); DFSUtil.setGenericConf(conf, serviceId[0], AvatarNode.AVATARSERVICE_SPECIFIC_KEYS); NameNode.setupDefaultURI(conf); } return filteredArgv; }
java
{ "resource": "" }
q160470
BlockPlacementPolicyRaid.countCompanionBlocks
train
// Tallies companion-block replicas per location: result[0] maps datanode name ->
// count, result[1] maps rack (datanode parent) name -> count.
@SuppressWarnings("unchecked") static Map<String, Integer>[] countCompanionBlocks( Collection<LocatedBlock> companionBlocks) { Map<String, Integer>[] result = new HashMap[2]; result[0] = new HashMap<String, Integer>(); result[1] = new HashMap<String, Integer>(); for (LocatedBlock block : companionBlocks) { for (DatanodeInfo d : block.getLocations()) { // count the companion blocks on the datanodes String name = d.getName(); Integer currentCount = result[0].get(name); result[0].put(name, currentCount == null ? 1 : currentCount + 1); // count the companion blocks on the racks of datanodes name = d.getParent().getName(); currentCount = result[1].get(name); result[1].put(name, currentCount == null ? 1 : currentCount + 1); } } return result; }
java
{ "resource": "" }
q160471
BlockPlacementPolicyRaid.getCompanionBlocks
train
// Dispatches on the file's raid classification (NOT_RAID / HAR_TEMP_PARITY /
// TEMP_PARITY / PARITY / SOURCE) to collect the blocks that belong to the same
// stripe as the given block; empty list for non-raided files.
List<LocatedBlock> getCompanionBlocks(String path, FileInfo info, Block block, FSInodeInfo inode) throws IOException { Codec codec = info.codec; switch (info.type) { case NOT_RAID: return Collections.emptyList(); case HAR_TEMP_PARITY: return getCompanionBlocksForHarParityBlock( path, codec.parityLength, block, inode); case TEMP_PARITY: NameWithINode ni = getSourceFile(path, codec.tmpParityDirectory); return getCompanionBlocksForParityBlock( ni.name, path, codec.parityLength, codec.stripeLength, block, codec.isDirRaid, ni.inode, inode); case PARITY: ni = getSourceFile(path, codec.parityDirectory); return getCompanionBlocksForParityBlock( ni.name, path, codec.parityLength, codec.stripeLength, block, codec.isDirRaid, ni.inode, inode); case SOURCE: return getCompanionBlocksForSourceBlock( path, info.parityName, codec.parityLength, codec.stripeLength, block, codec.isDirRaid, inode, info.parityInode); } return Collections.emptyList(); }
java
{ "resource": "" }
q160472
BlockPlacementPolicyRaid.getSourceFile
train
// Maps a parity path back to its source path by stripping the parity-directory
// prefix, then looks up the source inode. Returns null for HAR parity files.
NameWithINode getSourceFile(String parity, String prefix) throws IOException { if (isHarFile(parity)) { return null; } // remove the prefix String src = parity.substring(prefix.length()); byte[][] components = INodeDirectory.getPathComponents(src); INode inode = namesystem.dir.getINode(components); return new NameWithINode(src, inode); }
java
{ "resource": "" }
q160473
BlockPlacementPolicyRaid.getParityFile
train
// Computes the parity path for src under the codec's parity directory
// (parent dir for dir-raid codecs, full path otherwise) and resolves its inode;
// null when no parity inode exists.
private NameWithINode getParityFile(Codec codec, String src) throws IOException { String parity; if (codec.isDirRaid) { String parent = getParentPath(src); parity = codec.parityDirectory + parent; } else { parity = codec.parityDirectory + src; } byte[][] components = INodeDirectory.getPathComponents(parity); INode parityInode = namesystem.dir.getINode(components); if (parityInode == null) return null; return new NameWithINode(parity, parityInode); }
java
{ "resource": "" }
q160474
BlockPlacementPolicyRaid.getFileInfo
train
// Classifies a path for every registered codec: parity-related prefixes first,
// then "has a parity file" => SOURCE (except dir-raid sources below minFileSize,
// which are treated as NOT_RAID); otherwise NOT_RAID.
protected FileInfo getFileInfo(FSInodeInfo srcINode, String path) throws IOException { for (Codec c : Codec.getCodecs()) { if (path.startsWith(c.tmpHarDirectoryPS)) { return new FileInfo(FileType.HAR_TEMP_PARITY, c); } if (path.startsWith(c.tmpParityDirectoryPS)) { return new FileInfo(FileType.TEMP_PARITY, c); } if (path.startsWith(c.parityDirectoryPS)) { return new FileInfo(FileType.PARITY, c); } NameWithINode ni = getParityFile(c, path); if (ni != null) { if (c.isDirRaid && srcINode != null && srcINode instanceof INodeFile) { INodeFile inf = (INodeFile)srcINode; if (inf.getFileSize() < this.minFileSize) { // It's too small to be raided return new FileInfo(FileType.NOT_RAID, null); } } return new FileInfo(FileType.SOURCE, c, ni.name, ni.inode); } } return new FileInfo(FileType.NOT_RAID, null); }
java
{ "resource": "" }
q160475
CompositeContext.isMonitoring
train
// A composite context is monitoring only when ALL sub-contexts are (logical AND
// accumulated over subctxt; note it still queries every sub-context).
@Override public boolean isMonitoring() { boolean ret = true; for (MetricsContext ctxt : subctxt) { ret &= ctxt.isMonitoring(); } return ret; }
java
{ "resource": "" }
q160476
BufferedByteInputOutput.read
train
// Blocking single-byte read from the ring buffer: spins (with sleep(1) between
// attempts, lockR held only while checking/consuming) until a byte is available
// or the buffer is closed with nothing left, in which case returns -1.
// Returns the byte as 0..255.
public int read() throws IOException { while (true) { lockR.lock(); try { if (availableCount.get() > 0) { // as long as there is available data // serve it even if closed int b = bytes[readCursor] & 0xFF; incReadCursor(1); availableCount.decrementAndGet(); totalRead++; return b; } else if (closed){ // when no bytes are left and buffer is closed // return -1 return -1; } } finally { lockR.unlock(); } sleep(1); } }
java
{ "resource": "" }
q160477
BufferedByteInputOutput.read
train
// Bulk read from the ring buffer. Copies up to len bytes in at most two
// arraycopies (wrap-around split at the end of the backing array). Returns the
// number of bytes read; 0 immediately when nonBlockingRead and nothing available;
// -1 when closed and drained. Otherwise spins with sleep(1) until data arrives.
public int read(byte[] buf, int off, int len) throws IOException { while (true) { lockR.lock(); try { int available = availableCount.get(); if (available > 0) { // as long as there is available data // serve it even if closed final int lenToRead = Math.min(available, len); final int lenForward = Math.min(lenToRead, length - readCursor); final int lenRemaining = lenToRead - lenForward; // after readCursor if (lenForward > 0) { System.arraycopy(bytes, readCursor, buf, off, lenForward); incReadCursor(lenForward); } // before readCursor if (lenRemaining > 0) { System.arraycopy(bytes, 0, buf, off + lenForward, lenRemaining); incReadCursor(lenRemaining); } availableCount.addAndGet(-1 * lenToRead); totalRead += lenToRead; return lenToRead; } else if (nonBlockingRead) { // we do not serve any bytes return 0; } else if (closed) { // when no bytes are left and buffer is closed // return -1 return -1; } } finally { lockR.unlock(); } sleep(1); } }
java
{ "resource": "" }
q160478
BufferedByteInputOutput.write
train
// Bulk write into the ring buffer. Each pass copies as much as currently fits
// (in at most two arraycopies for wrap-around), then spins with sleep(1) until
// the remainder fits. Throws via checkClosed() if the buffer is closed.
public void write(byte[] buf, int off, int len) throws IOException { while (true) { lockW.lock(); try { // fail if closed checkClosed(); final int lenToWrite = Math.min(len, length - availableCount.get()); final int lenForward = Math.min(lenToWrite, length - writeCursor); final int lenRemaining = lenToWrite - lenForward; // after writeCursor if (lenForward > 0) { System.arraycopy(buf, off, bytes, writeCursor, lenForward); incWriteCursor(lenForward); } // before writeCursor if (lenRemaining > 0) { System.arraycopy(buf, off + lenForward, bytes, 0, lenRemaining); incWriteCursor(lenRemaining); } availableCount.addAndGet(lenToWrite); totalWritten += lenToWrite; // modify offset and len for next iteration off += lenToWrite; len -= lenToWrite; if (len == 0) { return; } } finally { lockW.unlock(); } sleep(1); } }
java
{ "resource": "" }
q160479
BufferedByteInputOutput.write
train
// Blocking single-byte write: spins with sleep(1) until one slot is free,
// then stores the low byte of b. Throws via checkClosed() if closed.
public void write(int b) throws IOException { while (true) { lockW.lock(); try { // fail if closed checkClosed(); if (length - availableCount.get() > 0) { bytes[writeCursor] = (byte) b; incWriteCursor(1); availableCount.incrementAndGet(); totalWritten++; return; } } finally { lockW.unlock(); } sleep(1); } }
java
{ "resource": "" }
q160480
DistRaid.createJobConf
train
// Builds the JobConf for a distraid run: timestamped job name, raid job user,
// speculative maps off, custom input format, map-only job (0 reducers).
// Side effect: updates the static jobName field.
private static JobConf createJobConf(Configuration conf) { JobConf jobconf = new JobConf(conf, DistRaid.class); jobName = NAME + " " + dateForm.format(new Date(RaidNode.now())); jobconf.setUser(RaidNode.JOBUSER); jobconf.setJobName(jobName); jobconf.setMapSpeculativeExecution(false); RaidUtils.parseAndSetOptions(jobconf, SCHEDULER_OPTION_LABEL); jobconf.setJarByClass(DistRaid.class); jobconf.setInputFormat(DistRaidInputFormat.class); jobconf.setOutputKeyClass(Text.class); jobconf.setOutputValueClass(Text.class); jobconf.setMapperClass(DistRaidMapper.class); jobconf.setNumReduceTasks(0); return jobconf; }
java
{ "resource": "" }
q160481
DistRaid.addRaidPaths
train
// Queues a (policy, encoding-candidates) pair for the next distraid job.
public void addRaidPaths(PolicyInfo info, List<EncodingCandidate> paths) { raidPolicyPathPairList.add(new RaidPolicyPathPair(info, paths)); }
java
{ "resource": "" }
q160482
DistRaid.startDistRaid
train
// Submits the distraid MapReduce job after setup() writes the operation list.
// Returns true when the job was submitted (setup() found work to do).
// Precondition (assert): at least one policy/path pair has been queued.
public boolean startDistRaid() throws IOException { assert(raidPolicyPathPairList.size() > 0); if (setup()) { this.jobClient = new JobClient(jobconf); this.runningJob = this.jobClient.submitJob(jobconf); LOG.info("Job Started: " + runningJob.getID()); this.startTime = System.currentTimeMillis(); return true; } return false; }
java
{ "resource": "" }
q160483
DistRaid.checkComplete
train
public boolean checkComplete() throws IOException { JobID jobID = runningJob.getID(); if (runningJob.isComplete()) { // delete job directory final String jobdir = jobconf.get(JOB_DIR_LABEL); if (jobdir != null) { final Path jobpath = new Path(jobdir); jobpath.getFileSystem(jobconf).delete(jobpath, true); } if (runningJob.isSuccessful()) { LOG.info("Job Complete(Succeeded): " + jobID); } else { LOG.info("Job Complete(Failed): " + jobID); } raidPolicyPathPairList.clear(); Counters ctrs = runningJob.getCounters(); if (ctrs != null) { RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID); if (ctrs.findCounter(Counter.FILES_FAILED) != null) { long filesFailed = ctrs.findCounter(Counter.FILES_FAILED).getValue(); metrics.raidFailures.inc(filesFailed); } long slotSeconds = ctrs.findCounter( JobInProgress.Counter.SLOTS_MILLIS_MAPS).getValue() / 1000; metrics.raidSlotSeconds.inc(slotSeconds); } return true; } else { String report = (" job " + jobID + " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0)+ " reduce " + StringUtils.formatPercent(runningJob.reduceProgress(), 0)); if (!report.equals(lastReport)) { LOG.info(report); lastReport = report; } TaskCompletionEvent[] events = runningJob.getTaskCompletionEvents(jobEventCounter); jobEventCounter += events.length; for(TaskCompletionEvent event : events) { if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) { LOG.info(" Job " + jobID + " " + event.toString()); } } return false; } }
java
{ "resource": "" }
q160484
DistRaid.setup
train
// Prepares the distraid job: creates a per-job directory under the JT system dir,
// writes the shuffled operation list as a small-block SequenceFile (sync markers
// every SYNC_FILE_MAX entries, replication raised on the control file), records
// the op count, and sizes the map count. Returns true iff there is at least one
// operation, i.e. a job is worth submitting.
private boolean setup() throws IOException { estimateSavings(); final String randomId = getRandomId(); JobClient jClient = new JobClient(jobconf); Path jobdir = new Path(jClient.getSystemDir(), NAME + "_" + randomId); LOG.info(JOB_DIR_LABEL + "=" + jobdir); jobconf.set(JOB_DIR_LABEL, jobdir.toString()); Path log = new Path(jobdir, "_logs"); // The control file should have small size blocks. This helps // in spreading out the load from mappers that will be spawned. jobconf.setInt("dfs.blocks.size", OP_LIST_BLOCK_SIZE); FileOutputFormat.setOutputPath(jobconf, log); LOG.info("log=" + log); // create operation list FileSystem fs = jobdir.getFileSystem(jobconf); Path opList = new Path(jobdir, "_" + OP_LIST_LABEL); jobconf.set(OP_LIST_LABEL, opList.toString()); int opCount = 0, synCount = 0; SequenceFile.Writer opWriter = null; try { opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class, PolicyInfo.class, SequenceFile.CompressionType.NONE); for (RaidPolicyPathPair p : raidPolicyPathPairList) { // If a large set of files are Raided for the first time, files // in the same directory that tend to have the same size will end up // with the same map. This shuffle mixes things up, allowing a better // mix of files. java.util.Collections.shuffle(p.srcPaths); for (EncodingCandidate ec : p.srcPaths) { opWriter.append(new Text(ec.toString()), p.policy); opCount++; if (++synCount > SYNC_FILE_MAX) { opWriter.sync(); synCount = 0; } } } } finally { if (opWriter != null) { opWriter.close(); } fs.setReplication(opList, OP_LIST_REPLICATION); // increase replication for control file } raidPolicyPathPairList.clear(); jobconf.setInt(OP_COUNT_LABEL, opCount); LOG.info("Number of files=" + opCount); jobconf.setNumMapTasks(getMapCount(opCount)); LOG.info("jobName= " + jobName + " numMapTasks=" + jobconf.getNumMapTasks()); return opCount != 0; }
java
{ "resource": "" }
q160485
SerializedRecord.getStrings
train
/**
 * Converts an arbitrary record value into its string form(s):
 * null -> ["null"]; String -> itself; Calendar -> formatted via dateFormatter;
 * InetAddress[] -> one host address per element; String[] -> its elements;
 * anything else -> toString().
 *
 * Fix: dropped the redundant {@code retval.clear()} that immediately followed
 * construction of an already-empty list; also braced and formatted the
 * if/else chain.
 *
 * @param o value to normalize (may be null)
 * @return a freshly allocated, never-null list of string representations
 */
private ArrayList<String> getStrings(Object o) {
  ArrayList<String> retval = new ArrayList<String>();
  if (o == null) {
    retval.add("null");
  } else if (o instanceof String) {
    retval.add((String) o);
  } else if (o instanceof Calendar) {
    retval.add(dateFormatter.format(((Calendar) o).getTime()));
  } else if (o instanceof InetAddress[]) {
    for (InetAddress ip : (InetAddress[]) o) {
      retval.add(ip.getHostAddress());
    }
  } else if (o instanceof String[]) {
    for (String s : (String[]) o) {
      retval.add(s);
    }
  } else {
    retval.add(o.toString());
  }
  return retval;
}
java
{ "resource": "" }
q160486
DFSClient.createNamenode
train
// Convenience overload: connects to the namenode at the client-protocol address
// derived from conf.
public static ClientProtocol createNamenode(Configuration conf) throws IOException { return createNamenode(NameNode.getClientProtocolAddress(conf), conf); }
java
{ "resource": "" }
q160487
DFSClient.createRPCNamenodeIfCompatible
train
// Creates the RPC proxy to the namenode. On a protocol version mismatch,
// records the server version and keeps the mismatched proxy only when the
// client/server versions are declared compatible; otherwise rethrows as
// RPC.VersionIncompatible.
private void createRPCNamenodeIfCompatible( InetSocketAddress nameNodeAddr, Configuration conf, UserGroupInformation ugi) throws IOException { try { this.namenodeProtocolProxy = createRPCNamenode(nameNodeAddr, conf, ugi, namenodeRPCSocketTimeout); this.rpcNamenode = namenodeProtocolProxy.getProxy(); } catch (RPC.VersionMismatch e) { long clientVersion = e.getClientVersion(); namenodeVersion = e.getServerVersion(); if (clientVersion > namenodeVersion && !ProtocolCompatible.isCompatibleClientProtocol( clientVersion, namenodeVersion)) { throw new RPC.VersionIncompatible( ClientProtocol.class.getName(), clientVersion, namenodeVersion); } this.rpcNamenode = (ClientProtocol)e.getProxy(); } }
java
{ "resource": "" }
q160488
DFSClient.close
train
// Shuts down the client exactly once (no-op when already closed): stops the
// lease checker and block-location renewal, joins their threads (swallowing
// InterruptedException), then tears down the namenode RPC proxy.
public synchronized void close() throws IOException { if(clientRunning) { leasechecker.close(); leasechecker.closeRenewal(); if (blockLocationRenewal != null) { blockLocationRenewal.stop(); } clientRunning = false; try { leasechecker.interruptAndJoin(); if (blockLocationRenewal != null) { blockLocationRenewal.join(); } } catch (InterruptedException ie) { } // close connections to the namenode RPC.stopProxy(rpcNamenode); } }
java
{ "resource": "" }
q160489
DFSClient.getBlockLocations
train
// Fetches located blocks for [start, start+length) of src from the namenode and
// converts them to the public BlockLocation form.
public BlockLocation[] getBlockLocations(String src, long start, long length) throws IOException { LocatedBlocks blocks = callGetBlockLocations(namenode, src, start, length, isMetaInfoSuppoted(namenodeProtocolProxy)); return DFSUtil.locatedBlocks2Locations(blocks); }
java
{ "resource": "" }
q160490
DFSClient.open
train
// Opens src for reading: verifies the client is open, bumps the file-read stat,
// constructs the DFSInputStream (which fetches block info), and registers it
// with the block-location renewal service when one is running.
DFSInputStream open(String src, int buffersize, boolean verifyChecksum, FileSystem.Statistics stats, boolean clearOsBuffer, ReadOptions options ) throws IOException { checkOpen(); incFileReadToStats(); // Get block info from namenode DFSInputStream stream = new DFSInputStream(this, src, buffersize, verifyChecksum, clearOsBuffer, options); if (blockLocationRenewal != null) { blockLocationRenewal.add(stream); } return stream; }
java
{ "resource": "" }
q160491
DFSClient.create
train
// Convenience overload: create with default replication and block size, no progress.
public OutputStream create(String src, boolean overwrite ) throws IOException { return create(src, overwrite, defaultReplication, defaultBlockSize, null); }
java
{ "resource": "" }
q160492
DFSClient.create
train
// Convenience overload: create with explicit replication/block size, no progress.
public OutputStream create(String src, boolean overwrite, short replication, long blockSize ) throws IOException { return create(src, overwrite, replication, blockSize, null); }
java
{ "resource": "" }
q160493
DFSClient.raidFile
train
// Asks the namenode to raid the source file with the given codec and expected
// source replication; unwraps the common remote exceptions for callers.
public boolean raidFile(String source, String codecId, short expectedSourceRepl) throws IOException { checkOpen(); try { return namenode.raidFile(source, codecId, expectedSourceRepl); } catch(RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, NSQuotaExceededException.class, DSQuotaExceededException.class); } }
java
{ "resource": "" }
q160494
DFSClient.recoverLease
train
// Recovers the lease on src, choosing a version-probing strategy when no
// protocol proxy is available and a method-probing strategy otherwise.
// Defensively removes src from the local lease checker first.
boolean recoverLease(String src, boolean discardLastBlock) throws IOException { checkOpen(); // We remove the file from local lease checker. Usually it is already been // removed by the client but we want to be extra safe. leasechecker.remove(src); if (this.namenodeProtocolProxy == null) { return versionBasedRecoverLease(src); } return methodBasedRecoverLease(src, discardLastBlock); }
java
{ "resource": "" }
q160495
DFSClient.versionBasedRecoverLease
train
// Lease recovery selected by namenode protocol version, oldest first:
// (1) pre-RECOVER_LEASE: append-and-close to force recovery, false if the file
// is still being created; (2) pre-CLOSE_RECOVER_LEASE: recoverLease then report
// whether the file is no longer under construction; (3) otherwise
// closeRecoverLease, whose return value is authoritative.
private boolean versionBasedRecoverLease(String src) throws IOException { if (namenodeVersion < ClientProtocol.RECOVER_LEASE_VERSION) { OutputStream out; try { out = append(src, conf.getInt("io.file.buffer.size", 4096), null); } catch (RemoteException re) { IOException e = re.unwrapRemoteException(AlreadyBeingCreatedException.class); if (e instanceof AlreadyBeingCreatedException) { return false; } throw re; } out.close(); return true; } else if (namenodeVersion < ClientProtocol.CLOSE_RECOVER_LEASE_VERSION){ try { namenode.recoverLease(src, clientName); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } return !namenode.getBlockLocations(src, 0, Long.MAX_VALUE).isUnderConstruction(); } else { try { return namenode.closeRecoverLease(src, clientName, false); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } } }
java
{ "resource": "" }
q160496
DFSClient.methodBasedRecoverLease
train
// Lease recovery selected by probing which namenode RPC methods exist, newest
// first: closeRecoverLease(+discard flag), closeRecoverLease, recoverLease,
// and finally the append-and-close fallback (false when the file is already
// being created by someone else).
private boolean methodBasedRecoverLease(String src, boolean discardLastBlock) throws IOException { // check if closeRecoverLease(discardLastBlock) is supported if (namenodeProtocolProxy.isMethodSupported( "closeRecoverLease", String.class, String.class, boolean.class)) { try { return namenode.closeRecoverLease(src, clientName, discardLastBlock); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } } // check if closeRecoverLease is supported else if (namenodeProtocolProxy.isMethodSupported( "closeRecoverLease", String.class, String.class)) { try { return namenode.closeRecoverLease(src, clientName); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } } // check if recoverLease is supported if (namenodeProtocolProxy.isMethodSupported( "recoverLease", String.class, String.class)) { try { namenode.recoverLease(src, clientName); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class); } return !namenode.getBlockLocations(src, 0, Long.MAX_VALUE).isUnderConstruction(); } // now use append OutputStream out; try { out = append(src, conf.getInt("io.file.buffer.size", 4096), null); } catch (RemoteException re) { IOException e = re.unwrapRemoteException(AlreadyBeingCreatedException.class); if (e instanceof AlreadyBeingCreatedException) { return false; } throw re; } out.close(); return true; }
java
{ "resource": "" }
q160497
DFSClient.append
train
// Opens src for append. Picks the richest namenode append RPC the server
// supports (appendAndFetchOldGS > appendAndFetchMetaInfo > plain append),
// updating namespace id / data-transfer version / method fingerprint from the
// returned metadata. Registers the stream with the lease checker. On any
// failure after the namenode-side append started, abandons the file —
// tolerating LeaseExpiredException, which means this client no longer owns it.
OutputStream append(String src, int buffersize, Progressable progress ) throws IOException { checkOpen(); clearFileStatusCache(); FileStatus stat = null; LocatedBlock lastBlock = null; boolean success = false; try { stat = getFileInfo(src); if (namenodeProtocolProxy != null && namenodeProtocolProxy.isMethodSupported( "appendAndFetchOldGS", String.class, String.class)) { LocatedBlockWithOldGS loc = namenode.appendAndFetchOldGS(src, clientName); lastBlock = loc; if (loc != null) { updateNamespaceIdIfNeeded(loc.getNamespaceID()); updateDataTransferProtocolVersionIfNeeded(loc.getDataProtocolVersion()); getNewNameNodeIfNeeded(loc.getMethodFingerPrint()); } } else if (namenodeProtocolProxy != null && dataTransferVersion >= DataTransferProtocol.APPEND_BLOCK_VERSION) { // fail the request if the data transfer version support the new append // protocol, but the namenode method is not supported. // This should not happen unless there is a bug. throw new IOException ("DataTransferVersion " + dataTransferVersion + "requires the method appendAndFetchOldGS is supported in Namenode"); } else if (namenodeProtocolProxy != null && namenodeProtocolProxy.isMethodSupported( "appendAndFetchMetaInfo", String.class, String.class)) { LocatedBlockWithMetaInfo loc = namenode.appendAndFetchMetaInfo(src, clientName); lastBlock = loc; if (loc != null) { updateNamespaceIdIfNeeded(loc.getNamespaceID()); updateDataTransferProtocolVersionIfNeeded(loc.getDataProtocolVersion()); getNewNameNodeIfNeeded(loc.getMethodFingerPrint()); } } else { lastBlock = namenode.append(src, clientName); } OutputStream result = new DFSOutputStream(this, src, buffersize, progress, lastBlock, stat, conf.getInt("io.bytes.per.checksum", 512), namespaceId); leasechecker.put(src, result); success = true; return result; } catch(RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class, NSQuotaExceededException.class, DSQuotaExceededException.class); } finally { if
(!success) { try { namenode.abandonFile(src, clientName); } catch (RemoteException e) { if (e.unwrapRemoteException() instanceof LeaseExpiredException) { LOG.debug(String.format( "client %s attempting to abandon file %s which it does not own", clientName, src), e ); } else { throw e; } } } } }
java
{ "resource": "" }
q160498
DFSClient.setReplication
train
// Sets the replication factor of src via the namenode, unwrapping the common
// remote exceptions. NOTE(review): unlike raidFile()/merge() this does not call
// checkOpen() first — confirm whether that omission is intentional.
public boolean setReplication(String src, short replication ) throws IOException { try { return namenode.setReplication(src, replication); } catch(RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, NSQuotaExceededException.class, DSQuotaExceededException.class); } }
java
{ "resource": "" }
q160499
DFSClient.merge
train
// Asks the namenode to merge the parity file into the source under the given
// codec, supplying per-block checksums; unwraps the common remote exceptions.
public void merge(String parity, String source, String codecId, int[] checksums) throws IOException { checkOpen(); try { namenode.merge(parity, source, codecId, checksums); } catch (RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, NSQuotaExceededException.class, DSQuotaExceededException.class); } }
java
{ "resource": "" }