_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160900
Server.start
train
public synchronized void start() throws IOException { if (responder.isAlive()) { // The server is already running return; } responder.start(); listener.start(); handlers = new Handler[handlerCount]; for (int i = 0; i < handlerCount; i++) { handlers[i] = new Handler(i); handlers[i].start(); } }
java
{ "resource": "" }
q160901
Server.waitForHandlers
train
/**
 * Blocks until every started handler thread has terminated.
 * Safe to call when the server was never started (handlers == null).
 * @throws InterruptedException if interrupted while joining a handler
 */
public synchronized void waitForHandlers() throws InterruptedException {
  if (handlers == null) {
    return; // no handlers were ever created
  }
  for (int idx = 0; idx < handlerCount; idx++) {
    Handler handler = handlers[idx];
    if (handler != null) {
      handler.join();
    }
  }
}
java
{ "resource": "" }
q160902
Server.call
train
/**
 * Backward-compatibility overload that dispatches to the three-argument
 * call() with a null first argument.
 * @param param the serialized call parameter
 * @param receiveTime time the call was received
 * @return the call's result
 * @throws IOException propagated from the underlying call
 * @deprecated use the three-argument {@code call(null-arg, param, receiveTime)}
 *             overload directly — TODO confirm the first argument's meaning
 *             (not visible from this block)
 */
@Deprecated public Writable call(Writable param, long receiveTime) throws IOException { return call(null, param, receiveTime); }
java
{ "resource": "" }
q160903
JournalNode.getJournals
train
/**
 * Returns a snapshot copy of all journals currently registered on this node.
 * A copy is returned so callers can iterate without holding this lock.
 */
synchronized Collection<Journal> getJournals() {
  // The ArrayList copy constructor replaces the manual element-by-element loop.
  return new ArrayList<Journal>(journalsById.values());
}
java
{ "resource": "" }
q160904
JournalNode.start
train
/**
 * Starts the JournalNode: validates configuration and local storage, then
 * brings up metrics, the HTTP server, the RPC server, and the background
 * journal syncer thread, in that order.
 * @throws IOException if DFS_JOURNALNODE_HOSTS is not configured or storage
 *         validation fails
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  journalNodes = getJournalHttpAddresses(conf);
  // crash the JournalNode if the DFS_JOURNALNODE_HOSTS is not configured.
  if (journalNodes.isEmpty()) {
    String msg = JournalConfigKeys.DFS_JOURNALNODE_HOSTS
        + " is not present in the configuration.";
    LOG.fatal(msg);
    throw new IOException(msg);
  }
  LOG.info("JournalNode hosts: " + journalNodes);
  validateAndCreateJournalDir(localDir);
  LOG.info("JournalNode storage: " + localDir.getAbsolutePath());
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  // TODO serverId has to be set correctly
  metrics = new JournalNodeMetrics(conf, socAddr.toString());
  // The HTTP server is started before the syncer, which is constructed with
  // httpServer.getAddress() below.
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();
  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
  journalSyncer = new JournalNodeJournalSyncer(journalNodes,
      httpServer.getAddress(), conf);
  journalSyncerThread = new Thread(journalSyncer, "Thread-JournalSyncer");
  journalSyncerThread.start();
}
java
{ "resource": "" }
q160905
JournalNode.stop
train
/**
 * Stops all JournalNode services: RPC server, HTTP server, journals, metrics,
 * and the journal syncer. Each shutdown step is independent so one failure
 * does not prevent the rest from stopping.
 * @param rc result code recorded in this.resultCode
 */
public void stop(int rc) {
  this.resultCode = rc;
  LOG.info("Stopping Journal Node: " + this);
  if (rpcServer != null) {
    rpcServer.stop();
    rpcServer = null;
  }
  if (httpServer != null) {
    try {
      httpServer.stop();
    } catch (IOException ioe) {
      // Best-effort: log and continue stopping the remaining services.
      LOG.warn("Unable to stop HTTP server for " + this, ioe);
    }
  }
  // Close each journal; IOUtils.cleanup presumably logs/tolerates failures —
  // confirm against the IOUtils implementation.
  for (Journal j : journalsById.values()) {
    IOUtils.cleanup(LOG, j);
  }
  if (metrics != null) {
    metrics.shutdown();
  }
  if (journalSyncer != null) {
    journalSyncer.stop();
  }
}
java
{ "resource": "" }
q160906
JournalNode.getJournalDir
train
/**
 * Resolves the on-disk directory for a journal id:
 * {@code <configured dir>/edits/<jid>}.
 * @param jid journal identifier; must be non-null and non-empty
 * @throws IllegalArgumentException if jid is null or empty
 */
private File getJournalDir(String jid) {
  // Fail fast: validate the argument before reading configuration.
  Preconditions.checkArgument(jid != null && !jid.isEmpty(),
      "bad journal identifier: %s", jid);
  String dir = conf.get(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY,
      JournalConfigKeys.DFS_JOURNALNODE_DIR_DEFAULT);
  return new File(new File(new File(dir), "edits"), jid);
}
java
{ "resource": "" }
q160907
JournalNode.getJournalHttpAddresses
train
/** Parses the configured journal HTTP hosts into socket addresses. */
static List<InetSocketAddress> getJournalHttpAddresses(Configuration conf) {
  List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
  for (String host : JournalConfigHelper.getJournalHttpHosts(conf)) {
    addresses.add(NetUtils.createSocketAddr(host));
  }
  return addresses;
}
java
{ "resource": "" }
q160908
ByteWritable.compareTo
train
/** Compares the wrapped byte values in natural numeric order. */
public int compareTo(Object o) {
  int mine = this.value;
  int theirs = ((ByteWritable) o).value;
  if (mine < theirs) {
    return -1;
  }
  return (mine == theirs) ? 0 : 1;
}
java
{ "resource": "" }
q160909
SecurityUtil.setPolicy
train
/** Installs the given policy as the JVM-wide security policy. */
public static void setPolicy(Policy policy) {
  final boolean debugEnabled = LOG.isDebugEnabled();
  if (debugEnabled) {
    LOG.debug("Setting Hadoop security policy");
  }
  Policy.setPolicy(policy);
}
java
{ "resource": "" }
q160910
PipesReducer.reduce
train
/**
 * Forwards one key and all of its values to the pipes application over the
 * downlink. isOk is cleared on entry and set only after the record was fully
 * forwarded, so close() can tell whether to commit (endOfInput) or abort.
 * @throws IOException on downlink failure
 */
public void reduce(K2 key, Iterator<V2> values, OutputCollector<K3, V3> output,
    Reporter reporter) throws IOException {
  isOk = false;
  startApplication(output, reporter);
  downlink.reduceKey(key);
  while (values.hasNext()) {
    downlink.reduceValue(values.next());
  }
  if(skipping) {
    //flush the streams on every record input if running in skip mode
    //so that we don't buffer other records surrounding a bad record.
    downlink.flush();
  }
  isOk = true;
}
java
{ "resource": "" }
q160911
PipesReducer.close
train
/**
 * Finishes the pipes application: on success signals end-of-input, otherwise
 * sends abort; then waits for the child process and always cleans up.
 *
 * NOTE(review): the first guard runs startApplication when isOk is TRUE,
 * while its comment describes the "not yet started" case — presumably
 * startApplication() is idempotent/no-op when already started; confirm
 * against the rest of this class before changing.
 */
public void close() throws IOException {
  // if we haven't started the application, we have nothing to do
  if (isOk) {
    OutputCollector<K3, V3> nullCollector = new OutputCollector<K3, V3>() {
      public void collect(K3 key, V3 value) throws IOException {
        // NULL
      }
    };
    startApplication(nullCollector, Reporter.NULL);
  }
  try {
    if (isOk) {
      application.getDownlink().endOfInput();
    } else {
      // send the abort to the application and let it clean up
      application.getDownlink().abort();
    }
    LOG.info("waiting for finish");
    application.waitForFinish();
    LOG.info("got done");
  } catch (Throwable t) {
    application.abort(t);
  } finally {
    application.cleanup();
  }
}
java
{ "resource": "" }
q160912
MapTask.updateJobWithSplit
train
/**
 * Publishes the file split's location ("map.input.*" properties) into the
 * job configuration; non-file splits only get logged.
 */
private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) {
  if (inputSplit instanceof FileSplit) {
    final FileSplit split = (FileSplit) inputSplit;
    job.set("map.input.file", split.getPath().toString());
    job.setLong("map.input.start", split.getStart());
    job.setLong("map.input.length", split.getLength());
  }
  LOG.info("split: " + inputSplit.toString());
}
java
{ "resource": "" }
q160913
CapacityTaskScheduler.jobAdded
train
/**
 * Bookkeeping when a job is added: bumps the submitting user's job count in
 * the job's queue and, for the user's first job, initializes the per-user
 * occupied-slot counters to zero.
 * @throws IOException propagated from preInitializeJob
 */
synchronized void jobAdded(JobInProgress job) throws IOException {
  QueueSchedulingInfo qsi = queueInfoMap.get(job.getProfile().getQueueName());
  // qsi shouldn't be null
  // update user-specific info
  Integer i = qsi.numJobsByUser.get(job.getProfile().getUser());
  if (null == i) {
    i = 1;
    // set the count for running tasks to 0
    qsi.mapTSI.numSlotsOccupiedByUser.put(job.getProfile().getUser(),
        Integer.valueOf(0));
    qsi.reduceTSI.numSlotsOccupiedByUser.put(job.getProfile().getUser(),
        Integer.valueOf(0));
  } else {
    i++;
  }
  qsi.numJobsByUser.put(job.getProfile().getUser(), i);
  // setup scheduler specific job information
  preInitializeJob(job);
  LOG.debug("Job " + job.getJobID().toString() + " is added under user "
      + job.getProfile().getUser() + ", user now has " + i + " jobs");
}
java
{ "resource": "" }
q160914
CapacityTaskScheduler.jobCompleted
train
synchronized void jobCompleted(JobInProgress job) { QueueSchedulingInfo qsi = queueInfoMap.get(job.getProfile().getQueueName()); // qsi shouldn't be null // update numJobsByUser LOG.debug("JOb to be removed for user " + job.getProfile().getUser()); Integer i = qsi.numJobsByUser.get(job.getProfile().getUser()); i--; if (0 == i.intValue()) { qsi.numJobsByUser.remove(job.getProfile().getUser()); // remove job footprint from our TSIs qsi.mapTSI.numSlotsOccupiedByUser.remove(job.getProfile().getUser()); qsi.reduceTSI.numSlotsOccupiedByUser.remove(job.getProfile().getUser()); LOG.debug("No more jobs for user, number of users = " + qsi.numJobsByUser.size()); } else { qsi.numJobsByUser.put(job.getProfile().getUser(), i); LOG.debug("User still has " + i + " jobs, number of users = " + qsi.numJobsByUser.size()); } }
java
{ "resource": "" }
q160915
OpenFileInfo.write
train
/**
 * Serializes the given OpenFileInfo to the stream.
 * A fresh OpenFileInfo is built from elem's fields before writing —
 * presumably to normalize the serialized form regardless of elem's runtime
 * type; confirm whether elem.write(out) directly would be equivalent.
 * @throws IOException on write failure
 */
public static void write(DataOutput out, OpenFileInfo elem) throws IOException {
  OpenFileInfo info = new OpenFileInfo(elem.filePath, elem.millisOpen);
  info.write(out);
}
java
{ "resource": "" }
q160916
ConfigManager.reloadConfigIfNecessary
train
/**
 * Rate-limited, best-effort reload of the whitelist file.
 * A reload happens only when (1) CONFIG_RELOAD_INTERVAL has passed since the
 * last attempt, (2) the file changed after the last successful reload, and
 * (3) the file has been quiescent for configReloadWait — presumably to avoid
 * reading a file that is still being written; confirm.
 */
public void reloadConfigIfNecessary() {
  if (whitelistFile == null) {
    return; // no whitelist configured
  }
  long time = System.currentTimeMillis();
  if (time > lastReloadAttempt + CONFIG_RELOAD_INTERVAL) {
    lastReloadAttempt = time;
    try {
      File file = new File(whitelistFile);
      long lastModified = file.lastModified();
      if (lastModified > lastSuccessfulReload &&
          time > lastModified + configReloadWait) {
        reloadWhitelist();
        lastSuccessfulReload = time;
        lastReloadAttemptFailed = false;
      }
    } catch (Exception e) {
      // Throwing the error further out here won't help - the RPC thread
      // will catch it and report it in a loop. Instead, just log it and
      // hope somebody will notice from the log.
      // We log the error only on the first failure so we don't fill up the
      // server's log with these messages.
      if (!lastReloadAttemptFailed) {
        LOG.error("Failed to reload whitelist file - " +
            "will use existing allocations.", e);
      }
      lastReloadAttemptFailed = true;
    }
  }
}
java
{ "resource": "" }
q160917
ConfigManager.reloadWhitelist
train
void reloadWhitelist() throws IOException { // read the entire whitelist into memory outside the // FSNamessytem lock. // LinkedList<String> paths = new LinkedList<String>(); FileInputStream fstream = new FileInputStream(whitelistFile); DataInputStream in = new DataInputStream(fstream); BufferedReader br = new BufferedReader(new InputStreamReader(in)); int count = 0; while (true) { String str = br.readLine(); if (str == null) { break; // end of file } str = str.trim(); // remove all whitespace from start and end if (str.startsWith("#")) { continue; // ignore lines with starting with # } paths.add(str); LOG.info("Whitelisted directory [" + count + "] " + str); count++; } in.close(); // acquire the writelock and insert newly read entries into // the Namenode's configuration. namesys.writeLock(); try { namesys.neverDeletePaths.clear(); for (String s: paths) { namesys.neverDeletePaths.add(s); } } finally { namesys.writeUnlock(); } }
java
{ "resource": "" }
q160918
RaidDFSUtil.constructFakeRaidFile
train
/**
 * Test helper: builds a "raided" file by writing random source data, writing
 * a temporary parity file, copying the source's timestamps onto the parity,
 * and merging the two via dfs.merge(). Block size is fixed at 512 bytes;
 * data/parity block counts come from the codec.
 * @throws IOException on any filesystem failure
 */
public static void constructFakeRaidFile(DistributedFileSystem dfs,
    String filePath, RaidCodec codec) throws IOException {
  long blockSize = 512L;
  byte[] buffer = new byte[(int) (codec.numDataBlocks * blockSize)];
  // Checksums stay zeroed — presumably acceptable for this fake file;
  // confirm against dfs.merge()'s expectations.
  int[] checksum = new int[codec.numDataBlocks];
  OutputStream out = dfs.create(new Path(filePath), true, 1,
      codec.parityReplication, blockSize);
  random.nextBytes(buffer);
  out.write(buffer);
  out.close();
  Path parityTmp = new Path(filePath + "_parity");
  buffer = new byte[(int) (codec.numParityBlocks * blockSize)];
  out = dfs.create(parityTmp, true, 1, codec.parityReplication, blockSize);
  random.nextBytes(buffer);
  out.write(buffer);
  out.close();
  // Copy the source's times onto the parity before merging.
  FileStatus stat = dfs.getFileStatus(new Path(filePath));
  dfs.setTimes(parityTmp, stat.getModificationTime(), stat.getAccessTime());
  dfs.merge(parityTmp, new Path(filePath), codec.id, checksum);
}
java
{ "resource": "" }
q160919
MetricsUtil.getContext
train
/**
 * Looks up the metrics context for the given names and ensures monitoring is
 * started; on any failure, logs and falls back to the null context.
 */
public static MetricsContext getContext(String refName, String contextName) {
  MetricsContext ctx;
  try {
    ctx = ContextFactory.getFactory().getContext(refName, contextName);
    if (!ctx.isMonitoring()) {
      ctx.startMonitoring();
    }
  } catch (Exception ex) {
    LOG.error("Unable to create metrics context " + contextName, ex);
    ctx = ContextFactory.getNullContext(contextName);
  }
  return ctx;
}
java
{ "resource": "" }
q160920
MetricsUtil.createRecord
train
/** Creates a record in the given context, pre-tagged with this host's name. */
public static MetricsRecord createRecord(MetricsContext context, String recordName) {
  final MetricsRecord record = context.createRecord(recordName);
  record.setTag("hostName", getHostName());
  return record;
}
java
{ "resource": "" }
q160921
MetricsUtil.getHostName
train
/** Best-effort local host name; returns "unknown" when resolution fails. */
private static String getHostName() {
  try {
    return InetAddress.getLocalHost().getHostName();
  } catch (UnknownHostException ex) {
    LOG.info("Unable to obtain hostName", ex);
    return "unknown";
  }
}
java
{ "resource": "" }
q160922
DatanodeBenThread.getRunningDatanode
train
/**
 * Discovers a live datanode host by writing a one-byte probe file at
 * replication 1 and asking for its block locations; returns the first host
 * of the first block. The probe file is deleted afterwards.
 * @throws IOException on filesystem failure
 */
private String getRunningDatanode(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.newInstance(conf);
  fs.mkdirs(new Path("/tmp"));
  // Timestamp + random suffix to avoid collisions between concurrent tasks.
  Path fileName = new Path("/tmp", rtc.task_name + System.currentTimeMillis()
      + rb.nextInt());
  if (fs.exists(fileName)) {
    fs.delete(fileName);
  }
  FSDataOutputStream out = null;
  byte[] buffer = new byte[1];
  buffer[0] = '0';
  try {
    // Replication 1 so the block lands on exactly one datanode.
    out = fs.create(fileName, (short) 1);
    out.write(buffer, 0, 1);
  } finally {
    IOUtils.closeStream(out);
  }
  fs = getDFS(fs);
  assert fs instanceof DistributedFileSystem;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  BlockLocation[] lbs = dfs.getClient().getBlockLocations(
      fileName.toUri().getPath(), 0, 1);
  fs.delete(fileName);
  return lbs[0].getHosts()[0];
}
java
{ "resource": "" }
q160923
GenReaderThread.prepare
train
/**
 * Builds one reader thread per per-thread directory under the base path in
 * {@code key}. If {@code value} names an existing checksum sequence file, it
 * is loaded and each thread verifies its directory's checksum.
 * @throws IOException if the directory count does not match rtc.nthreads,
 *         the checksum file is unreadable, or a directory has no checksum
 *         entry
 */
@Override
public GenThread[] prepare(JobConf conf, Text key, Text value)
    throws IOException {
  this.rtc = new RunTimeConstants();
  super.prepare(conf, key, value, rtc);
  Path basePath = new Path(key.toString());
  LOG.info("base path is " + basePath);
  Path checksumPath = null;
  FileSystem fs = FileSystem.newInstance(conf);
  if (value.toString().length() != 0) {
    checksumPath = new Path(value.toString());
  }
  HashMap<String, Long> checksumMap = null;
  boolean verifyChecksum = false;
  // BUG FIX: guard against a null checksumPath (empty value); the original
  // called fs.exists(null) in that case.
  if (checksumPath != null && fs.exists(checksumPath)) {
    LOG.info("checksum path is " + checksumPath);
    verifyChecksum = true;
    checksumMap = new HashMap<String, Long>();
    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(fs, checksumPath, conf);
      Writable dir = (Writable) ReflectionUtils.newInstance(
          reader.getKeyClass(), conf);
      Writable checksum = (Writable) ReflectionUtils.newInstance(
          reader.getValueClass(), conf);
      while (reader.next(dir, checksum)) {
        LOG.info("dir: " + dir.toString() + " checksum: " + checksum);
        checksumMap.put(
            fs.makeQualified(new Path(dir.toString())).toUri().getPath(),
            Long.parseLong(checksum.toString()));
      }
    } catch (Exception e) {
      LOG.error(e);
      throw new IOException(e);
    } finally {
      IOUtils.closeStream(reader);
    }
  }
  FileStatus[] baseDirs = fs.listStatus(basePath);
  if (rtc.nthreads != baseDirs.length) {
    throw new IOException("Number of directory under " + basePath + "("
        + baseDirs.length + ") doesn't match number of threads " + "("
        + rtc.nthreads + ").");
  }
  GenReaderThread[] threads = new GenReaderThread[(int) rtc.nthreads];
  for (int i = 0; i < rtc.nthreads; i++) {
    long checksum = 0;
    if (verifyChecksum) {
      String basePathStr = baseDirs[i].getPath().toUri().getPath();
      Long expected = checksumMap.get(basePathStr);
      // BUG FIX: fail with a clear message instead of an NPE on unboxing
      // when a directory has no entry in the checksum file.
      if (expected == null) {
        throw new IOException("No checksum entry for " + basePathStr);
      }
      checksum = expected;
    }
    threads[i] = new GenReaderThread(conf, baseDirs[i].getPath(), checksum,
        verifyChecksum, rtc);
  }
  return threads;
}
java
{ "resource": "" }
q160924
HdfsFileStatus.toFileStatus
train
public static FileStatus toFileStatus(HdfsFileStatus stat, String src) { if (stat == null) { return null; } return new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(), stat.getPermission(), stat.getOwner(), stat.getGroup(), stat.getFullPath(new Path(src))); // full path }
java
{ "resource": "" }
q160925
HdfsFileStatus.getFullName
train
/** Joins the parent path and this status' local name, avoiding a doubled separator. */
final public String getFullName(final String parent) {
  if (isEmptyLocalName()) {
    return parent;
  }
  String separator = parent.endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR;
  return parent + separator + getLocalName();
}
java
{ "resource": "" }
q160926
JournalNodeJspHelper.getValue
train
/** Map lookup for display purposes: missing values render as "-". */
private static String getValue(Map<String, String> map, String keyName) {
  String found = map.get(keyName);
  if (found == null) {
    return "-";
  }
  return found;
}
java
{ "resource": "" }
q160927
JournalNodeJspHelper.fetchStats
train
/**
 * Fetches the /journalStats page from the given journal node over HTTP with
 * connect/read timeouts.
 * @return the page content, or null (after logging) on any failure
 */
private String fetchStats(InetSocketAddress jn) throws IOException {
  try {
    return DFSUtil.getHTMLContentWithTimeout(new URI("http", null,
        jn.getAddress().getHostAddress(), jn.getPort(), "/journalStats",
        null, null).toURL(), HTTP_CONNECT_TIMEOUT, HTTP_READ_TIMEOUT);
  } catch (Exception e) {
    // Broad catch: URI construction and I/O problems are both reported as
    // a missing stats page.
    LOG.error("Problem connecting to " + getHostAddress(jn), e);
    return null;
  }
}
java
{ "resource": "" }
q160928
JournalNodeJspHelper.getStatsMap
train
/** Parses a nested JSON map of stats; null/empty input yields an empty map. */
private static Map<String, Map<String, String>> getStatsMap(String json)
    throws IOException {
  if (json == null || json.isEmpty()) {
    return new HashMap<String, Map<String, String>>();
  }
  return mapper.readValue(json,
      new TypeReference<Map<String, Map<String, String>>>() { });
}
java
{ "resource": "" }
q160929
JournalNodeJspHelper.getNodeReport
train
/** Renders an HTML table listing each journal node and whether it is alive. */
public static String getNodeReport(QJMStatus status) {
  StringBuilder html = new StringBuilder();
  html.append("<table border=1 cellpadding=1 cellspacing=0 title=\"Journals\">");
  html.append("<thead><tr><td><b>Journal node</b></td><td><b>Alive</b></td></tr></thead>");
  for (Entry<String, Boolean> e : status.getAliveMap().entrySet()) {
    String statusCell = e.getValue()
        ? "<td><font color=green>Active</font></td>"
        : "<td><font color=red>Failed</font></td>";
    html.append("<tr><td>").append(e.getKey()).append("</td>")
        .append(statusCell).append("</tr>");
  }
  html.append("</table>");
  return html.toString();
}
java
{ "resource": "" }
q160930
JournalNodeJspHelper.getJournalReport
train
/** Renders an HTML table with one row per journal id plus its statistics. */
public static String getJournalReport(QJMStatus status) {
  StringBuilder html = new StringBuilder();
  html.append("<table border=1 cellpadding=1 cellspacing=0 title=\"Journals\">")
      .append("<thead><tr><td><b>JournalId</b></td><td><b>Statistics</b></td></tr></thead>");
  for (String journalId : status.getJournalIds()) {
    html.append("<tr><td>").append(journalId).append("</td><td>");
    getHTMLTableForASingleJournal(status, journalId, html);
    html.append("</td></tr>");
  }
  html.append("</table>");
  return html.toString();
}
java
{ "resource": "" }
q160931
JournalNodeJspHelper.getHTMLTableForASingleJournal
train
/**
 * Appends an HTML stats table for one journal: columns are the journal nodes
 * reporting for it, rows are the stat names; missing values render as "-".
 * Appends nothing when the journal has no recorded stats.
 */
public static void getHTMLTableForASingleJournal(QJMStatus status,
    String journalName, StringBuilder sb) {
  List<StatsDescriptor> stats = status.stats.get(journalName);
  if (stats == null) {
    return;
  }
  Set<String> statsNames = status.statNames;
  // header
  sb.append("<table border=1 align=\"right\" cellpadding=1 "
      + "cellspacing=0 title=\"Journal statistics\">");
  sb.append("<thead><tr><td></td>");
  for (StatsDescriptor sd : stats) {
    sb.append("<td><b>" + sd.journalNode + "</b></td>");
  }
  sb.append("</tr></thead>");
  // contents
  for (String st : statsNames) {
    // for each available stat
    sb.append("<tr><td>" + st + "</td>");
    // for each available node
    for (StatsDescriptor sd : stats) {
      sb.append("<td align=\"right\">" + getValue(sd.statsPerJournal, st)
          + "</td>");
    }
    sb.append("</tr>");
  }
  sb.append("</table>");
}
java
{ "resource": "" }
q160932
JournalNodeJspHelper.getHostAddress
train
/** Display host string: literal IP when resolved, otherwise the host name. */
private static String getHostAddress(InetSocketAddress addr) {
  return addr.isUnresolved()
      ? addr.getHostName()
      : addr.getAddress().getHostAddress();
}
java
{ "resource": "" }
q160933
PoolManager.getPool
train
/** Returns the pool with the given name, creating it on first reference. */
public synchronized Pool getPool(String name) {
  Pool existing = pools.get(name);
  if (existing != null) {
    return existing;
  }
  boolean isConfiguredPool = poolNamesInAllocFile.contains(name);
  Pool created = new Pool(name, isConfiguredPool);
  pools.put(name, created);
  return created;
}
java
{ "resource": "" }
q160934
PoolManager.reloadAllocsIfNecessary
train
/**
 * Rate-limited reload of the allocations file.
 * Attempts at most once per ALLOC_RELOAD_INTERVAL; actually reloads only
 * when the file changed after the last successful reload and has been
 * quiescent for ALLOC_RELOAD_WAIT — presumably to avoid reading a file that
 * is still being written; confirm.
 * @return true iff a reload actually happened
 */
public boolean reloadAllocsIfNecessary() {
  if (allocFile == null) {
    // A warning has been logged when allocFile is null.
    // We should just return here.
    return false;
  }
  long time = System.currentTimeMillis();
  boolean reloaded = false;
  if (time > lastReloadAttempt + ALLOC_RELOAD_INTERVAL) {
    lastReloadAttempt = time;
    try {
      File file = new File(allocFile);
      long lastModified = file.lastModified();
      if (lastModified > lastSuccessfulReload &&
          time > lastModified + ALLOC_RELOAD_WAIT) {
        reloadAllocs();
        reloaded = true;
        lastSuccessfulReload = time;
        lastReloadAttemptFailed = false;
      }
    } catch (Exception e) {
      // Throwing the error further out here won't help - the RPC thread
      // will catch it and report it in a loop. Instead, just log it and
      // hope somebody will notice from the log.
      // We log the error only on the first failure so we don't fill up the
      // JobTracker's log with these messages.
      if (!lastReloadAttemptFailed) {
        LOG.error("Failed to reload allocations file - " +
            "will use existing allocations.", e);
      }
      lastReloadAttemptFailed = true;
    }
  }
  return reloaded;
}
java
{ "resource": "" }
q160935
PoolManager.getMinSlots
train
/** Minimum guaranteed slots for a pool and task type; 0 when unconfigured. */
public int getMinSlots(String pool, TaskType taskType) {
  Map<String, Integer> allocationMap;
  if (taskType == TaskType.MAP) {
    allocationMap = mapAllocs;
  } else {
    allocationMap = reduceAllocs;
  }
  Integer alloc = allocationMap.get(pool);
  if (alloc == null) {
    return 0;
  }
  return alloc;
}
java
{ "resource": "" }
q160936
PoolManager.addJob
train
/** Registers a job with its resolved pool, logging the originally requested pool. */
public synchronized void addJob(JobInProgress job) {
  final String poolName = getPoolName(job);
  final String requestedPool = job.getJobConf().get(EXPLICIT_POOL_PROPERTY);
  LOG.info("Adding job " + job.getJobID() + " to pool " + poolName
      + ", originally from pool " + requestedPool);
  getPool(poolName).addJob(job);
}
java
{ "resource": "" }
q160937
PoolManager.removeJob
train
/**
 * Removes a job: first from the pool it currently resolves to, then — since
 * pool membership may have shifted after submission — from every pool.
 * Logs an error if the job is found nowhere.
 */
public synchronized void removeJob(JobInProgress job) {
  if (getPool(getPoolName(job)).removeJob(job)) {
    return;
  }
  // Job wasn't found in this pool. Search for the job in all the pools
  // (the pool may have been created after the job started).
  for (Pool pool : getPools()) {
    if (pool.removeJob(job)) {
      LOG.info("Removed job " + job.jobId + " from pool " + pool.getName() +
          " instead of pool " + getPoolName(job));
      return;
    }
  }
  LOG.error("removeJob: Couldn't find job " + job.jobId +
      " in any pool, should have been in pool " + getPoolName(job));
}
java
{ "resource": "" }
q160938
PoolManager.setPool
train
/**
 * Moves a job to a new pool: removes it from its current pool, records the
 * explicit pool in the job conf, then re-adds it so the pool name resolves
 * afresh. The remove MUST precede the conf update, since removal is keyed on
 * the job's current pool name.
 */
public synchronized void setPool(JobInProgress job, String pool) { removeJob(job); job.getJobConf().set(EXPLICIT_POOL_PROPERTY, pool); addJob(job); }
java
{ "resource": "" }
q160939
PoolManager.getPoolName
train
/** Resolved pool name for a job: trimmed explicit name, after redirects. */
public synchronized String getPoolName(JobInProgress job) {
  final String name = getExplicitPoolName(job).trim();
  final String redirect = poolRedirectMap.get(name);
  return redirect != null ? redirect : name;
}
java
{ "resource": "" }
q160940
PoolManager.checkValidPoolProperty
train
public synchronized void checkValidPoolProperty(JobInProgress job) throws InvalidJobConfException { if (!strictPoolsMode) { return; } JobConf conf = job.getJobConf(); // Pool name needs to be lower cased. String poolName = conf.get(EXPLICIT_POOL_PROPERTY); if (poolName == null) { return; } else { poolName = poolName.toLowerCase(); } if (poolNamesInAllocFile.contains(poolName)) { return; } throw new InvalidJobConfException( "checkValidPoolProperty: Pool name " + conf.get(EXPLICIT_POOL_PROPERTY) + " set with Hadoop property " + EXPLICIT_POOL_PROPERTY + " does not exist. " + "Please check for typos in the pool name."); }
java
{ "resource": "" }
q160941
PoolManager.getPoolNames
train
/** Returns all known pool names, sorted alphabetically. */
public synchronized Collection<String> getPoolNames() {
  List<String> names = new ArrayList<String>();
  for (Pool p : getPools()) {
    names.add(p.getName());
  }
  Collections.sort(names);
  return names;
}
java
{ "resource": "" }
q160942
PoolManager.canBePreempted
train
public boolean canBePreempted(String pool) { Boolean result = canBePreempted.get(pool); return result == null ? true : result; // Default is true }
java
{ "resource": "" }
q160943
PoolManager.fifoWeight
train
public boolean fifoWeight(String pool) { Boolean result = poolFifoWeight.get(pool); return result == null ? false : result; // Default is false }
java
{ "resource": "" }
q160944
PoolManager.getMinSharePreemptionTimeout
train
/**
 * Preemption timeout for a pool's min share, falling back to the default
 * when the pool has no explicit setting.
 */
public long getMinSharePreemptionTimeout(String pool) {
  // Single map lookup instead of containsKey() followed by get().
  Long timeout = minSharePreemptionTimeouts.get(pool);
  return timeout != null ? timeout : defaultMinSharePreemptionTimeout;
}
java
{ "resource": "" }
q160945
PoolManager.getMaxSlots
train
/**
 * Configured slot cap for a pool and task type; pools without a cap are
 * unlimited (Integer.MAX_VALUE).
 */
int getMaxSlots(String poolName, TaskType taskType) {
  Map<String, Integer> maxMap =
      (taskType == TaskType.MAP ? poolMaxMaps : poolMaxReduces);
  // Single map lookup instead of containsKey() followed by get().
  Integer max = maxMap.get(poolName);
  return max != null ? max : Integer.MAX_VALUE;
}
java
{ "resource": "" }
q160946
PoolManager.resetRunningTasks
train
/** Zeroes the running-task counter of every pool for the given task type. */
public void resetRunningTasks(TaskType type) {
  Map<String, Integer> runningMap =
      (type == TaskType.MAP ? poolRunningMaps : poolRunningReduces);
  for (Map.Entry<String, Integer> entry : runningMap.entrySet()) {
    entry.setValue(0);
  }
}
java
{ "resource": "" }
q160947
PoolManager.incRunningTasks
train
/**
 * Adds {@code inc} to the pool's running-task counter for the given type,
 * treating an unknown pool as starting from 0.
 */
public void incRunningTasks(String poolName, TaskType type, int inc) {
  Map<String, Integer> runningMap =
      (type == TaskType.MAP ? poolRunningMaps : poolRunningReduces);
  // One get + one put instead of containsKey/put/get/put.
  Integer current = runningMap.get(poolName);
  runningMap.put(poolName, (current == null ? 0 : current) + inc);
}
java
{ "resource": "" }
q160948
PoolManager.getRunningTasks
train
/** Current running-task count for a pool; 0 when the pool is unknown. */
public int getRunningTasks(String poolName, TaskType type) {
  Map<String, Integer> runningMap =
      (type == TaskType.MAP ? poolRunningMaps : poolRunningReduces);
  // Single map lookup instead of containsKey() followed by get().
  Integer running = runningMap.get(poolName);
  return running != null ? running : 0;
}
java
{ "resource": "" }
q160949
PoolManager.isMaxTasks
train
/** True when the pool has reached its configured slot cap for this task type. */
public boolean isMaxTasks(String poolName, TaskType type) {
  int running = getRunningTasks(poolName, type);
  int cap = getMaxSlots(poolName, type);
  return running >= cap;
}
java
{ "resource": "" }
q160950
PoolManager.checkMinimumSlotsAvailable
train
/**
 * Verifies that the per-pool minimum slot guarantees do not exceed the
 * cluster's capacity for the given task type.
 * @return false (after a warning) when configured minimums exceed capacity
 */
public boolean checkMinimumSlotsAvailable(ClusterStatus clusterStatus,
    TaskType type) {
  Map<String, Integer> poolToMinSlots =
      (type == TaskType.MAP) ? mapAllocs : reduceAllocs;
  int totalSlots = (type == TaskType.MAP)
      ? clusterStatus.getMaxMapTasks()
      : clusterStatus.getMaxReduceTasks();
  int totalMinSlots = 0;
  for (int minSlots : poolToMinSlots.values()) {
    totalMinSlots += minSlots;
  }
  if (totalMinSlots > totalSlots) {
    LOG.warn(String.format(
        "Bad minimum %s slot configuration. cluster:%s totalMinSlots:%s",
        type, totalSlots, totalMinSlots));
    return false;
  }
  // BUG FIX: the success log previously passed totalSlots for BOTH format
  // arguments, so totalMinSlots was never reported.
  LOG.info(String.format(
      "Minimum %s slots checked. cluster:%s totalMinSlots:%s",
      type, totalSlots, totalMinSlots));
  return true;
}
java
{ "resource": "" }
q160951
HttpImageUploadChannel.start
train
/**
 * Opens the upload session: sends a zero-length "start" segment carrying the
 * upload metadata headers and records the server-assigned session id from
 * the response headers. Any failure is recorded via setErrorStatus (which
 * disables the channel) rather than thrown to the caller.
 */
public void start() {
  try {
    if (init) {
      setErrorStatus("Cannot initialize multiple times", null);
      return;
    }
    init = true;
    // Zero-length body: this request only establishes the session.
    HttpPost postRequest = setupRequest(new ByteArrayOutputStream(0));
    UploadImageParam.setHeaders(postRequest, journalId, namespaceInfoString,
        epoch, txid, 0, segmentId++, false);
    HttpClient httpClient = new DefaultHttpClient();
    HttpResponse response = httpClient.execute(postRequest);
    if (response.getStatusLine().getStatusCode() ==
        HttpServletResponse.SC_NOT_ACCEPTABLE) {
      throwIOException("Error when starting upload to : " + uri + " status: "
          + response.getStatusLine().toString());
    }
    // get the session id
    for (Header h : response.getAllHeaders()) {
      if (h.getName().equals("sessionId")) {
        sessionId = Long.parseLong(h.getValue());
        break;
      }
    }
    // we must have the session id
    if (sessionId < 0) {
      throw new IOException("Session id is missing");
    }
  } catch (Exception e) {
    setErrorStatus("Exception when starting upload channel for: " + uri, e);
  }
}
java
{ "resource": "" }
q160952
HttpImageUploadChannel.send
train
/**
 * Queues one image chunk for asynchronous upload.
 * Backpressure: waits up to WAIT_NEXT_BUFFER_TIME_OUT_SECONDS for a free
 * buffer slot; on timeout or any other failure the channel is disabled via
 * setErrorStatus. No-op when the channel is already disabled.
 */
public void send(ByteArrayOutputStream bos) {
  try {
    if (this.isDisabled) {
      return;
    }
    if (available.tryAcquire(WAIT_NEXT_BUFFER_TIME_OUT_SECONDS,
        TimeUnit.SECONDS)) {
      tasks.add(sendExecutor.submit(new SendWorker(bos, segmentId++, false)));
    } else {
      setErrorStatus("Number of chunks in the queue to be send exceeded the configured number "
          + maxBufferedChunks, null);
    }
  } catch (Exception e) {
    setErrorStatus("Exception when submitting a task", e);
  }
}
java
{ "resource": "" }
q160953
HttpImageUploadChannel.close
train
/**
 * Finishes the upload: submits a final zero-length "close" segment, then
 * waits for every queued send task to complete. Failures are recorded via
 * setErrorStatus. Idempotent; no-op when already closed or disabled.
 */
public void close() {
  if (this.isDisabled || this.closed) {
    // parent stream needs to check for success explicitly
    return;
  }
  closed = true;
  try {
    // send close request
    tasks.add(sendExecutor.submit(new SendWorker(
        new ByteArrayOutputStream(0), segmentId++, true)));
    // wait for all tasks to complete
    for (Future<Void> task : tasks) {
      task.get();
    }
  } catch (InterruptedException e) {
    setErrorStatus("Interrupted exception", e);
  } catch (ExecutionException e) {
    setErrorStatus("Execution exception", e);
  } finally {
    // close the executor
    sendExecutor.shutdownNow();
  }
}
java
{ "resource": "" }
q160954
HttpImageUploadChannel.setupRequest
train
private HttpPost setupRequest(ByteArrayOutputStream bos) { ContentBody cb = new ByteArrayBody(bos.toByteArray(), "image"); HttpPost postRequest = new HttpPost(uri + "/uploadImage"); MultipartEntity reqEntity = new MultipartEntity( HttpMultipartMode.BROWSER_COMPATIBLE); // add a single part to the request reqEntity.addPart("file", cb); postRequest.setEntity(reqEntity); return postRequest; }
java
{ "resource": "" }
q160955
HttpImageUploadChannel.setErrorStatus
train
/**
 * Records a failure: remembers an IOException describing it, disables the
 * channel so no further writes are accepted, and shuts down the send
 * executor.
 * @param e underlying cause; may be null
 */
void setErrorStatus(String msg, Exception e) {
  this.e = new IOException(msg + " " + (e == null ? "" : e.toString()));
  // no more writes will be accepted
  this.isDisabled = true;
  // close the executor
  sendExecutor.shutdown();
  LOG.error(msg, e);
}
java
{ "resource": "" }
q160956
FairSchedulerAdmissionControlServlet.showJobsNotAdmitted
train
/**
 * Writes the "Not Admitted Jobs" HTML section: a text filter, per-column
 * toggle checkboxes, and a sortable table of jobs waiting for admission.
 * Rows are restricted to the given user/pool filter sets; a null set means
 * no filtering on that dimension.
 */
private void showJobsNotAdmitted( PrintWriter out, Set<String> userFilterSet, Set<String> poolFilterSet) { out.print("<h2>Not Admitted Jobs</h2>\n"); out.print("<b>Filter</b> " + "<input type=\"text\" onkeyup=\"filterTables(this.value)\" " + "id=\"NotAdmittedJobsTableFilter\">" + "<input type=\"checkbox\" id=\"SubmittedTimeFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Submitted Time " + "<input type=\"checkbox\" id=\"JobIDFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>JobID " + "<input type=\"checkbox\" id=\"UserFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>User " + "<input type=\"checkbox\" id=\"PoolFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Pool " + "<input type=\"checkbox\" id=\"PrioFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Priority" + "<br><br>\n"); out.print("<script type=\"text/javascript\">var inputRJF = " + "document.getElementById('NotAdmittedJobsTableFilter');</script>"); out.print("<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\" " + "id=\"NotAdmittedJobsTable\" class=\"tablesorter\">\n"); out.printf("<thead><tr>" + "<th>Submitted Time</th>" + "<th>JobID</th>" + "<th>User</th>" + "<th>Pool</th>" + "<th>Priority</th>" + "<th>Reason</th>" + "<th>Job Position</th>" + "<th>ETA to Admission (secs)</th>"); out.print("</tr></thead><tbody>\n"); Collection<NotAdmittedJobInfo> notAdmittedJobInfos = scheduler.getNotAdmittedJobs(); for (NotAdmittedJobInfo jobInfo : notAdmittedJobInfos) { if ((userFilterSet != null) && !userFilterSet.contains(jobInfo.getUser())) { continue; } if ((poolFilterSet != null) && !poolFilterSet.contains(jobInfo.getPool())) { continue; } out.printf("<tr id=\"%s\">\n", jobInfo.getJobName()); out.printf("<td>%s</td>\n", DATE_FORMAT.format(jobInfo.getStartDate())); out.printf("<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>", jobInfo.getJobName(), jobInfo.getJobName()); out.printf("<td>%s</td>\n", 
jobInfo.getUser()); out.printf("<td>%s</td>\n", jobInfo.getPool()); out.printf("<td>%s</td>\n", jobInfo.getPriority()); out.printf("<td>%s</td>\n", jobInfo.getReason()); out.printf("<td>%d</td>\n", jobInfo.getHardAdmissionPosition()); out.printf("<td>%d</td>\n", jobInfo.getEstimatedHardAdmissionEntranceSecs()); out.print("</tr>\n"); } out.print("</tbody></table>\n"); }
java
{ "resource": "" }
q160957
Statistics.addSourceFile
train
/**
 * Accounts one source file toward RAID statistics: increments the per-state
 * counters and updates the estimated parity/source size totals.
 * @return true iff the file still needs raiding (NOT_RAIDED_BUT_SHOULD)
 * @throws IOException propagated from the state check / size computation
 */
public boolean addSourceFile(FileSystem fs, PolicyInfo info, FileStatus src,
    RaidState.Checker checker, long now, int targetReplication)
    throws IOException {
  RaidState state = checker.check(info, src, now, false);
  Counters counters = stateToSourceCounters.get(state);
  counters.inc(src);
  if (state == RaidState.RAIDED) {
    incRaided(src);
    long paritySize = computeParitySize(src, targetReplication);
    estimatedParitySize += paritySize;
    estimatedDoneParitySize += paritySize;
    estimatedDoneSourceSize += src.getLen() * targetReplication;
    return false;
  }
  if (state == RaidState.NOT_RAIDED_BUT_SHOULD) {
    estimatedDoneParitySize += computeParitySize(src, targetReplication);
    estimatedDoneSourceSize += src.getLen() * targetReplication;
    return true;
  }
  return false;
}
java
{ "resource": "" }
q160958
Statistics.getSaving
train
public long getSaving(Configuration conf) { try { DFSClient dfs = ((DistributedFileSystem)FileSystem.get(conf)).getClient(); Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED); long physical = raidedCounters.getNumBytes() + parityCounters.getNumBytes(); long logical = raidedCounters.getNumLogical(); return logical * dfs.getDefaultReplication() - physical; } catch (Exception e) { return -1; } }
java
{ "resource": "" }
q160959
Statistics.getDoneSaving
train
public long getDoneSaving(Configuration conf) { try { DFSClient dfs = ((DistributedFileSystem)FileSystem.get(conf)).getClient(); Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED); Counters shouldRaidCounters = stateToSourceCounters.get(RaidState.NOT_RAIDED_BUT_SHOULD); long physical = estimatedDoneSourceSize + estimatedDoneParitySize; long logical = raidedCounters.getNumLogical() + shouldRaidCounters.getNumLogical(); return logical * dfs.getDefaultReplication() - physical; } catch (Exception e) { return -1; } }
java
{ "resource": "" }
q160960
Configuration.removeDefaultResource
train
public static synchronized void removeDefaultResource(String name) { if(defaultResources.contains(name)) { defaultResources.remove(name); for(Configuration conf : REGISTRY.keySet()) { if(conf.loadDefaults) { conf.reloadConfiguration(); } } } }
java
{ "resource": "" }
q160961
Configuration.loadEntireJsonObject
train
private void loadEntireJsonObject(JSONObject json) throws JSONException { Iterator<?> it = json.keys(); while (it.hasNext()) { // This key is something like core-site.xml or hdfs-site.xml Object obj = it.next(); if (!(obj instanceof String)) { LOG.warn("Object not instance of string : " + obj + " skipping"); continue; } String key = (String) obj; JSONObject partition = json.getJSONObject(key); loadJsonResource(partition, properties, key); } }
java
{ "resource": "" }
q160962
Configuration.xmlToThrift
train
public String xmlToThrift(String name) { name = name.replace("-custom.xml", ""); name = name.replace(".xml", ""); name = name.replace("-", "_"); return name; }
java
{ "resource": "" }
q160963
Configuration.instantiateJsonObject
train
public JSONObject instantiateJsonObject(InputStream in) throws IOException, JSONException { BufferedReader reader = new BufferedReader(new InputStreamReader(in)); StringBuffer contents = new StringBuffer(); String text = null; while ((text = reader.readLine()) != null) { contents.append(text).append(System.getProperty("line.separator")); } in.close(); JSONObject json = new JSONObject(contents.toString()); return json; }
java
{ "resource": "" }
q160964
Configuration.getJsonConfig
train
public JSONObject getJsonConfig(String name) throws IOException, JSONException { if (name.endsWith(".xml")) { URL url = getResource(MATERIALIZEDJSON); if (url != null) { InputStream in = url.openStream(); if (in != null) { JSONObject json = instantiateJsonObject(in); if (json.has(xmlToThrift(name))) { return json.getJSONObject(xmlToThrift(name)); } } } } return null; }
java
{ "resource": "" }
q160965
Configuration.getJsonConfig
train
public JSONObject getJsonConfig(Path name) throws IOException, JSONException { String pathString = name.toUri().getPath(); String xml = new Path(pathString).getName(); File jsonFile = new File(pathString.replace(xml, MATERIALIZEDJSON)) .getAbsoluteFile(); if (jsonFile.exists()) { InputStream in = new BufferedInputStream(new FileInputStream(jsonFile)); if (in != null) { JSONObject json = instantiateJsonObject(in); // Try to load the xml entity inside the json blob. if (json.has(xmlToThrift(xml))) { return json.getJSONObject(xmlToThrift(xml)); } } } return null; }
java
{ "resource": "" }
q160966
Configuration.convertFile
train
private Object convertFile(Object name) throws IOException, JSONException{ if (name instanceof String) { String file = (String) name; JSONObject json = getJsonConfig(file); if (json!=null) { return json; } } else if (name instanceof Path) { Path file = (Path)name; JSONObject json = getJsonConfig(file); if (json != null) { return json; } } return name; }
java
{ "resource": "" }
q160967
Configuration.loadJsonResource
train
private void loadJsonResource(JSONObject json, Properties properties, Object name) throws JSONException { Iterator<?> keys = json.keys(); while (keys.hasNext()) { Object obj = keys.next(); if (!(obj instanceof String)) { LOG.warn("Object not instance of string : " + obj + " skipping"); continue; } String key = (String) obj; // can't have . in thrift fields so we represent . with _ String keyUnderscoresToDots = key.replace("_", "."); // actual _ are represented as __ in thrift schema keyUnderscoresToDots = keyUnderscoresToDots.replace("..", "_"); if (!json.isNull(key)) { Object value = json.get(key); String stringVal = ""; if (value instanceof String) { stringVal = (String)value; } else if (value instanceof Integer) { stringVal = new Integer((Integer)value).toString(); } else if (value instanceof Long) { stringVal = new Long((Long)value).toString(); } else if (value instanceof Double) { stringVal = new Double((Double)value).toString(); } else if (value instanceof Boolean) { stringVal = new Boolean((Boolean)value).toString(); } else if (value instanceof JSONObject) { loadJsonResource((JSONObject)value, properties, name); continue; } else { LOG.warn("unsupported value in json object: " + value); } if (!finalParameters.contains(keyUnderscoresToDots)) { properties.setProperty(keyUnderscoresToDots, stringVal); updatingResource.put(keyUnderscoresToDots, name.toString()); } else { LOG.warn(name+":a attempt to override final parameter: "+ keyUnderscoresToDots+"; Ignoring."); } } } }
java
{ "resource": "" }
q160968
DataSegmentWriter.writeTo
train
void writeTo(DataOutputStream out) throws IOException { // We do the UTF8 conversion ourselves instead of relying on DataOutput // to ensure we strictly follow UTF-8 standard, as well as better performance, // and save the code to count the UTF-8 bytes (we need that to calculate // the total length. int length = size() - 4; out.writeInt(length); out.writeShort(codecNameUTF8.length); out.write(codecNameUTF8); if (codecNameUTF8.length == 0) { out.writeLong(crc32Value); } out.write(storedData.getData(), 0, storedData.getLength()); }
java
{ "resource": "" }
q160969
DataSegmentWriter.getCodecNameUTF8
train
static byte[] getCodecNameUTF8(String compressionCodecName) { byte[] codecNameBytes = CODEC_NAME_CACHE.get(compressionCodecName); if (codecNameBytes == null) { try { codecNameBytes = compressionCodecName.getBytes("UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } CODEC_NAME_CACHE.put(compressionCodecName, codecNameBytes); } return codecNameBytes; }
java
{ "resource": "" }
q160970
BlockSender.sendChunks
train
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) throws IOException { // Sends multiple chunks in one packet with a single write(). int len = (int) Math.min(endOffset - offset, (((long) bytesPerChecksum) * ((long) maxChunks))); // truncate len so that any partial chunks will be sent as a final packet. // this is not necessary for correctness, but partial chunks are // ones that may be recomputed and sent via buffer copy, so try to minimize // those bytes if (len > bytesPerChecksum && len % bytesPerChecksum != 0) { len -= len % bytesPerChecksum; } if (len == 0) { return 0; } int numChunks = (len + bytesPerChecksum - 1)/bytesPerChecksum; int packetLen = len + numChunks*checksumSize + 4; pkt.clear(); // The packet format is documented in DFSOuputStream.Packet.getBuffer(). // Here we need to use the exact packet format since it can be received // by both of DFSClient, or BlockReceiver in the case of replication, which // uses the same piece of codes as receiving data from DFSOutputStream. // // write packet header pkt.putInt(packetLen); if (pktIncludeVersion) { pkt.putInt(packetVersion); } pkt.putLong(offset); pkt.putLong(seqno); pkt.put((byte)((offset + len >= endOffset) ? 1 : 0)); //why no ByteBuf.putBoolean()? pkt.putInt(len); int checksumOff = pkt.position(); byte[] buf = pkt.array(); blockReader.sendChunks(out, buf, offset, checksumOff, numChunks, len, crcUpdater, packetVersion); if (throttler != null) { // rebalancing so throttle throttler.throttle(packetLen); } return len; }
java
{ "resource": "" }
q160971
InitialReportWorker.insertCurrentStoredBlockIntoList
train
private void insertCurrentStoredBlockIntoList() { // index < 0 - block is already in the DN's list if (currentStoredBlock == null || currentStoredBlockIndex < 0) return; if (head == null) { // local list is empty, // make head and tail point to the input block head = currentStoredBlock; headIndex = currentStoredBlockIndex; tail = currentStoredBlock; tailIndex = currentStoredBlockIndex; // for sanity, make sure the block is not pointing to anything head.setNext(currentStoredBlockIndex, null); head.setPrevious(currentStoredBlockIndex, null); } else { // connect input block with current head head.setPrevious(headIndex, currentStoredBlock); currentStoredBlock.setNext(currentStoredBlockIndex, head); // stored block is the new head head = currentStoredBlock; headIndex = currentStoredBlockIndex; } // increment number of blocks in the local list addedBlocks++; // clear the current stored block information resetCurrentStoredBlock(); }
java
{ "resource": "" }
q160972
InitialReportWorker.processReport
train
static void processReport(FSNamesystem namesystem, Collection<Block> toRetry, BlockListAsLongs newReport, DatanodeDescriptor node, ExecutorService initialBlockReportExecutor) throws IOException { // spawn one thread for blocksPerShardBR blocks int numShards = Math .min( namesystem.parallelProcessingThreads, ((newReport.getNumberOfBlocks() + namesystem.parallelBRblocksPerShard - 1) / namesystem.parallelBRblocksPerShard)); List<Future<List<Block>>> workers = new ArrayList<Future<List<Block>>>( numShards); // submit tasks for execution for (int i = 0; i < numShards; i++) { workers.add(initialBlockReportExecutor.submit(new InitialReportWorker( newReport, i, numShards, node, namesystem.getNameNode() .shouldRetryAbsentBlocks(), namesystem))); } // get results and add to retry list if need try { for (Future<List<Block>> worker : workers) { if (namesystem.getNameNode().shouldRetryAbsentBlocks()) { toRetry.addAll(worker.get()); } else { worker.get(); } } } catch (ExecutionException e) { LOG.warn("Parallel report failed", e); throw new IOException(e); } catch (InterruptedException e) { throw new IOException("Interruption", e); } }
java
{ "resource": "" }
q160973
LeaseRenewal.computeRenewalPeriod
train
private long computeRenewalPeriod() { long hardLeaseLimit = conf.getLong( FSConstants.DFS_HARD_LEASE_KEY, FSConstants.LEASE_HARDLIMIT_PERIOD); long softLeaseLimit = conf.getLong( FSConstants.DFS_SOFT_LEASE_KEY, FSConstants.LEASE_SOFTLIMIT_PERIOD); long renewal = Math.min(hardLeaseLimit, softLeaseLimit) / 2; long hdfsTimeout = Client.getTimeout(conf); if (hdfsTimeout > 0) { renewal = Math.min(renewal, hdfsTimeout/2); } return renewal; }
java
{ "resource": "" }
q160974
InetSocketAddressFactory.createWithResolveRetry
train
public static InetSocketAddress createWithResolveRetry( String hostname, int port ) { return createWithResolveRetry( hostname, port, DEFAULT_DELAY_MILLIS, DEFAULT_MAX_ATTEMPTS ); }
java
{ "resource": "" }
q160975
InetSocketAddressFactory.createWithResolveRetry
train
public static InetSocketAddress createWithResolveRetry( String hostname, int port, int delayMillis, int maxAttempt ) { InetSocketAddress socketAddress; int attempts = 0; do { socketAddress = new InetSocketAddress(hostname, port); // if dns failed, try one more time if (socketAddress.isUnresolved()) { attempts++; LOG.info(String.format( "failed to resolve host %s, attempt %d", hostname, attempts )); try { Thread.sleep(delayMillis); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } else if (attempts > 0) { LOG.info( String.format("successful resolution on attempt %d", attempts) ); } } while (socketAddress.isUnresolved() && attempts < maxAttempt); return socketAddress; }
java
{ "resource": "" }
q160976
StructureGenerator.run
train
public int run(String[] args) throws Exception { int exitCode = 0; exitCode = init(args); if (exitCode != 0) { return exitCode; } genDirStructure(); output(new File(outDir, DIR_STRUCTURE_FILE_NAME)); genFileStructure(); outputFiles(new File(outDir, FILE_STRUCTURE_FILE_NAME)); return exitCode; }
java
{ "resource": "" }
q160977
StructureGenerator.getLeaves
train
private List<INode> getLeaves() { List<INode> leaveDirs = new ArrayList<INode>(); root.getLeaves(leaveDirs); return leaveDirs; }
java
{ "resource": "" }
q160978
StructureGenerator.genFileStructure
train
private void genFileStructure() { List<INode> leaves = getLeaves(); int totalLeaves = leaves.size(); for (int i=0; i<numOfFiles; i++) { int leaveNum = r.nextInt(totalLeaves); double fileSize; do { fileSize = r.nextGaussian()+avgFileSize; } while (fileSize<0); leaves.get(leaveNum).addChild( new FileINode(FILE_NAME_PREFIX+i, fileSize)); } }
java
{ "resource": "" }
q160979
StructureGenerator.output
train
private void output(File outFile) throws FileNotFoundException { System.out.println("Printing to " + outFile.toString()); PrintStream out = new PrintStream(outFile); root.output(out, null); out.close(); }
java
{ "resource": "" }
q160980
NotAdmittedJobInfo.getReasoning
train
public static String getReasoning(final BlockedAdmissionReason reason, final int reasonLimit, final int reasonActualValue, final int hardAdmissionPosition, JobAdmissionWaitInfo jobAdmissionWaitInfo) { if (reason == BlockedAdmissionReason.HARD_CLUSTER_WIDE_MAX_TASKS_EXCEEDED) { if (jobAdmissionWaitInfo == null) { return reason.toString() + "."; } else { StringBuffer sb = new StringBuffer(); sb.append(reason.toString() + ". In order to protect the jobtracker " + "from exceeding hard memory limits based on the number of " + "total tracked tasks, the cluster is now in cluster-wide " + "hard admission control and accepts jobs on a first come, " + "first served (FIFO) basis. Your job will be admitted " + "according to this policy."); if (jobAdmissionWaitInfo.getAverageCount() > 0) { sb.append(" The past " + jobAdmissionWaitInfo.getAverageCount() + " jobs admitted while in hard admission control were " + "added in an average of " + jobAdmissionWaitInfo.getAverageWaitMsecsPerHardAdmissionJob() + " msecs, giving this job a rough estimated wait time of " + (jobAdmissionWaitInfo.getAverageWaitMsecsPerHardAdmissionJob() * (hardAdmissionPosition + 1)) + " msecs."); } return sb.toString(); } } else if (reason == BlockedAdmissionReason.SOFT_CLUSTER_WIDE_MAX_TASKS_EXCEEEDED) { return reason.toString() + "."; } else { return reason.toString() + " " + reasonActualValue + " exceeds " + reasonLimit + "."; } }
java
{ "resource": "" }
q160981
PipesPartitioner.getPartition
train
public int getPartition(K key, V value, int numPartitions) { Integer result = cache.get(); if (result == null) { return part.getPartition(key, value, numPartitions); } else { return result; } }
java
{ "resource": "" }
q160982
BlockReconstructor.isParityFile
train
boolean isParityFile(Path p, Codec c) { return isParityFile(p.toUri().getPath(), c); }
java
{ "resource": "" }
q160983
BlockReconstructor.reconstructFile
train
boolean reconstructFile(Path srcPath, Context context) throws IOException, InterruptedException { Progressable progress = context; if (progress == null) { progress = RaidUtils.NULL_PROGRESSABLE; } FileSystem fs = srcPath.getFileSystem(getConf()); FileStatus srcStat = null; try { srcStat = fs.getFileStatus(srcPath); } catch (FileNotFoundException ex) { return false; } if (RaidNode.isParityHarPartFile(srcPath)) { return processParityHarPartFile(srcPath, progress); } // Reconstruct parity file for (Codec codec : Codec.getCodecs()) { if (isParityFile(srcPath, codec)) { Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); return processParityFile(srcPath, decoder, context); } } // Reconstruct source file without connecting to stripe store for (Codec codec : Codec.getCodecs()) { ParityFilePair ppair = ParityFilePair.getParityFile( codec, srcStat, getConf()); if (ppair != null) { Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); return processFile(srcPath, ppair, decoder, false, context); } } // Reconstruct source file through stripe store for (Codec codec : Codec.getCodecs()) { if (!codec.isDirRaid) { continue; } try { // try to fix through the stripe store. Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); if (processFile(srcPath, null, decoder, true, context)) { return true; } } catch (Exception ex) { LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec, srcPath, -1, LOGTYPES.OFFLINE_RECONSTRUCTION_USE_STRIPE, fs, ex, context); } } return false; }
java
{ "resource": "" }
q160984
BlockReconstructor.sortLostFiles
train
void sortLostFiles(List<String> files) { // TODO: We should first fix the files that lose more blocks Comparator<String> comp = new Comparator<String>() { public int compare(String p1, String p2) { Codec c1 = null; Codec c2 = null; for (Codec codec : Codec.getCodecs()) { if (isParityFile(p1, codec)) { c1 = codec; } else if (isParityFile(p2, codec)) { c2 = codec; } } if (c1 == null && c2 == null) { return 0; // both are source files } if (c1 == null && c2 != null) { return -1; // only p1 is a source file } if (c2 == null && c1 != null) { return 1; // only p2 is a source file } return c2.priority - c1.priority; // descending order } }; Collections.sort(files, comp); }
java
{ "resource": "" }
q160985
BlockReconstructor.getDFS
train
protected DistributedFileSystem getDFS(Path p) throws IOException { FileSystem fs = p.getFileSystem(getConf()); DistributedFileSystem dfs = null; if (fs instanceof DistributedFileSystem) { dfs = (DistributedFileSystem) fs; } else if (fs instanceof FilterFileSystem) { FilterFileSystem ffs = (FilterFileSystem) fs; if (ffs.getRawFileSystem() instanceof DistributedFileSystem) { dfs = (DistributedFileSystem) ffs.getRawFileSystem(); } } return dfs; }
java
{ "resource": "" }
q160986
BlockReconstructor.checkLostBlocks
train
void checkLostBlocks(List<Block> blocksLostChecksum, List<Block> blocksLostStripe, Path p, Codec codec) throws IOException { StringBuilder message = new StringBuilder(); if (blocksLostChecksum.size() > 0) { message.append("Lost " + blocksLostChecksum.size() + " checksums in blocks:"); for (Block blk : blocksLostChecksum) { message.append(" "); message.append(blk.toString()); } } if (blocksLostStripe.size() > 0) { message.append("Lost " + blocksLostStripe.size() + " stripes in blocks:"); for (Block blk : blocksLostStripe) { message.append(" "); message.append(blk.toString()); } } if (message.length() == 0) return; message.append(" in file " + p); throw new IOException(message.toString()); }
java
{ "resource": "" }
q160987
BlockReconstructor.processParityHarPartFile
train
boolean processParityHarPartFile(Path partFile, Progressable progress) throws IOException { LOG.info("Processing parity HAR file " + partFile); // Get some basic information. DistributedFileSystem dfs = getDFS(partFile); FileStatus partFileStat = dfs.getFileStatus(partFile); long partFileBlockSize = partFileStat.getBlockSize(); LOG.info(partFile + " has block size " + partFileBlockSize); // Find the path to the index file. // Parity file HARs are only one level deep, so the index files is at the // same level as the part file. // Parses through the HAR index file. HarIndex harIndex = HarIndex.getHarIndex(dfs, partFile); String uriPath = partFile.toUri().getPath(); int numBlocksReconstructed = 0; List<LocatedBlockWithMetaInfo> lostBlocks = lostBlocksInFile(dfs, uriPath, partFileStat); if (lostBlocks.size() == 0) { LOG.warn("Couldn't find any lost blocks in HAR file " + partFile + ", ignoring..."); return false; } for (LocatedBlockWithMetaInfo lb: lostBlocks) { Block lostBlock = lb.getBlock(); long lostBlockOffset = lb.getStartOffset(); File localBlockFile = File.createTempFile(lostBlock.getBlockName(), ".tmp"); localBlockFile.deleteOnExit(); try { processParityHarPartBlock(dfs, partFile, lostBlockOffset, partFileStat, harIndex, localBlockFile, progress); // Now that we have recovered the part file block locally, send it. computeMetadataAndSendReconstructedBlock(localBlockFile, lostBlock, localBlockFile.length(), lb.getLocations(), lb.getDataProtocolVersion(), lb.getNamespaceID(), progress); numBlocksReconstructed++; } finally { localBlockFile.delete(); } progress.progress(); } LOG.info("Reconstructed " + numBlocksReconstructed + " blocks in " + partFile); return true; }
java
{ "resource": "" }
q160988
BlockReconstructor.processParityHarPartBlock
train
private void processParityHarPartBlock(FileSystem dfs, Path partFile, long blockOffset, FileStatus partFileStat, HarIndex harIndex, File localBlockFile, Progressable progress) throws IOException { String partName = partFile.toUri().getPath(); // Temporarily. partName = partName.substring(1 + partName.lastIndexOf(Path.SEPARATOR)); OutputStream out = new FileOutputStream(localBlockFile); try { // A HAR part file block could map to several parity files. We need to // use all of them to recover this block. final long blockEnd = Math.min(blockOffset + partFileStat.getBlockSize(), partFileStat.getLen()); for (long offset = blockOffset; offset < blockEnd; ) { HarIndex.IndexEntry entry = harIndex.findEntry(partName, offset); if (entry == null) { String msg = "Lost index file has no matching index entry for " + partName + ":" + offset; LOG.warn(msg); throw new IOException(msg); } Path parityFile = new Path(entry.fileName); Encoder encoder = null; for (Codec codec : Codec.getCodecs()) { if (isParityFile(parityFile, codec)) { encoder = new Encoder(getConf(), codec); } } if (encoder == null) { String msg = "Could not figure out codec correctly for " + parityFile; LOG.warn(msg); throw new IOException(msg); } Path srcFile = RaidUtils.sourcePathFromParityPath(parityFile, dfs); if (null == srcFile) { String msg = "Can not find the source path for parity file: " + parityFile; LOG.warn(msg); throw new IOException(msg); } FileStatus srcStat = dfs.getFileStatus(srcFile); if (srcStat.getModificationTime() != entry.mtime) { String msg = "Modification times of " + parityFile + " and " + srcFile + " do not match."; LOG.warn(msg); throw new IOException(msg); } long lostOffsetInParity = offset - entry.startOffset; LOG.info(partFile + ":" + offset + " maps to " + parityFile + ":" + lostOffsetInParity + " and will be recovered from " + srcFile); encoder.recoverParityBlockToStream(dfs, srcStat, srcStat.getBlockSize(), parityFile, lostOffsetInParity, out, progress); // Finished recovery of one 
parity block. Since a parity block has the // same size as a source block, we can move offset by source block // size. offset += srcStat.getBlockSize(); LOG.info("Recovered " + srcStat.getBlockSize() + " part file bytes "); if (offset > blockEnd) { String msg = "Recovered block spills across part file blocks. Cannot continue"; throw new IOException(msg); } progress.progress(); } } finally { out.close(); } }
java
{ "resource": "" }
q160989
BlockReconstructor.computeMetadata
train
DataInputStream computeMetadata(Configuration conf, InputStream dataStream) throws IOException { ByteArrayOutputStream mdOutBase = new ByteArrayOutputStream(1024*1024); DataOutputStream mdOut = new DataOutputStream(mdOutBase); // First, write out the version. mdOut.writeShort(FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM); // Create a summer and write out its header. int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512); DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, bytesPerChecksum); sum.writeHeader(mdOut); // Buffer to read in a chunk of data. byte[] buf = new byte[bytesPerChecksum]; // Buffer to store the checksum bytes. byte[] chk = new byte[sum.getChecksumSize()]; // Read data till we reach the end of the input stream. int bytesSinceFlush = 0; while (true) { // Read some bytes. int bytesRead = dataStream.read(buf, bytesSinceFlush, bytesPerChecksum - bytesSinceFlush); if (bytesRead == -1) { if (bytesSinceFlush > 0) { boolean reset = true; sum.writeValue(chk, 0, reset); // This also resets the sum. // Write the checksum to the stream. mdOut.write(chk, 0, chk.length); bytesSinceFlush = 0; } break; } // Update the checksum. sum.update(buf, bytesSinceFlush, bytesRead); bytesSinceFlush += bytesRead; // Flush the checksum if necessary. if (bytesSinceFlush == bytesPerChecksum) { boolean reset = true; sum.writeValue(chk, 0, reset); // This also resets the sum. // Write the checksum to the stream. mdOut.write(chk, 0, chk.length); bytesSinceFlush = 0; } } byte[] mdBytes = mdOutBase.toByteArray(); return new DataInputStream(new ByteArrayInputStream(mdBytes)); }
java
{ "resource": "" }
q160990
BlockReconstructor.sendReconstructedBlock
train
private void sendReconstructedBlock(String datanode, final FileInputStream blockContents, final DataInputStream metadataIn, Block block, long blockSize, int dataTransferVersion, int namespaceId, Progressable progress) throws IOException { InetSocketAddress target = NetUtils.createSocketAddr(datanode); Socket sock = SocketChannel.open().socket(); int readTimeout = getConf().getInt(BlockIntegrityMonitor.BLOCKFIX_READ_TIMEOUT, HdfsConstants.READ_TIMEOUT); NetUtils.connect(sock, target, readTimeout); sock.setSoTimeout(readTimeout); int writeTimeout = getConf().getInt(BlockIntegrityMonitor.BLOCKFIX_WRITE_TIMEOUT, HdfsConstants.WRITE_TIMEOUT); OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(baseStream, FSConstants. SMALL_BUFFER_SIZE)); boolean corruptChecksumOk = false; boolean chunkOffsetOK = false; boolean verifyChecksum = true; boolean transferToAllowed = false; try { LOG.info("Sending block " + block + " from " + sock.getLocalSocketAddress().toString() + " to " + sock.getRemoteSocketAddress().toString()); BlockSender blockSender = new BlockSender(namespaceId, block, blockSize, 0, blockSize, corruptChecksumOk, chunkOffsetOK, verifyChecksum, transferToAllowed, dataTransferVersion >= DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION, new BlockWithChecksumFileReader.InputStreamWithChecksumFactory() { @Override public InputStream createStream(long offset) throws IOException { // we are passing 0 as the offset above, // so we can safely ignore // the offset passed return blockContents; } @Override public DataInputStream getChecksumStream() throws IOException { return metadataIn; } @Override public BlockDataFile.Reader getBlockDataFileReader() throws IOException { return BlockDataFile.getDummyDataFileFromFileChannel( blockContents.getChannel()).getReader(null); } }); WriteBlockHeader header = new WriteBlockHeader(new VersionAndOpcode( dataTransferVersion, 
DataTransferProtocol.OP_WRITE_BLOCK)); header.set(namespaceId, block.getBlockId(), block.getGenerationStamp(), 0, false, true, new DatanodeInfo(), 0, null, ""); header.writeVersionAndOpCode(out); header.write(out); blockSender.sendBlock(out, baseStream, null, progress); LOG.info("Sent block " + block + " to " + datanode); } finally { sock.close(); out.close(); } }
java
{ "resource": "" }
q160991
UserGroupInformation.login
train
public static UserGroupInformation login(Configuration conf ) throws LoginException { if (LOGIN_UGI == null) { LOGIN_UGI = UnixUserGroupInformation.login(conf); } return LOGIN_UGI; }
java
{ "resource": "" }
q160992
UserGroupInformation.getUGI
train
public static UserGroupInformation getUGI(Configuration conf) throws LoginException { UserGroupInformation ugi = null; if (conf.getBoolean(UGI_SOURCE, true)) { // get the ugi from configuration ugi = UnixUserGroupInformation.readFromConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME); } else { // get the ugi from Subject ugi = UserGroupInformation.getCurrentUGI(); } // get the ugi from unix if (ugi == null) { ugi = UnixUserGroupInformation.login(); UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, (UnixUserGroupInformation)ugi); } return ugi; }
java
{ "resource": "" }
q160993
UserGroupInformation.doAs
train
public <T> T doAs(PrivilegedAction<T> action) { return Subject.doAs(null, action); }
java
{ "resource": "" }
q160994
UserGroupInformation.doAs
train
public <T> T doAs(PrivilegedExceptionAction<T> action ) throws IOException, InterruptedException { try { return Subject.doAs(null, action); } catch (PrivilegedActionException pae) { Throwable cause = pae.getCause(); LOG.error("PriviledgedActionException as:"+this+" cause:"+cause); if (cause instanceof IOException) { throw (IOException) cause; } else if (cause instanceof Error) { throw (Error) cause; } else if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else if (cause instanceof InterruptedException) { throw (InterruptedException) cause; } else { throw new UndeclaredThrowableException(pae,"Unknown exception in doAs"); } } }
java
{ "resource": "" }
q160995
ValueAggregatorReducer.map
train
public void map(K1 arg0, V1 arg1, OutputCollector<Text, Text> arg2, Reporter arg3) throws IOException { throw new IOException ("should not be called\n"); }
java
{ "resource": "" }
q160996
RaidUtils.getFileRaidInfo
train
public static RaidInfo getFileRaidInfo(final FileStatus stat, Configuration conf, boolean skipHarChecking) throws IOException { // now look for the parity file ParityFilePair ppair = null; for (Codec c : Codec.getCodecs()) { ppair = ParityFilePair.getParityFile(c, stat, conf, skipHarChecking); if (ppair != null) { return new RaidInfo(c, ppair, c.parityLength); } } return new RaidInfo(null, ppair, 0); }
java
{ "resource": "" }
q160997
RaidUtils.getParityBlocks
train
private static BlockLocation[] getParityBlocks(final Path filePath, final long blockSize, final long numStripes, final RaidInfo raidInfo) throws IOException { FileSystem parityFS = raidInfo.parityPair.getFileSystem(); // get parity file metadata FileStatus parityFileStatus = raidInfo.parityPair.getFileStatus(); long parityFileLength = parityFileStatus.getLen(); if (parityFileLength != numStripes * raidInfo.parityBlocksPerStripe * blockSize) { throw new IOException("expected parity file of length" + (numStripes * raidInfo.parityBlocksPerStripe * blockSize) + " but got parity file of length " + parityFileLength); } BlockLocation[] parityBlocks = parityFS.getFileBlockLocations(parityFileStatus, 0L, parityFileLength); if (parityFS instanceof DistributedFileSystem || parityFS instanceof DistributedRaidFileSystem) { long parityBlockSize = parityFileStatus.getBlockSize(); if (parityBlockSize != blockSize) { throw new IOException("file block size is " + blockSize + " but parity file block size is " + parityBlockSize); } } else if (parityFS instanceof HarFileSystem) { LOG.debug("HAR FS found"); } else { LOG.warn("parity file system is not of a supported type"); } return parityBlocks; }
java
{ "resource": "" }
q160998
RaidUtils.checkParityBlocks
train
/**
 * Scans the parity file's blocks and records, per stripe in the half-open
 * range [startStripeIdx, endStripeIdx), how many parity blocks are corrupt.
 * Also verifies that the total number of parity blocks seen matches the
 * expected parityBlocksPerStripe * numStripes.
 *
 * @param filePath              source file path (used for log/context only)
 * @param corruptBlocksPerStripe out-param: stripe index -> corrupt count,
 *                               incremented for each corrupt parity block
 * @param blockSize             block size of the source (and parity) file
 * @param startStripeIdx        first stripe (inclusive) to record
 * @param endStripeIdx          last stripe (exclusive) to record
 * @param numStripes            total stripes in the source file
 * @param raidInfo              raid metadata holding the parity file pair
 * @throws IOException on inconsistent parity block sizes or counts
 */
private static void checkParityBlocks(final Path filePath,
    final Map<Integer, Integer> corruptBlocksPerStripe,
    final long blockSize, final long startStripeIdx,
    final long endStripeIdx, final long numStripes,
    final RaidInfo raidInfo) throws IOException {
  // get the blocks of the parity file
  // because of har, multiple blocks may be returned as one container block
  BlockLocation[] containerBlocks =
      getParityBlocks(filePath, blockSize, numStripes, raidInfo);
  long parityStripeLength =
      blockSize * ((long) raidInfo.parityBlocksPerStripe);
  long parityBlocksFound = 0L;
  for (BlockLocation cb : containerBlocks) {
    // A container must hold a whole number of parity blocks.
    if (cb.getLength() % blockSize != 0) {
      throw new IOException("container block size is not " +
          "multiple of parity block size");
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("found container with offset " + cb.getOffset() +
          ", length " + cb.getLength());
    }
    // Walk the container in block-size steps; each step is one parity block.
    for (long offset = cb.getOffset();
         offset < cb.getOffset() + cb.getLength();
         offset += blockSize) {
      long block = offset / blockSize;
      int stripe = (int) (offset / parityStripeLength);
      if (stripe < 0) {
        // before the beginning of the parity file
        continue;
      }
      if (stripe >= numStripes) {
        // past the end of the parity file
        break;
      }
      // Count every in-range parity block toward the expected total,
      // even those outside the stripe window we report on.
      parityBlocksFound++;
      if (stripe < startStripeIdx || stripe >= endStripeIdx) {
        // Outside the requested stripe window: counted but not recorded.
        continue;
      }
      if (isBlockCorrupt(cb)) {
        // NOTE(review): corruption is tested on the whole container block
        // (cb), so one corrupt container marks every stripe it spans.
        if (LOG.isDebugEnabled()) {
          LOG.debug("parity file for " + filePath.toString() +
              " corrupt in block " + block +
              ", stripe " + stripe + "/" + numStripes);
        }
        incCorruptBlocksPerStripe(corruptBlocksPerStripe, stripe);
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("parity file for " + filePath.toString() +
              " OK in block " + block +
              ", stripe " + stripe + "/" + numStripes);
        }
      }
    }
  }
  // Every stripe must contribute exactly parityBlocksPerStripe blocks.
  long parityBlocksExpected = raidInfo.parityBlocksPerStripe * numStripes;
  if (parityBlocksFound != parityBlocksExpected) {
    throw new IOException("expected " + parityBlocksExpected +
        " parity blocks but got " + parityBlocksFound);
  }
}
java
{ "resource": "" }
q160999
RaidUtils.sourcePathFromParityPath
train
public static Path sourcePathFromParityPath(Path parityPath, FileSystem fs) throws IOException { String parityPathStr = parityPath.toUri().getPath(); for (Codec codec : Codec.getCodecs()) { String prefix = codec.getParityPrefix(); if (parityPathStr.startsWith(prefix)) { // Remove the prefix to get the source file. String src = parityPathStr.replaceFirst(prefix, Path.SEPARATOR); Path srcPath = new Path(src); if (fs.exists(srcPath)) { return srcPath; } } } return null; }
java
{ "resource": "" }