_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160200
PoolGroupSchedulable.getPreemptQueue
train
public Queue<PoolSchedulable> getPreemptQueue() { // TODO: For now, we support only one kind of scheduling. // Also note that FAIR is PRIORITY with equal priorities (by default) ScheduleComparator sPreempt = null; if (preemptQueue == null) { ScheduleComparator sc = configManager.getPoolGroupComparator(getName()); if (sc == ScheduleComparator.PRIORITY) { sPreempt = ScheduleComparator.PRIORITY_PREEMPT; } else { throw new IllegalArgumentException("Unknown/misconfigured poolgroup"); } preemptQueue = createPoolQueue(sPreempt); } return preemptQueue; }
java
{ "resource": "" }
q160201
PoolGroupSchedulable.createPoolQueue
train
private Queue<PoolSchedulable> createPoolQueue( ScheduleComparator comparator) { int initCapacity = snapshotPools.size() == 0 ? 1 : snapshotPools.size(); Queue<PoolSchedulable> poolQueue = new PriorityQueue<PoolSchedulable>(initCapacity, comparator); poolQueue.addAll(snapshotPools); return poolQueue; }
java
{ "resource": "" }
q160202
PoolGroupSchedulable.getPool
train
public PoolSchedulable getPool(PoolInfo poolInfo) { PoolSchedulable pool = nameToMap.get(poolInfo); if (pool == null) { pool = new PoolSchedulable(poolInfo, getType(), configManager); PoolSchedulable prevPool = nameToMap.putIfAbsent(poolInfo, pool); if (prevPool != null) { pool = prevPool; } } return pool; }
java
{ "resource": "" }
q160203
JobContext.getCombinerClass
train
@SuppressWarnings("unchecked") public Class<? extends Reducer<?,?,?,?>> getCombinerClass() throws ClassNotFoundException { return (Class<? extends Reducer<?,?,?,?>>) conf.getClass(COMBINE_CLASS_ATTR, null); }
java
{ "resource": "" }
q160204
LongWritable.compareTo
train
public int compareTo(Object o) { long thisValue = this.value; long thatValue = ((LongWritable)o).value; return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1)); }
java
{ "resource": "" }
q160205
JSPUtil.processButtons
train
public static void processButtons(HttpServletRequest request, HttpServletResponse response, JobTracker tracker) throws IOException { if (conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && request.getParameter("killJobs") != null) { String[] jobs = request.getParameterValues("jobCheckBox"); if (jobs != null) { for (String job : jobs) { tracker.killJob(JobID.forName(job)); } } } if (conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && request.getParameter("changeJobPriority") != null) { String[] jobs = request.getParameterValues("jobCheckBox"); if (jobs != null) { JobPriority jobPri = JobPriority.valueOf(request .getParameter("setJobPriority")); for (String job : jobs) { tracker.setJobPriority(JobID.forName(job), jobPri); } } } }
java
{ "resource": "" }
q160206
JSPUtil.getJobDetailsHistoryLink
train
public static String getJobDetailsHistoryLink(JobTracker tracker, String jobId) { RetireJobInfo info = tracker.retireJobs.get(JobID.forName(jobId)); String historyFileUrl = getHistoryFileUrl(info); String result = (historyFileUrl == null ? "" : "jobdetailshistory.jsp?jobid=" + jobId + "&logFile=" + historyFileUrl); return result; }
java
{ "resource": "" }
q160207
JSPUtil.getHistoryFileUrl
train
private static String getHistoryFileUrl(RetireJobInfo info) { String historyFile = info.getHistoryFile(); String historyFileUrl = null; if (historyFile != null && !historyFile.equals("")) { try { historyFileUrl = URLEncoder.encode(info.getHistoryFile(), "UTF-8"); } catch (UnsupportedEncodingException e) { LOG.warn("Can't create history url ", e); } } return historyFileUrl; }
java
{ "resource": "" }
q160208
JSPUtil.generateClusterResTable
train
public static String generateClusterResTable(JobTracker tracker) throws IOException { ResourceReporter reporter = tracker.getResourceReporter(); if (reporter == null) { return ""; } StringBuffer sb = new StringBuffer(); sb.append("<table border=\"1\" cellpadding=\"5\" cellspacing=\"0\">\n"); sb.append("<tr>\n"); sb.append("<th colspan=3>CPU</th>\n"); sb.append("<th colspan=3>MEM</th>\n"); sb.append("<th rowspan=2>Reported</th>\n"); sb.append("</tr>\n"); sb.append("<tr>\n"); sb.append("<th>Total</th><th>Used</th><th>%</th>\n"); sb.append("<th>Total</th><th>Used</th><th>%</th>\n"); sb.append("</tr>\n"); sb.append("<tr>\n"); sb.append(String.format( "<td>%.1f GHz</td><td>%.1f GHz</td><td>%.1f%%</td>\n", reporter.getClusterCpuTotalGHz(), reporter.getClusterCpuUsageGHz(), Math.min(reporter.getClusterCpuUsageGHz() / reporter.getClusterCpuTotalGHz() * 100D, 100D))); sb.append(String.format( "<td>%.1f GB</td><td>%.1f GB</td><td>%.1f%%</td><td>%d</td>\n", reporter.getClusterMemTotalGB(), reporter.getClusterMemUsageGB(), reporter.getClusterMemUsageGB() / reporter.getClusterMemTotalGB() * 100D, reporter.getReportedTaskTrackers())); sb.append("</tr>\n"); sb.append("</table>\n"); return sb.toString(); }
java
{ "resource": "" }
q160209
URLImageInputStream.setupInputStream
train
private void setupInputStream() throws IOException { HttpURLConnection connection = (HttpURLConnection) url.openConnection(); // set timeout for connecting and reading connection.setConnectTimeout(httpTimeout); connection.setReadTimeout(httpTimeout); if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new IOException("Fetch of " + url + " failed with status code " + connection.getResponseCode() + "\nResponse message:\n" + connection.getResponseMessage()); } String contentLength = connection .getHeaderField(TransferFsImage.CONTENT_LENGTH); if (contentLength != null) { // store image size advertisedSize = Long.parseLong(contentLength); if (advertisedSize <= 0) { throw new IOException("Invalid " + TransferFsImage.CONTENT_LENGTH + " header: " + contentLength); } } else { throw new IOException(TransferFsImage.CONTENT_LENGTH + " header is not provided " + "by the server when trying to fetch " + url); } // get the digest digest = TransferFsImage.parseMD5Header(connection); if (digest == null) { // digest must be provided, otherwise the image is not valid throw new IOException("Image digest not provided for url: " + url); } // get the input stream directly from the connection inputStream = connection.getInputStream(); initialized = true; }
java
{ "resource": "" }
q160210
CoronaConf.getCpuToResourcePartitioning
train
public Map<Integer, Map<ResourceType, Integer>> getCpuToResourcePartitioning() { if (cachedCpuToResourcePartitioning == null) { cachedCpuToResourcePartitioning = getUncachedCpuToResourcePartitioning(this); } return cachedCpuToResourcePartitioning; }
java
{ "resource": "" }
q160211
CoronaConf.getPoolInfo
train
public PoolInfo getPoolInfo() { String poolNameProperty = get(IMPLICIT_POOL_PROPERTY, "user.name"); String explicitPool = get(EXPLICIT_POOL_PROPERTY, get(poolNameProperty, "")).trim(); String[] poolInfoSplitString = explicitPool.split("[.]"); if (poolInfoSplitString != null && poolInfoSplitString.length == 2) { return new PoolInfo(poolInfoSplitString[0], poolInfoSplitString[1]); } else if (!explicitPool.isEmpty()) { return new PoolInfo(PoolGroupManager.DEFAULT_POOL_GROUP, explicitPool); } else { return PoolGroupManager.DEFAULT_POOL_INFO; } }
java
{ "resource": "" }
q160212
InterleavedInputStream.readMetaDataIfNeeded
train
public boolean readMetaDataIfNeeded() throws IOException { if (eofReached) { return false; } if (rawBlockOffset == 0) { try { metaDataConsumer.readMetaData(in, metaDataBlockSize); rawBlockOffset += metaDataBlockSize; } catch (EOFException e) { eofReached = true; return false; } } return true; }
java
{ "resource": "" }
q160213
InterleavedInputStream.seekOrSkip
train
private boolean seekOrSkip(long bytes, boolean toNewSource) throws IOException { if (seekableIn != null) { // Use Seekable interface to speed up skip. int available = in.available(); try { if (toNewSource) { return seekableIn.seekToNewSource(seekableIn.getPos() + bytes); } else { seekableIn.seek(seekableIn.getPos() + bytes); return true; } } catch (IOException e) { if (bytes > available && "Cannot seek after EOF".equals(e.getMessage())) { eofReached = true; throw new EOFException(e.getMessage()); } } } else { // Do raw skip. long toSkip = bytes; while (toSkip > 0) { long skipped = in.skip(toSkip); if (skipped <= 0) { throw new EOFException("skip returned " + skipped); } toSkip -= skipped; }; } return true; }
java
{ "resource": "" }
q160214
InterleavedInputStream.rawSkip
train
protected boolean rawSkip(long bytes, boolean toNewSource) throws IOException { boolean result = seekOrSkip(bytes, toNewSource); setRawOffset(getRawOffset() + bytes); // Check validity if (rawBlockOffset > 0 && rawBlockOffset < metaDataBlockSize) { throw new IOException("Cannot jump into the middle of a MetaDataBlock. MetaDataBlockSize = " + metaDataBlockSize + " and we are at " + rawBlockOffset); } return result; }
java
{ "resource": "" }
q160215
Environment.prepare
train
public static void prepare(String fname) { if (!"Linux".equalsIgnoreCase(System.getProperty("os.name"))) { System.err.println("Linux system required for FailMon. Exiting..."); System.exit(0); } System.setProperty("log4j.configuration", "conf/log4j.properties"); PropertyConfigurator.configure("conf/log4j.properties"); LOG = LogFactory.getLog("org.apache.hadoop.contrib.failmon"); logInfo("********** FailMon started ***********"); // read parseState file PersistentState.readState("conf/parsing.state"); try { FileInputStream propFile = new FileInputStream(fname); fmProperties.load(propFile); propFile.close(); } catch (FileNotFoundException e1) { e1.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } ready = true; try { String sudo_prompt = "passwd_needed:"; String echo_txt = "access_ok"; Process p = Runtime.getRuntime().exec("sudo -S -p " + sudo_prompt + " echo " + echo_txt ); InputStream inps = p.getInputStream(); InputStream errs = p.getErrorStream(); while (inps.available() < echo_txt.length() && errs.available() < sudo_prompt.length()) Thread.sleep(100); byte [] buf; String s; if (inps.available() >= echo_txt.length()) { buf = new byte[inps.available()]; inps.read(buf); s = new String(buf); if (s.startsWith(echo_txt)) { superuser = true; logInfo("Superuser privileges found!"); } else { // no need to read errs superuser = false; logInfo("Superuser privileges not found."); } } } catch (IOException e) { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); } }
java
{ "resource": "" }
q160216
Environment.getInterval
train
public static int getInterval(ArrayList<MonitorJob> monitors) { String tmp = getProperty("executor.interval.min"); if (tmp != null) MIN_INTERVAL = Integer.parseInt(tmp); int[] monIntervals = new int[monitors.size()]; for (int i = 0; i < monitors.size(); i++) monIntervals[i] = monitors.get(i).interval; return Math.max(MIN_INTERVAL, gcd(monIntervals)); }
java
{ "resource": "" }
q160217
Environment.checkExistence
train
public static boolean checkExistence(String cmd) { StringBuffer sb = runCommand("which " + cmd); if (sb.length() > 1) return true; return false; }
java
{ "resource": "" }
q160218
Environment.runCommand
train
public static StringBuffer runCommand(String[] cmd) { StringBuffer retval = new StringBuffer(MAX_OUTPUT_LENGTH); Process p; try { p = Runtime.getRuntime().exec(cmd); InputStream tmp = p.getInputStream(); p.waitFor(); int c; while ((c = tmp.read()) != -1) retval.append((char) c); } catch (IOException e) { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); } return retval; }
java
{ "resource": "" }
q160219
FileDataServlet.createUri
train
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request) throws IOException, URISyntaxException { return createUri(i.getPath().toString(), pickSrcDatanode(i, nnproxy), ugi, request); }
java
{ "resource": "" }
q160220
FileDataServlet.pickSrcDatanode
train
private static DatanodeInfo[] pickSrcDatanode(FileStatus i, ClientProtocol nnproxy) throws IOException { // a race condition can happen by initializing a static member this way. // A proper fix should make JspHelper a singleton. Since it doesn't affect // correctness, we leave it as is for now. if (jspHelper == null) jspHelper = new JspHelper(); final LocatedBlocks blks = nnproxy.getBlockLocations( i.getPath().toUri().getPath(), 0, 1); if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) { // pick a random datanode return new DatanodeInfo[] { jspHelper.randomNode() }; } return jspHelper.bestNode(blks); }
java
{ "resource": "" }
q160221
UTF8.skip
train
public static void skip(DataInput in) throws IOException { int length = in.readUnsignedShort(); WritableUtils.skipFully(in, length); }
java
{ "resource": "" }
q160222
UTF8.compareTo
train
public int compareTo(Object o) { UTF8 that = (UTF8)o; return WritableComparator.compareBytes(bytes, 0, length, that.bytes, 0, that.length); }
java
{ "resource": "" }
q160223
UTF8.getBytes
train
public static byte[] getBytes(String string) { byte[] result = new byte[utf8Length(string)]; try { // avoid sync'd allocations writeChars(result, string, 0, string.length()); } catch (IOException e) { throw new RuntimeException(e); } return result; }
java
{ "resource": "" }
q160224
UTF8.readString
train
public static String readString(DataInput in) throws IOException { int bytes = in.readUnsignedShort(); return readChars(in, bytes); }
java
{ "resource": "" }
q160225
UTF8.writeString
train
public static int writeString(DataOutput out, String s) throws IOException { if (s.length() > 0xffff/3) { // maybe too long LOG.warn("truncating long string: " + s.length() + " chars, starting with " + s.substring(0, 20)); s = s.substring(0, 0xffff/3); } int len = utf8Length(s); if (len > 0xffff) // double-check length throw new IOException("string too long!"); out.writeShort(len); writeChars(out, s, 0, s.length()); return len; }
java
{ "resource": "" }
q160226
UTF8.utf8Length
train
private static int utf8Length(String string) { int stringLength = string.length(); int utf8Length = 0; for (int i = 0; i < stringLength; i++) { int c = string.charAt(i); if (c <= 0x007F) { utf8Length++; } else if (c > 0x07FF) { utf8Length += 3; } else { utf8Length += 2; } } return utf8Length; }
java
{ "resource": "" }
q160227
EditLogFileOutputStream.create
train
@Override public void create() throws IOException { fc.truncate(0); fc.position(0); doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION); setReadyToFlush(); flush(); }
java
{ "resource": "" }
q160228
EditLogFileOutputStream.setReadyToFlush
train
@Override public void setReadyToFlush() throws IOException { doubleBuf.getCurrentBuf().write(FSEditLogOpCodes.OP_INVALID.getOpCode()); // insert eof marker doubleBuf.setReadyToFlush(); }
java
{ "resource": "" }
q160229
EditLogFileOutputStream.flushAndSync
train
@Override protected void flushAndSync(boolean durable) throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } preallocate(); // preallocate file if necessary if (doubleBuf.isFlushed()) { return; } doubleBuf.flushTo(fp); if (durable) { fc.force(false); // metadata updates not needed } fc.position(fc.position() - 1); // skip back the end-of-file marker }
java
{ "resource": "" }
q160230
EditLogFileOutputStream.preallocate
train
private void preallocate() throws IOException { long position = fc.position(); long triggerSize = Math.max(FSEditLog.preallocateSize / 100, 4096); if (position + triggerSize >= fc.size()) { if(FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug("Preallocating Edit log, current size " + fc.size()); } fill.position(0); int written = fc.write(fill, position); if(FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug("Edit log size is now " + fc.size() + " written " + written + " bytes " + " at offset " + position); } } }
java
{ "resource": "" }
q160231
BlockPlacementPolicyHBase.directoryDataNodeUsage
train
private HashMap<DatanodeDescriptor, Integer> directoryDataNodeUsage(INodeDirectory dir, int threshold) { HashMap<DatanodeDescriptor, Integer> dataNodeUsage = new HashMap<DatanodeDescriptor, Integer>(); List<INode> children; nameSystem.readLock(); try { if (dir.getChildrenRaw() == null) { return dataNodeUsage; } children = new ArrayList<INode>(dir.getChildrenRaw()); Collections.shuffle(children); for (INode node : children) { if (!(node instanceof INodeFile)) { // The condition is always false. continue; } INodeFile file = (INodeFile) node; BlockInfo[] blocks = file.getBlocks(); for (BlockInfo block : blocks) { if (threshold == 0) { return dataNodeUsage; } int replication = block.numNodes(); for (int i = 0; i < replication; i++) { DatanodeDescriptor datanode = block.getDatanode(i); Integer currentUsage = dataNodeUsage.get(datanode); dataNodeUsage.put(datanode, currentUsage == null ? 1 : currentUsage + 1); } threshold--; } } } finally { nameSystem.readUnlock(); } return dataNodeUsage; }
java
{ "resource": "" }
q160232
DistTool.checkSource
train
protected static void checkSource(Configuration conf, List<Path> srcs ) throws InvalidInputException { List<IOException> ioes = new ArrayList<IOException>(); for(Path p : srcs) { try { if (!p.getFileSystem(conf).exists(p)) { ioes.add(new FileNotFoundException("Source "+p+" does not exist.")); } } catch(IOException e) {ioes.add(e);} } if (!ioes.isEmpty()) { throw new InvalidInputException(ioes); } }
java
{ "resource": "" }
q160233
UtilizationCollectorCached.connect
train
protected void connect() { LOG.info("Connecting to collector..."); try { conf.setStrings(UnixUserGroupInformation.UGI_PROPERTY_NAME, new String[]{"hadoop", "hadoop"}); rpcCollector = (UtilizationCollectorProtocol) RPC.getProxy(UtilizationCollectorProtocol.class, UtilizationCollectorProtocol.versionID, UtilizationCollector.getAddress(conf), conf); } catch (IOException e) { LOG.error("Cannot connect to UtilizationCollector server. Retry in " + DEFAULT_MIRROR_PERIOD + " milliseconds."); return; } LOG.info("Connection established"); }
java
{ "resource": "" }
q160234
UtilizationCollectorCached.fetchData
train
protected void fetchData() throws IOException { // if not connected to the collector, wait for a while then try connecting if (rpcCollector == null) { try { Thread.sleep(RECONNECT_PERIOD); } catch (InterruptedException e) { // do nothing } connect(); } try { clusterUtil = rpcCollector.getClusterUtilization(); for (JobUtilization job: rpcCollector.getAllRunningJobUtilization()) { allJobUtil.put(job.getJobId(), job); } for (TaskTrackerUtilization tt : rpcCollector.getAllTaskTrackerUtilization()) { allTaskTrackerUtil.put(tt.getHostName(), tt); } } catch (Exception e) { // When the Collector is down, clear the old data clusterUtil = null; allJobUtil.clear(); allTaskTrackerUtil.clear(); LOG.warn("Error obtaining data from Collector."); } }
java
{ "resource": "" }
q160235
CapacitySchedulerConf.getCapacity
train
public float getCapacity(String queue) { //Check done in order to return default capacity which can be negative //In case of both capacity and default capacity not configured. //Last check is if the configuration is specified and is marked as //negative we throw exception String raw = rmConf.getRaw(toFullPropertyName(queue, CAPACITY_PROPERTY)); if(raw == null) { return -1; } float result = rmConf.getFloat( toFullPropertyName(queue, CAPACITY_PROPERTY), -1); if (result < 0.0 || result > 100.0) { throw new IllegalArgumentException( "Illegal capacity for queue " + queue + " of " + result); } return result; }
java
{ "resource": "" }
q160236
CapacitySchedulerConf.setCapacity
train
public void setCapacity(String queue,float capacity) { rmConf.setFloat(toFullPropertyName(queue, CAPACITY_PROPERTY),capacity); }
java
{ "resource": "" }
q160237
CapacitySchedulerConf.getMaxCapacity
train
public float getMaxCapacity(String queue) { float result = rmConf.getFloat( toFullPropertyName(queue, MAX_CAPACITY_PROPERTY), -1); //if result is 0 or less than 0 set it to -1 result = (result <= 0) ? -1 : result; if (result > 100.0) { throw new IllegalArgumentException( "Illegal " + MAX_CAPACITY_PROPERTY + " for queue " + queue + " of " + result); } if ((result != -1) && (result < getCapacity(queue))) { throw new IllegalArgumentException( MAX_CAPACITY_PROPERTY + " " + result + " for a queue should be greater than or equal to capacity "); } return result; }
java
{ "resource": "" }
q160238
CapacitySchedulerConf.setMaxCapacity
train
public void setMaxCapacity(String queue,float maxCapacity) { rmConf.setFloat( toFullPropertyName(queue, MAX_CAPACITY_PROPERTY), maxCapacity); }
java
{ "resource": "" }
q160239
CapacitySchedulerConf.getMinimumUserLimitPercent
train
public int getMinimumUserLimitPercent(String queue) { int userLimit = rmConf.getInt(toFullPropertyName(queue, "minimum-user-limit-percent"), defaultUlimitMinimum); if(userLimit <= 0 || userLimit > 100) { throw new IllegalArgumentException("Invalid user limit : " + userLimit + " for queue : " + queue); } return userLimit; }
java
{ "resource": "" }
q160240
CapacitySchedulerConf.getMaxJobsPerUserToInitialize
train
public int getMaxJobsPerUserToInitialize(String queue) { int maxJobsPerUser = rmConf.getInt(toFullPropertyName(queue, "maximum-initialized-jobs-per-user"), defaultMaxJobsPerUsersToInitialize); if(maxJobsPerUser <= 0) { throw new IllegalArgumentException( "Invalid maximum jobs per user configuration " + maxJobsPerUser); } return maxJobsPerUser; }
java
{ "resource": "" }
q160241
DatanodeDescriptor.addBlock
train
boolean addBlock(BlockInfo b) { int dnIndex = b.addNode(this); if(dnIndex < 0) return false; // add to the head of the data-node list blockList = b.listInsert(blockList, this, dnIndex); numOfBlocks++; return true; }
java
{ "resource": "" }
q160242
DatanodeDescriptor.insertIntoList
train
void insertIntoList(BlockInfo head, int headIndex, BlockInfo tail, int tailIndex, int count) { if (head == null) return; // connect tail to now-head tail.setNext(tailIndex, blockList); if (blockList != null) blockList.setPrevious(blockList.findDatanode(this), tail); // create new head blockList = head; blockList.setPrevious(headIndex, null); // add new blocks to the count numOfBlocks += count; }
java
{ "resource": "" }
q160243
DatanodeDescriptor.removeBlock
train
boolean removeBlock(BlockInfo b) { blockList = b.listRemove(blockList, this); if ( b.removeNode(this) ) { numOfBlocks--; return true; } else { return false; } }
java
{ "resource": "" }
q160244
DatanodeDescriptor.moveBlockToHead
train
void moveBlockToHead(BlockInfo b) { blockList = b.listRemove(blockList, this); blockList = b.listInsert(blockList, this, -1); }
java
{ "resource": "" }
q160245
DatanodeDescriptor.listMoveToHead
train
protected BlockInfo listMoveToHead(BlockInfo block, BlockInfo head, DatanodeIndex indexes) { assert head != null : "Head can not be null"; if (head == block) { return head; } BlockInfo next = block.getSetNext(indexes.currentIndex, head); BlockInfo prev = block.getSetPrevious(indexes.currentIndex, null); head.setPrevious(indexes.headIndex, block); indexes.headIndex = indexes.currentIndex; prev.setNext(prev.findDatanode(this), next); if (next != null) next.setPrevious(next.findDatanode(this), prev); return block; }
java
{ "resource": "" }
q160246
DatanodeDescriptor.addBlockToBeReplicated
train
void addBlockToBeReplicated(Block block, DatanodeDescriptor[] targets) { assert(block != null && targets != null && targets.length > 0); replicateBlocks.offer(block, targets); }
java
{ "resource": "" }
q160247
DatanodeDescriptor.addBlockToBeRecovered
train
void addBlockToBeRecovered(Block block, DatanodeDescriptor[] targets) { assert(block != null && targets != null && targets.length > 0); recoverBlocks.offer(block, targets); }
java
{ "resource": "" }
q160248
DatanodeDescriptor.addBlocksToBeInvalidated
train
void addBlocksToBeInvalidated(List<Block> blocklist) { assert(blocklist != null && blocklist.size() > 0); synchronized (invalidateBlocks) { for(Block blk : blocklist) { invalidateBlocks.add(blk); } } }
java
{ "resource": "" }
q160249
DatanodeDescriptor.getInvalidateBlocks
train
BlockCommand getInvalidateBlocks(int maxblocks) { Block[] deleteList = null; synchronized (invalidateBlocks) { deleteList = invalidateBlocks.pollToArray(new Block[Math.min( invalidateBlocks.size(), maxblocks)]); } return (deleteList == null || deleteList.length == 0) ? null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList); }
java
{ "resource": "" }
q160250
DatanodeDescriptor.readFieldsFromFSEditLog
train
void readFieldsFromFSEditLog(DataInput in) throws IOException { this.name = UTF8.readString(in); this.storageID = UTF8.readString(in); this.infoPort = in.readShort() & 0x0000ffff; this.capacity = in.readLong(); this.dfsUsed = in.readLong(); this.remaining = in.readLong(); this.lastUpdate = in.readLong(); this.xceiverCount = in.readInt(); this.location = Text.readString(in); this.hostName = Text.readString(in); setAdminState(WritableUtils.readEnum(in, AdminStates.class)); }
java
{ "resource": "" }
q160251
DatanodeDescriptor.rollBlocksScheduled
train
private void rollBlocksScheduled(long now) { if ((now - lastBlocksScheduledRollTime) > BLOCKS_SCHEDULED_ROLL_INTERVAL) { prevApproxBlocksScheduled = currApproxBlocksScheduled; currApproxBlocksScheduled = 0; lastBlocksScheduledRollTime = now; } }
java
{ "resource": "" }
q160252
InvalidInputException.getMessage
train
public String getMessage() { StringBuffer result = new StringBuffer(); Iterator<IOException> itr = problems.iterator(); while(itr.hasNext()) { result.append(itr.next().getMessage()); if (itr.hasNext()) { result.append("\n"); } } return result.toString(); }
java
{ "resource": "" }
q160253
ImageLibrary.newImage
train
private boolean newImage(String name, String filename) { ImageDescriptor id; boolean success; try { URL fileURL = FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null); id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL)); success = true; } catch (Exception e) { e.printStackTrace(); id = ImageDescriptor.getMissingImageDescriptor(); // id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK); success = false; } descMap.put(name, id); imageMap.put(name, id.createImage(true)); return success; }
java
{ "resource": "" }
q160254
QuorumException.create
train
public static <K, V> QuorumException create( String simpleMsg, Map<K, V> successes, Map<K, Throwable> exceptions) { Preconditions.checkArgument(!exceptions.isEmpty(), "Must pass exceptions"); StringBuilder msg = new StringBuilder(); msg.append(simpleMsg).append(". "); if (!successes.isEmpty()) { msg.append(successes.size()).append(" successful responses:\n"); Joiner.on("\n") .useForNull("null [success]") .withKeyValueSeparator(": ") .appendTo(msg, successes); msg.append("\n"); } msg.append(exceptions.size() + " exceptions thrown:\n"); boolean isFirst = true; for (Map.Entry<K, Throwable> e : exceptions.entrySet()) { if (!isFirst) { msg.append("\n"); } isFirst = false; msg.append(e.getKey()).append(": "); if (e.getValue() instanceof RuntimeException) { msg.append(StringUtils.stringifyException(e.getValue())); } else if (e.getValue().getLocalizedMessage() != null) { msg.append(e.getValue().getLocalizedMessage()); } else { msg.append(StringUtils.stringifyException(e.getValue())); } } return new QuorumException(msg.toString()); }
java
{ "resource": "" }
q160255
DefaultStringifier.store
train
public static <K> void store(Configuration conf, K item, String keyName) throws IOException { DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, GenericsUtil.getClass(item)); conf.set(keyName, stringifier.toString(item)); stringifier.close(); }
java
{ "resource": "" }
q160256
DefaultStringifier.load
train
public static <K> K load(Configuration conf, String keyName, Class<K> itemClass) throws IOException { DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, itemClass); try { String itemStr = conf.get(keyName); return stringifier.fromString(itemStr); } finally { stringifier.close(); } }
java
{ "resource": "" }
q160257
DefaultStringifier.storeArray
train
public static <K> void storeArray(Configuration conf, K[] items, String keyName) throws IOException { DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, GenericsUtil.getClass(items[0])); try { StringBuilder builder = new StringBuilder(); for (K item : items) { builder.append(stringifier.toString(item)).append(SEPARATOR); } conf.set(keyName, builder.toString()); } finally { stringifier.close(); } }
java
{ "resource": "" }
q160258
DefaultStringifier.loadArray
train
public static <K> K[] loadArray(Configuration conf, String keyName, Class<K> itemClass) throws IOException { DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, itemClass); try { String itemStr = conf.get(keyName); ArrayList<K> list = new ArrayList<K>(); String[] parts = itemStr.split(SEPARATOR); for (String part : parts) { if (!part.equals("")) list.add(stringifier.fromString(part)); } return GenericsUtil.toArray(itemClass, list); } finally { stringifier.close(); } }
java
{ "resource": "" }
q160259
INodeFile.getBlockReplication
train
public short getBlockReplication(BlockInfo block) { if (storage.isSourceBlock(block)) { return getReplication(); } else { if (storage.getStorageType() == StorageType.RAID_STORAGE) { return ((INodeRaidStorage)storage).getCodec().parityReplication; } else { throw new IllegalStateException("parity block " + block + " belongs to a non-raid file"); } } }
java
{ "resource": "" }
q160260
StandbySafeMode.reportRegister
train
protected void reportRegister(DatanodeID node) { if (node != null && shouldUpdateNodes()) { if (!liveDatanodes.contains(node)) { // A new node has checked in, we want to send a ClearPrimary command to // it as well. outStandingHeartbeats.add(node); liveDatanodes.add(node); } } }
java
{ "resource": "" }
q160261
StandbySafeMode.reportHeartBeat
train
protected boolean reportHeartBeat(DatanodeID node) { if (node != null && shouldUpdateNodes()) { reportRegister(node); synchronized(this) { if (outStandingHeartbeats.remove(node)) { outStandingReports.add(node); return true; } } } return false; }
java
{ "resource": "" }
q160262
StandbySafeMode.reportPrimaryCleared
train
protected void reportPrimaryCleared(DatanodeID node) { if (node != null && shouldUpdateNodes()) { if (outStandingReports.remove(node)) { LOG.info("Failover: Outstanding reports: " + outStandingReports.size()); } } }
java
{ "resource": "" }
q160263
StandbySafeMode.triggerFailover
train
protected void triggerFailover() throws IOException { clearDataStructures(); // stop sending PREPARE_FAILOVER command // we are performing failover now prepareFailover = false; for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) { liveDatanodes.add(node); outStandingHeartbeats.add(node); } InjectionHandler .processEvent(InjectionEvent.STANDBY_ENTER_SAFE_MODE); safeModeState = SafeModeState.FAILOVER_IN_PROGRESS; InjectionHandler.processEvent(InjectionEvent.STANDBY_FAILOVER_INPROGRESS); safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this)); safeModeMonitor.start(); try { safeModeMonitor.join(); } catch (InterruptedException ie) { throw new IOException("triggerSafeMode() interruped()"); } if (safeModeState != SafeModeState.AFTER_FAILOVER) { throw new IOException("safeModeState is : " + safeModeState + " which does not indicate a successfull exit of safemode"); } }
java
{ "resource": "" }
q160264
StandbySafeMode.datanodeReportsReceived
train
private synchronized boolean datanodeReportsReceived(boolean checkDatanodes) { try { boolean received = this.getDatanodeReportRatio() >= this.outStandingReportThreshold; if (!received && checkDatanodes) { checkDatanodes(); return this.getDatanodeReportRatio() >= this.outStandingReportThreshold; } return received; } catch (Exception e) { LOG.warn("Failover - caught exception when checking reports", e); return false; } }
java
{ "resource": "" }
q160265
NameSpaceAddressManager.setPrimary
train
public void setPrimary(InstanceId ofPrimary) throws IOException { if (ofPrimary == null) { // failover in progress primaryNode = null; standbyNode = null; return; } switch (ofPrimary) { case NODEZERO: primaryNode = getNodeZero(); standbyNode = getNodeOne(); case NODEONE: primaryNode = getNodeOne(); standbyNode = getNodeZero(); } }
java
{ "resource": "" }
q160266
NameSpaceAddressManager.setConfSource
train
// Validates and records the configuration source, then builds the
// ZooKeeper client from it.
// NOTE(review): any previously created zkClient is not shut down here —
// confirm callers invoke this only once per manager.
public void setConfSource(Configurable src) { validateConfigFile(src.getConf()); confSrc = src; zkClient = new AvatarZooKeeperClient(confSrc.getConf(), null, true); }
java
{ "resource": "" }
q160267
LookasideCache.addCache
train
// Inserts (or refreshes) a cache entry mapping an HDFS path to a local
// copy. An existing entry only gets its generation stamp bumped (an LRU
// "touch"); a new entry also grows the accounted cache size and may
// trigger eviction once the configured maximum is exceeded.
void addCache(Path hdfsPath, Path localPath, long size) throws IOException { localMetrics.numAdd++; CacheEntry c = new CacheEntry(hdfsPath, localPath, size); CacheEntry found = cacheMap.putIfAbsent(hdfsPath, c); if (found != null) { // If entry was already in the cache, update its timestamp assert size == found.entrySize; assert localPath.equals(found.localPath); found.setGenstamp(globalStamp.incrementAndGet()); localMetrics.numAddExisting++; if (LOG.isDebugEnabled()) { LOG.debug("LookasideCache updating path " + hdfsPath); } } else { // We just inserted an entry in the cache. Increment the // recorded size of the cache. cacheSize.addAndGet(size); localMetrics.numAddNew++; if (LOG.isDebugEnabled()) { LOG.debug("LookasideCache add new path:" + hdfsPath + " cachedPath:" + localPath + " size " + size); } } // check if we need to evict because cache is full if (cacheSize.get() > cacheSizeMax) { checkEvict(); } }
java
{ "resource": "" }
q160268
LookasideCache.renameCache
train
// Re-keys a cache entry from oldhdfsPath to newhdfsPath, refreshing its
// generation stamp and local path. A missing source entry is only logged;
// a pre-existing destination entry is an error (and throws) because the
// already-removed source entry cannot be restored at that point.
void renameCache(Path oldhdfsPath, Path newhdfsPath, Path localPath) throws IOException { CacheEntry found = cacheMap.remove(oldhdfsPath); if (found == null) { String msg = "LookasideCache error renaming path: " + oldhdfsPath + " to: " + newhdfsPath + " Path " + newhdfsPath + " because it does not exists in the cache."; LOG.warn(msg); return; } // Update its timestamp and localPath found.hdfsPath = newhdfsPath; found.setGenstamp(globalStamp.incrementAndGet()); found.localPath = localPath; // add it back to the cache CacheEntry empty = cacheMap.putIfAbsent(newhdfsPath, found); if (empty != null) { String msg = "LookasideCache error renaming path: " + oldhdfsPath + " to: " + newhdfsPath + " Path " + newhdfsPath + " already exists in the cache."; LOG.warn(msg); throw new IOException(msg); } localMetrics.numRename++; if (LOG.isDebugEnabled()) { LOG.debug("LookasideCache renamed path:" + oldhdfsPath + " to:" + newhdfsPath + " cachedPath: " + localPath); } }
java
{ "resource": "" }
q160269
LookasideCache.removeCache
train
// Drops the entry for hdfsPath (if cached) and returns its size to the
// cache-capacity accounting. Unlike evictCache, no eviction callback runs.
void removeCache(Path hdfsPath) { CacheEntry c = cacheMap.remove(hdfsPath); if (c != null) { cacheSize.addAndGet(-c.entrySize); localMetrics.numRemove++; if (LOG.isDebugEnabled()) { LOG.debug("LookasideCache removed path:" + hdfsPath + " freed up size: " + c.entrySize); } } }
java
{ "resource": "" }
q160270
LookasideCache.evictCache
train
// Removes the entry for hdfsPath and, unlike removeCache, notifies the
// registered eviction interface (if any) so the local copy can be cleaned
// up; updates size accounting and eviction metrics.
void evictCache(Path hdfsPath) throws IOException { CacheEntry c = cacheMap.remove(hdfsPath); if (c != null) { cacheSize.addAndGet(-c.entrySize); if (evictionIface != null) { evictionIface.evictCache(c.hdfsPath, c.localPath, c.entrySize); } localMetrics.numEvict++; if (LOG.isDebugEnabled()) { LOG.debug("LookasideCache removed path:" + hdfsPath + " freed up size: " + c.entrySize); } } }
java
{ "resource": "" }
q160271
LookasideCache.getCache
train
// Looks up the local path cached for hdfsPath, bumping the entry's
// generation stamp so it counts as recently used. Returns null on a miss.
Path getCache(Path hdfsPath) { CacheEntry c = cacheMap.get(hdfsPath); localMetrics.numGetAttempts++; if (c != null) { // update the accessTime before returning to caller c.setGenstamp(globalStamp.incrementAndGet()); localMetrics.numGetHits++; return c.localPath; } return null; // not in cache }
java
{ "resource": "" }
q160272
LookasideCache.checkEvict
train
// Evicts least-recently-used entries (ordered by generation stamp) until
// the cache shrinks to the configured target size. Only one thread evicts
// at a time; concurrent callers return immediately, temporarily letting
// the cache overshoot its maximum.
synchronized void checkEvict() throws IOException { if (cacheSize.get() < cacheSizeMax) { return; // nothing to do, plenty of free space } // Only one thread should be doing the eviction. Do not block // current thread, it is ok to oversubscribe the cache size // temporarily. if (evictionInProgress) { return; } // record the fact that eviction has started. evictionInProgress = true; try { // if the cache has reached a threshold size, then free old entries. long curSize = cacheSize.get(); // how much to evict in one iteration long targetSize = cacheSizeMax - (cacheSizeMax * cacheEvictPercent)/100; if (LOG.isDebugEnabled()) { LOG.debug("Cache size " + curSize + " has exceeded the " + " maximum configured cacpacity " + cacheSizeMax + ". Eviction has to reduce cache size to " + targetSize); } // sort all entries based on their accessTimes Collection<CacheEntry> values = cacheMap.values(); CacheEntry[] records = values.toArray(new CacheEntry[values.size()]); Arrays.sort(records, LRU_COMPARATOR); for (int i = 0; i < records.length; i++) { if (cacheSize.get() <= targetSize) { break; // we reclaimed everything we wanted to } CacheEntry c = records[i]; evictCache(c.hdfsPath); } } finally { evictionInProgress = false; // eviction done. } if (LOG.isDebugEnabled()) { LOG.debug("Cache eviction complete. Current cache size is " + cacheSize.get()); } }
java
{ "resource": "" }
q160273
DataNode.getSecureRandom
train
// Returns the cached SecureRandom if one is set; otherwise creates a fresh
// SHA1PRNG instance, falling back to the plain Random R when the algorithm
// is unavailable.
// NOTE(review): a freshly created SecureRandom is never stored back into
// cachedSecureRandom, so each call re-creates one — confirm whether
// caching here was intended.
public synchronized static Random getSecureRandom() { if (cachedSecureRandom != null) return cachedSecureRandom; try { return SecureRandom.getInstance("SHA1PRNG"); } catch (NoSuchAlgorithmException e) { return R; } }
java
{ "resource": "" }
q160274
DataNode.initGlobalSetting
train
// One-time, namespace-independent datanode initialization: records config
// and data dirs, creates storage, then brings up the data-transfer server,
// HTTP info server, IPC server, metrics and the thread-liveness reporter.
protected void initGlobalSetting(Configuration conf, AbstractList<File> dataDirs) throws IOException { this.dataDirs = dataDirs; this.conf = conf; storage = new DataStorage(this); // global DN settings initConfig(conf); registerMXBean(); initDataXceiver(conf); startInfoServer(conf); initIpcServer(conf); myMetrics = new DataNodeMetrics(conf, storage.getStorageID()); setCountingLoggers(myMetrics); threadLivenessReporter = new DatanodeThreadLivenessReporter(conf.getLong( "dfs.datanode.thread.liveness.threshold", 240 * 1000), myMetrics.threadActiveness); }
java
{ "resource": "" }
q160275
DataNode.initDataSetAndScanner
train
// Initializes the on-disk dataset for the given number of namespaces, then
// the periodic block scanner and directory scanner that depend on it.
protected void initDataSetAndScanner(Configuration conf, AbstractList<File> dataDirs, int numOfNameSpaces) throws IOException { initFsDataSet(conf, dataDirs, numOfNameSpaces); initDataBlockScanner(conf); initDirectoryScanner(conf); }
java
{ "resource": "" }
q160276
DataNode.startDataNode
train
// Brings the datanode up: global settings first, then one namespace-offer
// service per configured namenode address, then the dataset and scanners.
// The in-code TODO notes that the static nameNodeAddr keeps only the first
// namenode for legacy single-namenode callers.
void startDataNode(Configuration conf, AbstractList<File> dataDirs ) throws IOException { initGlobalSetting(conf, dataDirs); /* Initialize namespace manager */ List<InetSocketAddress> nameNodeAddrs = DFSUtil.getNNServiceRpcAddresses(conf); //TODO this will be no longer valid, since we will have multiple namenodes // We might want to keep it and assign the first NN to it. DataNode.nameNodeAddr = nameNodeAddrs.get(0); namespaceManager = new NamespaceManager(conf, nameNodeAddrs); initDataSetAndScanner(conf, dataDirs, nameNodeAddrs.size()); }
java
{ "resource": "" }
q160277
DataNode.getNameNodeAddress
train
/**
 * Resolves the namenode address a datanode should contact: the dedicated
 * datanode-protocol address when configured, otherwise the client
 * protocol address.
 *
 * @param conf configuration to read the addresses from
 * @return the address to use for namenode communication
 */
public static InetSocketAddress getNameNodeAddress(Configuration conf) {
  final InetSocketAddress dnProtocolAddr = NameNode.getDNProtocolAddress(conf);
  return (dnProtocolAddr != null)
      ? dnProtocolAddr
      : NameNode.getClientProtocolAddress(conf);
}
java
{ "resource": "" }
q160278
DataNode.getNSNamenode
train
/**
 * Returns the datanode-to-namenode RPC proxy for the given namespace.
 *
 * @param namespaceId id of the namespace whose namenode proxy is wanted
 * @return the namenode proxy for that namespace
 * @throws IOException if no offer service or proxy exists for the id
 */
public DatanodeProtocol getNSNamenode(int namespaceId) throws IOException {
  NamespaceService nsos = namespaceManager.get(namespaceId);
  if (nsos == null || nsos.getDatanodeProtocol() == null) {
    // FIX: original message misspelled "namenode" as "namnode".
    throw new IOException("cannot find a namenode proxy for namespaceId="
        + namespaceId);
  }
  return nsos.getDatanodeProtocol();
}
java
{ "resource": "" }
q160279
DataNode.getDNRegistrationForNS
train
// Returns this datanode's registration for the given namespace, or throws
// when the namespace-offer service is missing or not yet registered.
public DatanodeRegistration getDNRegistrationForNS(int namespaceId) throws IOException { NamespaceService nsos = namespaceManager.get(namespaceId); if(nsos==null || nsos.getNsRegistration()==null) { throw new IOException("cannot find NSOfferService for namespaceId="+namespaceId); } return nsos.getNsRegistration(); }
java
{ "resource": "" }
q160280
DataNode.shutdown
train
// Shuts the datanode down exactly once (guarded by the shuttingDown flag):
// stops the HTTP and IPC servers, kills the data-transfer server and
// interrupts its worker thread group until empty (giving up with a warning
// after ~10 minutes), then stops the block-copy executor, namespace
// services, scanners, storage locks, dataset, metrics and MXBean.
public void shutdown() { if (this.shuttingDown.getAndSet(true)) { // Already being shut down LOG.warn("DataNode.shutdown() was called while shutting down."); return; } if (infoServer != null) { try { infoServer.stop(); } catch (Exception e) { LOG.warn("Exception shutting down DataNode", e); } } if (ipcServer != null) { ipcServer.stop(); } this.shouldRun = false; if (dataXceiverServer != null) { ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill(); this.dataXceiverServer.interrupt(); // wait for all data receiver threads to exit if (this.threadGroup != null) { int retries = 0; while (true) { this.threadGroup.interrupt(); LOG.info("Waiting for threadgroup to exit, active threads is " + this.threadGroup.activeCount()); if (this.threadGroup.activeCount() == 0) { break; } try { if (++retries > 600) { Thread[] activeThreads = new Thread[this.threadGroup.activeCount()]; this.threadGroup.enumerate(activeThreads, true); LOG.info("Active Threads: " + Arrays.toString(activeThreads)); LOG.warn("Waited for ThreadGroup to be empty for 10 minutes." + " SHUTTING DOWN NOW"); break; } Thread.sleep(1000); } catch (InterruptedException e) {} } } // wait for dataXceiveServer to terminate try { this.dataXceiverServer.join(); } catch (InterruptedException ie) { } } if (blockCopyExecutor != null && !blockCopyExecutor.isShutdown()) { blockCopyExecutor.shutdownNow(); } if (namespaceManager != null) { namespaceManager.shutDownAll(); } if (blockScanner != null) { blockScanner.shutdown(); } if(directoryScanner != null) { shutdownDirectoryScanner(); } if (storage != null) { try { this.storage.unlockAll(); } catch (IOException ie) { } } if (data != null) { data.shutdown(); } if (myMetrics != null) { setCountingLoggers(null); myMetrics.shutdown(); } this.shutdownMXBean(); }
java
{ "resource": "" }
q160281
DataNode.checkDiskError
train
// Classifies an I/O failure: interruption-related exceptions are ignored,
// "No space left on device" is surfaced as DiskOutOfSpaceException, and
// anything else escalates to a full disk check.
protected void checkDiskError(Exception e ) throws IOException { if (e instanceof ClosedByInterruptException || e instanceof java.io.InterruptedIOException) { return; } LOG.warn("checkDiskError: exception: ", e); if (e.getMessage() != null && e.getMessage().startsWith("No space left on device")) { throw new DiskOutOfSpaceException("No space left on device"); } else { checkDiskError(); } }
java
{ "resource": "" }
q160282
DataNode.checkDiskError
train
// Runs a data-directory check unless one is already in progress (CAS on
// checkingDisk) or one completed within minDiskCheckIntervalMsec; any
// DiskErrorException found is escalated via handleDiskError.
protected void checkDiskError( ) throws IOException{ // We disallow concurrent disk checks as it doesn't help // but can significantly impact performance and reliability of // the system. // boolean setSuccess = checkingDisk.compareAndSet(false, true); if (!setSuccess) { LOG.info("checkDiskError is already running."); return; } try { // We don't check disks if it's not long since last check. // long curTime = System.currentTimeMillis(); if (curTime - timeLastCheckDisk < minDiskCheckIntervalMsec) { LOG.info("checkDiskError finished within " + minDiskCheckIntervalMsec + " mses. Skip this one."); return; } data.checkDataDir(); timeLastCheckDisk = System.currentTimeMillis(); } catch(DiskErrorException de) { handleDiskError(de.getMessage()); } finally { checkingDisk.set(false); } }
java
{ "resource": "" }
q160283
DataNode.getListOfDataDirs
train
/**
 * Resolves the datanode's data directories: if "dfs.datadir.confpath"
 * points at a readable directory-list file, its contents win; otherwise
 * (or when the file is empty/unreadable) fall back to "dfs.data.dir".
 *
 * @param conf configuration to consult
 * @return the resolved data directories, or null when neither source
 *         yields any
 */
public static String[] getListOfDataDirs(Configuration conf) {
  String[] configFilePath = conf.getStrings("dfs.datadir.confpath");
  String[] dataDirs = null;
  if (configFilePath != null && (configFilePath.length != 0)) {
    try {
      DataDirFileReader reader = new DataDirFileReader(configFilePath[0]);
      dataDirs = reader.getArrayOfCurrentDataDirectories();
      if (dataDirs == null) {
        LOG.warn("File is empty, using dfs.data.dir directories");
      }
    } catch (Exception e) {
      // Best-effort: an unreadable file falls back to dfs.data.dir below.
      LOG.warn("Could not read file, using directories from dfs.data.dir"
          + " Exception: ", e);
    }
  } else {
    // FIX: original message was a double negative
    // ("No dfs.datadir.confpath not defined").
    LOG.warn("dfs.datadir.confpath not defined, now using default "
        + "directories");
  }
  if (dataDirs == null) {
    dataDirs = conf.getStrings("dfs.data.dir");
  }
  return dataDirs;
}
java
{ "resource": "" }
q160284
DataNode.createDataNode
train
/**
 * Instantiates a datanode from command-line args and, when instantiation
 * succeeds, starts its daemon threads.
 *
 * @param args command-line arguments
 * @param conf configuration to use
 * @return the running datanode, or null when none was instantiated
 * @throws IOException if instantiation fails
 */
public static DataNode createDataNode(String args[], Configuration conf) throws IOException {
  final DataNode datanode = instantiateDataNode(args, conf);
  if (datanode == null) {
    return null;
  }
  datanode.runDatanodeDaemon();
  return datanode;
}
java
{ "resource": "" }
q160285
DataNode.getNamenodeAddresses
train
// MXBean attribute: JSON map from each initialized namenode's host address
// to its namespace id.
@Override // DataNodeMXBean public String getNamenodeAddresses() { final Map<String, Integer> info = new HashMap<String, Integer>(); for (NamespaceService ns : namespaceManager.getAllNamenodeThreads()) { if (ns != null && ns.initialized()) { info.put(ns.getNNSocketAddress().getAddress().getHostAddress(), ns.getNamespaceId()); } } return JSON.toString(info); }
java
{ "resource": "" }
q160286
DataNode.getVolumeInfo
train
// MXBean attribute: JSON map from each volume's directory to its used,
// free and reserved space; returns the literal "ERROR" when volume stats
// cannot be read.
@Override // DataNodeMXBean public String getVolumeInfo() { final Map<String, Object> info = new HashMap<String, Object>(); try { FSVolume[] volumes = ((FSDataset)this.data).volumes.getVolumes(); for (FSVolume v : volumes) { final Map<String, Object> innerInfo = new HashMap<String, Object>(); innerInfo.put("usedSpace", v.getDfsUsed()); innerInfo.put("freeSpace", v.getAvailable()); innerInfo.put("reservedSpace", v.getReserved()); info.put(v.getDir().toString(), innerInfo); } return JSON.toString(info); } catch (IOException e) { LOG.info("Cannot get volume info.", e); return "ERROR"; } }
java
{ "resource": "" }
q160287
DataNode.sendBlocksBeingWrittenReport
train
// Reports the namespace's blocks still being written to the given
// namenode, skipping the RPC entirely when there are none.
public void sendBlocksBeingWrittenReport(DatanodeProtocol node, int namespaceId, DatanodeRegistration nsRegistration) throws IOException { Block[] blocks = data.getBlocksBeingWrittenReport(namespaceId); if (blocks != null && blocks.length != 0) { long[] blocksAsLong = BlockListAsLongs.convertToArrayLongs(blocks); BlockReport bbwReport = new BlockReport(blocksAsLong); node.blocksBeingWrittenReport(nsRegistration, bbwReport); } }
java
{ "resource": "" }
q160288
BlockPlacementPolicyConfigurable.randomIntInWindow
train
// Picks a uniformly random ring index inside the window of size
// min(windowSize, n) starting at begin (mod n), skipping any index in
// excludeSet; returns -1 when the window is empty or fully excluded.
// The final loop walks the ring from begin, stepping over excluded
// indices, until it lands on the rindex-th non-excluded element.
protected int randomIntInWindow(int begin, int windowSize, int n, Set<Integer> excludeSet) { final int size = Math.min(windowSize, n); if (size <= 0) { return -1; } int adjustment = 0; for (Integer v: excludeSet) { int vindex = (v.intValue() - begin + n) % n; if (vindex < size) { adjustment++; // calculates excluded elements within window } } if (adjustment >= size) { return -1; } int rindex = r.nextInt(size - adjustment); // ith element is chosen int iterator = begin; for (int i = 0; i <= rindex; i++) { while (excludeSet.contains(iterator)) { iterator = (iterator + 1) % n; } if (i != rindex) { iterator = (iterator + 1) % n; } } return iterator; }
java
{ "resource": "" }
q160289
BlockPlacementPolicyConfigurable.chooseTarget
train
// Entry point of the configurable placement policy: computes how many new
// replicas are needed and the per-rack cap, seeds the excluded/result
// lists from the already-chosen nodes, delegates node selection to the
// superclass, and finalizes the target ordering.
@Override public DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode, int numOfReplicas, DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes, List<Node> excludesNodes, long blocksize) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves() == 0) { return new DatanodeDescriptor[0]; } int[] result = getActualReplicas(numOfReplicas, chosenNodes); numOfReplicas = result[0]; int maxNodesPerRack = result[1]; HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>(); List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>( chosenNodes.size() + numOfReplicas); updateExcludedAndChosen(null, excludedNodes, results, chosenNodes); if (!clusterMap.contains(writer)) { writer = null; } DatanodeDescriptor localNode = super.chooseTarget(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, chosenNodes.isEmpty()); return this.finalizeTargets(results, chosenNodes, writer, localNode); }
java
{ "resource": "" }
q160290
BlockPlacementPolicyConfigurable.chooseFirstInRemoteRack
train
// Chooses the first off-rack replica inside the ring window anchored at
// the writer's rack/machine position; falls back to the superclass's
// random remote-rack policy when no node within the window is usable.
protected void chooseFirstInRemoteRack(DatanodeDescriptor localMachine, HashMap<Node, Node> excludedNodes, long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results) throws NotEnoughReplicasException { readLock(); try { RackRingInfo rackInfo = racksMap.get(localMachine.getNetworkLocation()); assert (rackInfo != null); Integer machineId = rackInfo.findNode(localMachine); assert (machineId != null); if (!chooseRemoteRack(rackInfo.index, rackInfo.index, rackWindow + 1, machineId, machineWindow, excludedNodes, blocksize, maxReplicasPerRack, results, false)) { LOG.info("Couldn't find a Datanode within node group. " + "Resorting to default policy."); super.chooseRemoteRack(1, localMachine, excludedNodes, blocksize, maxReplicasPerRack, results); } } finally { readUnlock(); } }
java
{ "resource": "" }
q160291
BlockPlacementPolicyConfigurable.findBestWithFirst
train
// Given a fixed first replica, scans the candidate list for a second (and
// preferably third) node that lie inside the placement window relative to
// it; fills result[0..2] with the best combination found, preferring a
// full triple (which returns immediately) over a pair.
private void findBestWithFirst(DatanodeDescriptor first, List<DatanodeDescriptor> listOfNodes, DatanodeDescriptor[] result) { for (int in2 = 0; in2 < listOfNodes.size(); in2++) { DatanodeDescriptor n2 = listOfNodes.get(in2); if (!first.equals(n2)) { if (result[1] == null && inWindow(first, n2)) { result[0] = first; result[1] = n2; } for (int in3 = in2 + 1; in3 < listOfNodes.size(); in3++) { DatanodeDescriptor n3 = listOfNodes.get(in3); if (!first.equals(n3) && inWindow(first, n3, n2)) { result[0] = first; result[1] = n2; result[2] = n3; return; } } } } }
java
{ "resource": "" }
q160292
BlockPlacementPolicyConfigurable.inWindow
train
// Returns true when 'testing' sits in a different rack inside the rack
// window anchored at 'first', AND within the machine window starting at
// the position proportionally mapped from first's index into the testing
// rack's ring.
private boolean inWindow(DatanodeDescriptor first, DatanodeDescriptor testing) { readLock(); try { RackRingInfo rackInfo = racksMap.get(first.getNetworkLocation()); assert (rackInfo != null); Integer machineId = rackInfo.findNode(first); assert (machineId != null); final int rackWindowStart = rackInfo.index; final RackRingInfo rackTest = racksMap.get(testing.getNetworkLocation()); assert (rackTest != null); final int rackDist = (rackTest.index - rackWindowStart + racks.size()) % racks.size(); if (rackDist < rackWindow + 1 && rackTest.index != rackInfo.index) { // inside rack window final Integer idFirst = rackInfo.findNode(first); assert (idFirst != null); final int sizeFirstRack = rackInfo.rackNodes.size(); final int sizeTestRack = rackTest.rackNodes.size(); final int start = idFirst * sizeTestRack / sizeFirstRack; final Integer idTest = rackTest.findNode(testing); assert (idTest != null); final int dist = (idTest - start + sizeTestRack) % sizeTestRack; if (dist < machineWindow) { // inside machine Window return true; } } return false; } finally { readUnlock(); } }
java
{ "resource": "" }
q160293
BlockPlacementPolicyConfigurable.inWindow
train
// Three-node variant: returns true when testing1 and testing2 share a
// remote rack inside the rack window of 'first', and one of them falls in
// the proportional window derived from first's ring position while the
// other is within machineWindow of it (checked in both orders).
private boolean inWindow(DatanodeDescriptor first, DatanodeDescriptor testing1, DatanodeDescriptor testing2) { readLock(); try { if (!testing1.getNetworkLocation().equals(testing2.getNetworkLocation())) { return false; } RackRingInfo rackInfo = racksMap.get(first.getNetworkLocation()); assert (rackInfo != null); Integer machineId = rackInfo.findNode(first); assert (machineId != null); final int rackWindowStart = rackInfo.index; final RackRingInfo rackTest = racksMap.get(testing1.getNetworkLocation()); assert (rackTest != null); final int rackDist = (rackTest.index - rackWindowStart + racks.size()) % racks.size(); if (rackDist < rackWindow + 1 && rackTest.index != rackInfo.index) { // inside rack window final int rackSize = rackTest.rackNodes.size(); Integer idN2 = rackTest.findNode(testing1); assert (idN2 != null); Integer idN3 = rackTest.findNode(testing2); assert (idN3 != null); final Integer idFirst = rackInfo.findNode(first); assert (idFirst != null); final int sizeFirstRack = rackInfo.rackNodes.size(); final int end = idFirst * rackSize / sizeFirstRack; // proportional to previous of idFirst final int prevIdFirst = (idFirst + sizeFirstRack - 1) % sizeFirstRack; int start = (prevIdFirst * rackSize / sizeFirstRack); int distPropWindow = (end - start + rackSize) % rackSize; if (distPropWindow > 0) { start = (start + 1) % rackSize; distPropWindow--; } int distIdN2 = (idN2 - start + rackSize) % rackSize; int distIdN3 = (idN3 - start + rackSize) % rackSize; int distN3N2 = (idN3 - idN2 + rackSize) % rackSize; int distN2N3 = (idN2 - idN3 + rackSize) % rackSize; if (distIdN2 <= distPropWindow && distN3N2 < machineWindow) return true; if (distIdN3 <= distPropWindow && distN2N3 < machineWindow) return true; } return false; } finally { readUnlock(); } }
java
{ "resource": "" }
q160294
BlockPlacementPolicyConfigurable.findBestWithoutFirst
train
// With no fixed first replica, searches the candidate list for any
// same-rack pair whose ring distance (tried in either direction) is within
// the machine window; on success stores them as result[1]/result[2] and
// clears result[0].
private void findBestWithoutFirst(List<DatanodeDescriptor> listOfNodes, DatanodeDescriptor[] result) { readLock(); try { for (int in2 = 0; in2 < listOfNodes.size(); in2++) { DatanodeDescriptor n2 = listOfNodes.get(in2); for (int in3 = in2 + 1; in3 < listOfNodes.size(); in3++) { DatanodeDescriptor n3 = listOfNodes.get(in3); if (n2.getNetworkLocation().equals(n3.getNetworkLocation())) { RackRingInfo rackInfo = racksMap.get(n2.getNetworkLocation()); assert (rackInfo != null); final int rackSize = rackInfo.rackNodes.size(); final Integer idN2 = rackInfo.findNode(n2); final Integer idN3 = rackInfo.findNode(n3); if (idN2 != null && idN3 != null) { int dist = (idN3 - idN2 + rackSize) % rackSize; if (dist >= machineWindow) { dist = rackSize - dist; // try n2 - n3 } if (dist < machineWindow) { result[0] = null; result[1] = n2; result[2] = n3; return; } } } } } } finally { readUnlock(); } }
java
{ "resource": "" }
q160295
BlockPlacementPolicyConfigurable.chooseRemoteRack
train
// Tries successive random racks inside the rack window (the current rack
// is pre-excluded) and, for each candidate rack, a machine window whose
// start is mapped proportionally from the current machine index (rounded
// up and rescaled when 'reverse' is set); returns true as soon as
// chooseMachine succeeds in one of them.
protected boolean chooseRemoteRack(int rackIdx, int firstRack, int rackWindow, int machineIdx, int windowSize, HashMap<Node, Node> excludedNodes, long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results, boolean reverse) throws NotEnoughReplicasException { // randomly choose one node from remote racks readLock(); try { HashSet<Integer> excludedRacks = new HashSet<Integer>(); excludedRacks.add(rackIdx); int n = racks.size(); int currRackSize = racksMap.get(racks.get(rackIdx)).rackNodes.size(); while (excludedRacks.size() < rackWindow) { int newRack = randomIntInWindow(firstRack, rackWindow, n, excludedRacks); if (newRack < 0) break; excludedRacks.add(newRack); int newRackSize = racksMap.get(racks.get(newRack)).rackNodes.size(); int firstMachine = machineIdx * newRackSize / currRackSize; int newWindowSize = windowSize; if (reverse) { firstMachine = ((int) Math.ceil((double) machineIdx * newRackSize / currRackSize)) % newRackSize; newWindowSize = Math.max(1, windowSize * newRackSize / currRackSize); } if (newWindowSize <= 0) { continue; } if (chooseMachine(racks.get(newRack), firstMachine, newWindowSize, excludedNodes, blocksize, maxReplicasPerRack, results)) { return true; } } return false; } finally { readUnlock(); } }
java
{ "resource": "" }
q160296
BlockPlacementPolicyConfigurable.chooseMachine
train
// Tries random machines inside the given window of a rack's ring until one
// that is not already excluded passes isGoodTarget; the chosen node is
// recorded in excludedNodes and appended to results. Returns false when
// the window is exhausted or the rack has no datanodes.
protected boolean chooseMachine(String rack, int firstMachine, int windowSize, HashMap<Node, Node> excludedNodes, long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results) { readLock(); try { HashSet<Integer> excludedMachines = new HashSet<Integer>(); RackRingInfo rackInfo = racksMap.get(rack); assert (rackInfo != null); int n = rackInfo.rackNodesMap.size(); List<Node> rackDatanodes = clusterMap.getDatanodesInRack(rack); if (rackDatanodes == null) { return false; } while (excludedMachines.size() < windowSize) { int newMachine = randomIntInWindow(firstMachine, windowSize, n, excludedMachines); if (newMachine < 0) return false; excludedMachines.add(newMachine); DatanodeDescriptor chosenNode = null; for (Node node : rackDatanodes) { DatanodeDescriptor datanode = (DatanodeDescriptor) node; Integer idx = rackInfo.findNode(datanode); if (idx != null && idx.intValue() == newMachine) { chosenNode = datanode; break; } } if (chosenNode == null) continue; Node oldNode = excludedNodes.put(chosenNode, chosenNode); if (oldNode == null) { // choosendNode was not in the excluded list if (isGoodTarget(chosenNode, blocksize, maxReplicasPerRack, results)) { results.add(chosenNode); return true; } } } return false; } finally { readUnlock(); } }
java
{ "resource": "" }
q160297
ResourceTypeCounter.incr
train
/**
 * Increments the counter tracked for the given resource type, starting
 * at 1 the first time a type is seen.
 *
 * @param resourceType type whose counter to bump
 */
public void incr(ResourceType resourceType) {
  Integer current = typeToCountMap.get(resourceType);
  if (current == null) {
    // Autoboxing replaces the deprecated, always-allocating
    // new Integer(1) the original used.
    typeToCountMap.put(resourceType, 1);
  } else {
    typeToCountMap.put(resourceType, current + 1);
  }
}
java
{ "resource": "" }
q160298
ResourceTypeCounter.getCount
train
/**
 * Returns the count recorded for the given resource type, or 0 when the
 * type has never been incremented.
 *
 * @param resourceType type to look up
 * @return the recorded count, never null
 */
public Integer getCount(ResourceType resourceType) {
  final Integer count = typeToCountMap.get(resourceType);
  return (count == null) ? 0 : count;
}
java
{ "resource": "" }
q160299
SequentialNumber.skipTo
train
/**
 * Advances the counter directly to {@code newValue}.
 *
 * @param newValue value to jump to; must not move the counter backwards
 * @throws IllegalStateException if {@code newValue} is less than the
 *         current value
 */
public synchronized void skipTo(final long newValue) throws IllegalStateException {
  if (newValue >= currentValue) {
    currentValue = newValue;
    return;
  }
  throw new IllegalStateException(
      "Cannot skip to less than the current value (=" + currentValue
          + "), where newValue=" + newValue);
}
java
{ "resource": "" }