_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160100
PageSpec.setSections
train
/**
 * Replaces the current sections with the contents of the given list.
 * The internal list instance is retained (cleared and refilled), so any
 * caller holding a reference to it observes the new contents.
 *
 * @param sections sections to copy in; {@code null} means "no sections"
 */
public void setSections(List<PageSection> sections) {
    this.sections.clear();
    if (sections == null) {
        return;
    }
    this.sections.addAll(sections);
}
java
{ "resource": "" }
q160101
PageSpec.findOnlyExistingMatchingObjectNames
train
// Resolves a comma-separated object expression into the names of objects that
// actually exist in this spec. Each comma-separated part is trimmed; empty
// parts are skipped. A part is then handled one of three ways:
//  - a group reference (per GalenUtils.isObjectGroup) expands to the group's
//    member names via findObjectsInGroup;
//  - a search expression (per GalenUtils.isObjectsSearchExpression) is compiled
//    to a Pattern and matched against every sorted object name;
//  - a plain name is kept only when it is a key of the objects map.
// Unknown plain names are silently dropped — hence "only existing" in the name.
// Results keep encounter order: group members as returned by the group, pattern
// matches in alphanumeric-sorted order.
public List<String> findOnlyExistingMatchingObjectNames(String objectExpression) { String[] parts = objectExpression.split(","); List<String> allSortedObjectNames = getSortedObjectNames(); List<String> resultingObjectNames = new LinkedList<>(); for (String part : parts) { String singleExpression = part.trim(); if (!singleExpression.isEmpty()) { if (GalenUtils.isObjectGroup(singleExpression)) { resultingObjectNames.addAll(findObjectsInGroup(GalenUtils.extractGroupName(singleExpression))); } else if (GalenUtils.isObjectsSearchExpression(singleExpression)) { Pattern objectPattern = GalenUtils.convertObjectNameRegex(singleExpression); for (String objectName : allSortedObjectNames) { if (objectPattern.matcher(objectName).matches()) { resultingObjectNames.add(objectName); } } } else if (objects.containsKey(singleExpression)) { resultingObjectNames.add(singleExpression); } } } return resultingObjectNames; }
java
{ "resource": "" }
q160102
PageSpec.getSortedObjectNames
train
/**
 * Returns all object names of this spec, sorted with the project's
 * alphanumeric comparator (so "item2" sorts before "item10").
 *
 * @return a freshly allocated, sorted list of object names
 */
public List<String> getSortedObjectNames() {
    final List<String> sortedNames = new ArrayList<>(getObjects().keySet());
    Collections.sort(sortedNames, new AlphanumericComparator());
    return sortedNames;
}
java
{ "resource": "" }
q160103
PageSpec.findObjectsInGroup
train
/**
 * Returns the object names registered under the given group, or an empty
 * list when the group is unknown.
 *
 * @param groupName name of the object group to look up
 * @return the group's member names, or an immutable empty list
 */
public List<String> findObjectsInGroup(String groupName) {
    // Single lookup instead of containsKey + get: the original scanned the
    // map twice for the same key.
    List<String> group = getObjectGroups().get(groupName);
    if (group == null) {
        return Collections.emptyList();
    }
    return group;
}
java
{ "resource": "" }
q160104
PageSpec.merge
train
/**
 * Merges another page spec into this one: its objects and object groups are
 * overlaid onto this spec's maps (later entries win on key collision) and its
 * sections are appended to this spec's section list.
 *
 * @param spec the spec to merge in; must not be {@code null}
 * @throws IllegalArgumentException if {@code spec} is {@code null}
 */
public void merge(PageSpec spec) {
    if (spec == null) {
        throw new IllegalArgumentException("Cannot merge null spec");
    }
    // The three collections are independent, so merge order does not matter.
    objects.putAll(spec.getObjects());
    objectGroups.putAll(spec.getObjectGroups());
    sections.addAll(spec.getSections());
}
java
{ "resource": "" }
q160105
PageSpec.addSpec
train
/**
 * Adds a single textual spec for an object into the named section, creating
 * the section on first use.
 *
 * @param sectionName section to add to (created if absent)
 * @param objectName  name of the page object the spec applies to
 * @param specText    spec source text, parsed with a fresh SpecReader
 */
public void addSpec(String sectionName, String objectName, String specText) {
    PageSection section = findSection(sectionName);
    if (section == null) {
        // First spec for this section: register a new one.
        section = new PageSection(sectionName);
        sections.add(section);
    }
    ObjectSpecs parsedSpecs = new ObjectSpecs(objectName);
    parsedSpecs.addSpec(new SpecReader().read(specText));
    section.addObjects(parsedSpecs);
}
java
{ "resource": "" }
q160106
FileTempStorage.cleanup
train
/**
 * Deletes everything this storage tracks: child storages are cleaned up
 * recursively first, then this storage's own files are deleted quietly
 * (failures are ignored) and the file registry is emptied.
 */
public void cleanup() {
    if (this.childStorages != null) {
        for (FileTempStorage child : this.childStorages) {
            child.cleanup();
        }
    }
    for (File tempFile : this.files.values()) {
        // Quiet delete: a missing or locked file must not abort the cleanup.
        FileUtils.deleteQuietly(tempFile);
    }
    this.files.clear();
}
java
{ "resource": "" }
q160107
GalenUtils.resizeScreenshotIfNeeded
train
// Downscales a screenshot captured at a device pixel ratio (DPR) > 1 so its
// dimensions match CSS pixels. The DPR is read via JavaScript; on any failure
// it falls back to 1.0 and the exception is only printed (best-effort). When
// DPR > 1 and the screenshot has a positive width, the page's full width is
// queried (max of scroll/offset/client widths of body and documentElement) and
// screenshotWidth/pageWidth is used as the effective scale; the image is
// resized only when that estimated ratio is itself > 1, using SCALE_SMOOTH
// into a TYPE_INT_RGB buffer. Otherwise the original image is returned as-is.
// NOTE(review): a reported page width of 0 would make estimatedPixelRatio
// infinite and the scaled dimensions 0 — presumably impossible for a rendered
// page, but not guarded here; confirm against callers.
public static BufferedImage resizeScreenshotIfNeeded(WebDriver driver, BufferedImage screenshotImage) { Double devicePixelRatio = 1.0; try { devicePixelRatio = ((Number) ((JavascriptExecutor) driver).executeScript(JS_RETRIEVE_DEVICE_PIXEL_RATIO)).doubleValue(); } catch (Exception ex) { ex.printStackTrace(); } if (devicePixelRatio > 1.0 && screenshotImage.getWidth() > 0) { Long screenSize = ((Number) ((JavascriptExecutor) driver).executeScript("return Math.max(" + "document.body.scrollWidth, document.documentElement.scrollWidth," + "document.body.offsetWidth, document.documentElement.offsetWidth," + "document.body.clientWidth, document.documentElement.clientWidth);" )).longValue(); Double estimatedPixelRatio = ((double)screenshotImage.getWidth()) / ((double)screenSize); if (estimatedPixelRatio > 1.0) { int newWidth = (int) (screenshotImage.getWidth() / estimatedPixelRatio); int newHeight = (int) (screenshotImage.getHeight() / estimatedPixelRatio); Image tmp = screenshotImage.getScaledInstance(newWidth, newHeight, Image.SCALE_SMOOTH); BufferedImage scaledImage = new BufferedImage(newWidth, newHeight, BufferedImage.TYPE_INT_RGB); Graphics2D g2d = scaledImage.createGraphics(); g2d.drawImage(tmp, 0, 0, null); g2d.dispose(); return scaledImage; } else return screenshotImage; } else return screenshotImage; }
java
{ "resource": "" }
q160108
Rect.calculatePointOffsetDistance
train
// Offset distance from a point to this rectangle. When the point is inside the
// rect, the result is the max of the four signed distances to the edges (all
// non-positive except at the border, so this measures how close to an edge the
// point sits). When outside, the surrounding plane is split into eight regions
// by the isQuadrant1..7 helpers: corner regions use the max of the two axis
// offsets (Chebyshev-style), edge regions use the single perpendicular offset.
// NOTE(review): the exact region numbering (which quadrant is which) is defined
// by the isQuadrantN helpers elsewhere in the class — verify against them
// before relying on a specific geometric layout.
public int calculatePointOffsetDistance(Point point) { int right = left + width; int bottom = top + height; int pointLeft = point.getLeft(); int pointTop = point.getTop(); if (contains(point)) { return max(top - pointTop, pointTop - bottom, left - pointLeft, pointLeft - right); } else if (isQuadrant1(point)) { return max(abs(left - pointLeft), abs(top - pointTop)); } else if (isQuadrant2(point)) { return abs(top - pointTop); } else if (isQuadrant3(point)) { return max(abs(pointLeft - right), abs(top - pointTop)); } else if (isQuadrant4(point)) { return abs(pointLeft - right); } else if (isQuadrant5(point)) { return max(abs(pointLeft - right), abs(pointTop - bottom)); } else if (isQuadrant6(point)) { return abs(pointTop - bottom); } else if (isQuadrant7(point)) { return max(abs(left - pointLeft), abs(pointTop - bottom)); } else { return abs(left - pointLeft); } }
java
{ "resource": "" }
q160109
GalenJsApi.checkPageSpecLayout
train
// JS-API entry point for layout checking against an already-parsed PageSpec.
// Requires an active TestSession (throws UnregisteredTestSession otherwise).
// If screenshotFilePath is given, it must point to an existing regular file,
// else IOException; a null pageSpec is also rejected with IOException.
// Delegates to Galen.checkLayout with a SeleniumBrowser wrapper around the
// driver and a SectionFilter built from the include/exclude tag arrays, then
// attaches the resulting layout report to the session's TestReport under the
// literal title "<unknown>". Returns the layout report for the caller.
public static LayoutReport checkPageSpecLayout(WebDriver driver, PageSpec pageSpec, String[]includedTags, String[]excludedTags, String screenshotFilePath) throws IOException { TestSession session = TestSession.current(); if (session == null) { throw new UnregisteredTestSession("Cannot check layout as there was no TestSession created"); } TestReport report = session.getReport(); File screenshotFile = null; if (screenshotFilePath != null) { screenshotFile = new File(screenshotFilePath); if (!screenshotFile.exists() || !screenshotFile.isFile()) { throw new IOException("Couldn't find screenshot in " + screenshotFilePath); } } if (pageSpec == null) { throw new IOException("Page spec is not defined"); } List<String> includedTagsList = toList(includedTags); LayoutReport layoutReport = Galen.checkLayout(new SeleniumBrowser(driver), pageSpec, new SectionFilter(includedTagsList, toList(excludedTags)), screenshotFile, session.getListener()); GalenUtils.attachLayoutReport(layoutReport, report, "<unknown>", includedTagsList); return layoutReport; }
java
{ "resource": "" }
q160110
JobBase.getReport
train
/**
 * Renders all counters as a report: one line per counter in the form
 * {@code key<TAB>value}, long counters first, then double counters.
 *
 * @return the formatted report text (may be empty, never {@code null})
 */
protected String getReport() {
    // StringBuilder instead of StringBuffer: the buffer is method-local, so
    // StringBuffer's synchronization is pure overhead.
    StringBuilder sb = new StringBuilder();
    appendCounters(sb, this.longCounters.entrySet().iterator());
    appendCounters(sb, this.doubleCounters.entrySet().iterator());
    return sb.toString();
}

/**
 * Appends each remaining entry of {@code iter} as "key\tvalue\n".
 * Raw types are kept because the counter fields' generic types are declared
 * elsewhere and are not visible here.
 */
private static void appendCounters(StringBuilder sb, Iterator iter) {
    while (iter.hasNext()) {
        Entry e = (Entry) iter.next();
        sb.append(e.getKey().toString()).append("\t").append(e.getValue())
            .append("\n");
    }
}
java
{ "resource": "" }
q160111
Pentomino.stringifySolution
train
public static String stringifySolution(int width, int height, List<List<ColumnName>> solution) { String[][] picture = new String[height][width]; StringBuffer result = new StringBuffer(); // for each piece placement... for(List<ColumnName> row: solution) { // go through to find which piece was placed Piece piece = null; for(ColumnName item: row) { if (item instanceof Piece) { piece = (Piece) item; break; } } // for each point where the piece was placed, mark it with the piece name for(ColumnName item: row) { if (item instanceof Point) { Point p = (Point) item; picture[p.y][p.x] = piece.getName(); } } } // put the string together for(int y=0; y < picture.length; ++y) { for (int x=0; x < picture[y].length; ++x) { result.append(picture[y][x]); } result.append("\n"); } return result.toString(); }
java
{ "resource": "" }
q160112
Pentomino.getCategory
train
// Classifies a solution by where the "x" pentomino sits on the board.
// low_x/high_x/low_y/high_y form the bounding box of the x piece's covered
// points; the piece counts as centered on an axis when low+high == dim-1,
// i.e. the bounding box is symmetric about the board's middle. CENTER means
// centered on both axes, MID_X/MID_Y on one. Falls back to UPPER_LEFT when
// the x piece is centered on neither axis, or (defensively) when no row
// contains the x piece at all.
public SolutionCategory getCategory(List<List<ColumnName>> names) { Piece xPiece = null; // find the "x" piece for(Piece p: pieces) { if ("x".equals(p.name)) { xPiece = p; break; } } // find the row containing the "x" for(List<ColumnName> row: names) { if (row.contains(xPiece)) { // figure out where the "x" is located int low_x = width; int high_x = 0; int low_y = height; int high_y = 0; for(ColumnName col: row) { if (col instanceof Point) { int x = ((Point) col).x; int y = ((Point) col).y; if (x < low_x) { low_x = x; } if (x > high_x) { high_x = x; } if (y < low_y) { low_y = y; } if (y > high_y) { high_y = y; } } } boolean mid_x = (low_x + high_x == width - 1); boolean mid_y = (low_y + high_y == height - 1); if (mid_x && mid_y) { return SolutionCategory.CENTER; } else if (mid_x) { return SolutionCategory.MID_X; } else if (mid_y) { return SolutionCategory.MID_Y; } break; } } return SolutionCategory.UPPER_LEFT; }
java
{ "resource": "" }
q160113
Pentomino.initializePieces
train
// Registers the 12 pentominoes. Each shape is encoded as '/'-separated rows
// where 'x' marks a filled cell and ' ' an empty one. The boolean flag marks
// pieces whose mirror image is distinct (true = also try flipped placements),
// and the final argument selects how many distinct rotations the piece has
// (one, two or four). The shape strings are data and must not be reformatted.
protected void initializePieces() { pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation)); pieces.add(new Piece("v", "x /x /xxx", false, fourRotations)); pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations)); pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations)); pieces.add(new Piece("u", "x x/xxx", false, fourRotations)); pieces.add(new Piece("i", "xxxxx", false, twoRotations)); pieces.add(new Piece("f", " xx/xx / x ", true, fourRotations)); pieces.add(new Piece("p", "xx/xx/x ", true, fourRotations)); pieces.add(new Piece("z", "xx / x / xx", true, twoRotations)); pieces.add(new Piece("n", "xx / xxx", true, fourRotations)); pieces.add(new Piece("y", " x /xxxx", true, fourRotations)); pieces.add(new Piece("l", " x/xxxx", true, fourRotations)); }
java
{ "resource": "" }
q160114
Pentomino.generateRows
train
// Emits one dancing-links row per valid placement of the piece: for every
// rotation of its (possibly flipped) shape and every (x, y) offset where the
// shape fits on the width*height board. When upperLeft is set, placements are
// further restricted by isSide(...) on both axes — used to restrict the first
// piece's placements. The shared 'row' buffer is cleared and re-marked for
// each candidate placement before being handed to dancer.addRow, so the same
// array is reused across all placements.
private static void generateRows(DancingLinks dancer, Piece piece, int width, int height, boolean flip, boolean[] row, boolean upperLeft) { // for each rotation int[] rotations = piece.getRotations(); for(int rotIndex = 0; rotIndex < rotations.length; ++rotIndex) { // get the shape boolean[][] shape = piece.getShape(flip, rotations[rotIndex]); // find all of the valid offsets for(int x=0; x < width; ++x) { for(int y=0; y < height; ++y) { if (y + shape.length <= height && x + shape[0].length <= width && (!upperLeft || (isSide(x, shape[0].length, width) && isSide(y, shape.length, height)))) { // clear the columns related to the points on the board for(int idx=0; idx < width * height; ++idx) { row[idx] = false; } // mark the shape for(int subY=0; subY < shape.length; ++subY) { for(int subX=0; subX < shape[0].length; ++subX) { row[(y + subY) * width + x + subX] = shape[subY][subX]; } } dancer.addRow(row); } } } } }
java
{ "resource": "" }
q160115
Pentomino.main
train
/**
 * Demo driver: solves the 6x10 pentomino board, split into 2 sub-problems,
 * printing each split's choices and its solution count.
 */
public static void main(String[] args) {
    int width = 6;
    int height = 10;
    Pentomino model = new Pentomino(width, height);
    List splits = model.getSplits(2);
    // Enhanced for-loop replaces the raw Iterator of the original; each
    // element is an int[] of piece-placement choices (as the original's
    // cast showed).
    for (Object split : splits) {
        int[] choices = (int[]) split;
        System.out.print("split:");
        for (int i = 0; i < choices.length; ++i) {
            System.out.print(" " + choices[i]);
        }
        System.out.println();
        System.out.println(model.solve(choices) + " solutions found.");
    }
}
java
{ "resource": "" }
q160116
DBOutputFormat.constructQuery
train
/**
 * Builds an INSERT statement with one "?" placeholder per field, e.g.
 * {@code INSERT INTO t (a,b) VALUES (?,?);}. The explicit column list is
 * emitted only when at least one field name is given and the first one is
 * non-null.
 *
 * @param table      target table name
 * @param fieldNames column names; must not be {@code null}
 * @return the SQL text, terminated with ");"
 * @throws IllegalArgumentException if {@code fieldNames} is {@code null}
 */
protected String constructQuery(String table, String[] fieldNames) {
    if (fieldNames == null) {
        throw new IllegalArgumentException("Field names may not be null");
    }
    StringBuilder sql = new StringBuilder("INSERT INTO ").append(table);
    if (fieldNames.length > 0 && fieldNames[0] != null) {
        sql.append(" (");
        for (int i = 0; i < fieldNames.length; i++) {
            if (i > 0) {
                sql.append(",");
            }
            sql.append(fieldNames[i]);
        }
        sql.append(")");
    }
    sql.append(" VALUES (");
    for (int i = 0; i < fieldNames.length; i++) {
        if (i > 0) {
            sql.append(",");
        }
        sql.append("?");
    }
    sql.append(");");
    return sql.toString();
}
java
{ "resource": "" }
q160117
DBOutputFormat.setOutput
train
/**
 * Configures the job to write to a database table via this output format.
 * Speculative execution of reducers is disabled so each row is written once.
 *
 * @param job        job to configure
 * @param tableName  target table
 * @param fieldNames columns to write, in order
 */
public static void setOutput(JobConf job, String tableName, String... fieldNames) {
    job.setOutputFormat(DBOutputFormat.class);
    job.setReduceSpeculativeExecution(false);
    DBConfiguration dbConfiguration = new DBConfiguration(job);
    dbConfiguration.setOutputTableName(tableName);
    dbConfiguration.setOutputFieldNames(fieldNames);
}
java
{ "resource": "" }
q160118
NetworkTopology.getRacks
train
/**
 * Returns a snapshot copy of the known rack names, taken under the topology
 * read lock so callers can iterate it without further synchronization.
 *
 * @return a new list containing the current rack names
 */
public List<String> getRacks() {
    netlock.readLock().lock();
    try {
        List<String> snapshot = new ArrayList<String>(racks);
        return snapshot;
    } finally {
        netlock.readLock().unlock();
    }
}
java
{ "resource": "" }
q160119
NetworkTopology.add
train
// Adds a leaf node to the topology under its network location. Null nodes are
// ignored; inner nodes are rejected with IllegalArgumentException, as is a
// node whose rack location resolves to an existing non-InnerNode (an illegal
// network location). Under the write lock: the node is inserted into
// clusterMap, and when its rack did not previously exist, the rack name is
// added to 'racks' — an error is logged instead if the rack list already
// contained it (discrepancy between topology and rack list). The resulting
// topology is logged at debug level.
public void add(Node node) { if (node==null) return; if( node instanceof InnerNode ) { throw new IllegalArgumentException( "Not allow to add an inner node: "+NodeBase.getPath(node)); } netlock.writeLock().lock(); try { Node rack = getNode(node.getNetworkLocation()); if (rack != null && !(rack instanceof InnerNode)) { throw new IllegalArgumentException("Unexpected data node " + node.toString() + " at an illegal network location"); } if (clusterMap.add(node)) { LOG.info("Adding a new node: "+NodeBase.getPath(node)); if (rack == null) { String rackName = node.getNetworkLocation(); if (!racks.contains(rackName)) { racks.add(rackName); } else { LOG.error("Discrepancy between network topology and list of racks. " + "New rack was already in the list of racks: " + rackName); } } } LOG.debug("NetworkTopology became:\n" + this.toString()); } finally { netlock.writeLock().unlock(); } }
java
{ "resource": "" }
q160120
NetworkTopology.remove
train
// Removes a leaf node from the topology. Null nodes are ignored; inner nodes
// are rejected with IllegalArgumentException. Under the write lock: the node
// is removed from clusterMap, and if its rack disappeared as a result
// (getNode on the node's location now returns null), the rack name is removed
// from 'racks' — an error is logged if it was unexpectedly absent
// (discrepancy between topology and rack list). The resulting topology is
// logged at debug level.
public void remove(Node node) { if (node==null) return; if( node instanceof InnerNode ) { throw new IllegalArgumentException( "Not allow to remove an inner node: "+NodeBase.getPath(node)); } LOG.info("Removing a node: "+NodeBase.getPath(node)); netlock.writeLock().lock(); try { String currentRackName = node.getNetworkLocation(); if (clusterMap.remove(node)) { InnerNode rack = (InnerNode)getNode(node.getNetworkLocation()); if (rack == null) { if (!racks.remove(currentRackName)) { LOG.error("Discrepancy between network topology and list of racks. " + "Removed rack " + currentRackName + " was not in the rack list."); } } } LOG.debug("NetworkTopology became:\n" + this.toString()); } finally { netlock.writeLock().unlock(); } }
java
{ "resource": "" }
q160121
NetworkTopology.getNode
train
/**
 * Looks up the node at the given network location under the read lock.
 * The location is normalized, and the leading separator is stripped for
 * non-root paths before the cluster-map lookup.
 *
 * @param loc network location path
 * @return the node at that location, or whatever the cluster map returns
 *         for an unknown path
 */
public Node getNode(String loc) {
    netlock.readLock().lock();
    try {
        String normalized = NodeBase.normalize(loc);
        if (!NodeBase.ROOT.equals(normalized)) {
            // drop the leading path separator for the map lookup
            normalized = normalized.substring(1);
        }
        return clusterMap.getLoc(normalized);
    } finally {
        netlock.readLock().unlock();
    }
}
java
{ "resource": "" }
q160122
NetworkTopology.getDatanodesInRack
train
/**
 * Returns a snapshot of the datanodes in the rack at the given location,
 * or {@code null} when no such rack exists. Lookup and copy happen under
 * the topology read lock.
 *
 * @param loc rack location path
 * @return a new list of the rack's children, or {@code null}
 */
public List<Node> getDatanodesInRack(String loc) {
    netlock.readLock().lock();
    try {
        String path = NodeBase.normalize(loc);
        if (!NodeBase.ROOT.equals(path)) {
            // drop the leading path separator for the map lookup
            path = path.substring(1);
        }
        InnerNode rack = (InnerNode) clusterMap.getLoc(path);
        return (rack == null) ? null : new ArrayList<Node>(rack.getChildren());
    } finally {
        netlock.readLock().unlock();
    }
}
java
{ "resource": "" }
q160123
NetworkTopology.getAllRacks
train
/**
 * Returns the union of the regular rack names and the master rack set,
 * copied into a fresh set under the topology read lock.
 *
 * @return a new set containing all rack names
 */
public Set<String> getAllRacks() {
    netlock.readLock().lock();
    try {
        // Seed with the master racks, then add the regular ones — union is
        // order-independent.
        Set<String> allRacks = new HashSet<String>(this.masterRacksSet);
        allRacks.addAll(this.racks);
        return allRacks;
    } finally {
        netlock.readLock().unlock();
    }
}
java
{ "resource": "" }
q160124
NetworkTopology.getDistance
train
// Hop distance between two nodes in the topology tree: 0 for the identical
// node; otherwise the number of edges on the path through their closest
// common ancestor. Under the read lock, the deeper node is first raised to
// the shallower node's level (counting one hop per step), then both are
// walked up in lockstep until they share a parent (two hops per step); the
// final +2 accounts for the last step from each side to the common parent.
// If either walk falls off the tree (node not in this cluster), a warning is
// logged and Integer.MAX_VALUE is returned.
public int getDistance(Node node1, Node node2) { if (node1 == node2) { return 0; } Node n1=node1, n2=node2; int dis = 0; netlock.readLock().lock(); try { int level1=node1.getLevel(), level2=node2.getLevel(); while(n1!=null && level1>level2) { n1 = n1.getParent(); level1--; dis++; } while(n2!=null && level2>level1) { n2 = n2.getParent(); level2--; dis++; } while(n1!=null && n2!=null && n1.getParent()!=n2.getParent()) { n1=n1.getParent(); n2=n2.getParent(); dis+=2; } } finally { netlock.readLock().unlock(); } if (n1==null) { LOG.warn("The cluster does not contain node: "+NodeBase.getPath(node1)); return Integer.MAX_VALUE; } if (n2==null) { LOG.warn("The cluster does not contain node: "+NodeBase.getPath(node2)); return Integer.MAX_VALUE; } return dis+2; }
java
{ "resource": "" }
q160125
NetworkTopology.isOnSameRack
train
/**
 * Reports whether two nodes share the same parent (i.e. the same rack) in
 * the topology tree. {@code null} arguments yield {@code false}. The parent
 * comparison is done under the topology read lock.
 *
 * @param node1 first node, may be {@code null}
 * @param node2 second node, may be {@code null}
 * @return {@code true} when both nodes are non-null and have the same parent
 */
public boolean isOnSameRack( Node node1, Node node2) {
    if (node1 == null || node2 == null) {
        return false;
    }
    netlock.readLock().lock();
    try {
        boolean sameParent = node1.getParent() == node2.getParent();
        return sameParent;
    } finally {
        netlock.readLock().unlock();
    }
}
java
{ "resource": "" }
q160126
NetworkTopology.chooseRack
train
public String chooseRack(Set<String> excludedRacks) { String chosenRack = null; HashSet<Integer> chosenIndexes = new HashSet<Integer>(); netlock.readLock().lock(); try { int totalRacks = getNumOfRacks(); if (totalRacks - excludedRacks.size() <= 0) return null; // all racks are excluded while (true) { int rackIndex; do { rackIndex = r.nextInt(totalRacks); } while (chosenIndexes.contains(rackIndex)); chosenIndexes.add(rackIndex); chosenRack = racks.get(rackIndex); // if no excluded nodes are specified or the chosen node is not excluded // return it if (excludedRacks == null || (!excludedRacks.contains(chosenRack))) { return chosenRack; } // for sanity check if we used all indexes if (chosenIndexes.size() == totalRacks) { return null; } } } finally { netlock.readLock().unlock(); } }
java
{ "resource": "" }
q160127
Mapper.run
train
// Template method driving a map task: setup() once, then one map() call per
// key/value pair supplied by the context, then cleanup() once. Note that
// cleanup() is not wrapped in a finally block, so it is skipped when map()
// (or nextKeyValue()) throws.
public void run(Context context) throws IOException, InterruptedException { setup(context); while (context.nextKeyValue()) { map(context.getCurrentKey(), context.getCurrentValue(), context); } cleanup(context); }
java
{ "resource": "" }
q160128
StripeReader.getBlockIdInFile
train
/**
 * Finds the position of a block (by block id) within a file's block list.
 *
 * @param srcFs   the filesystem holding the file
 * @param srcPath path of the file to scan
 * @param blockId block id to look for
 * @return the zero-based block index within the file, or -1 when not found
 * @throws IOException if the file status or block list cannot be fetched
 */
private static int getBlockIdInFile(DistributedFileSystem srcFs, Path srcPath,
    long blockId) throws IOException {
    FileStatus srcStat = srcFs.getFileStatus(srcPath);
    LocatedBlocks located = srcFs.getClient().getLocatedBlocks(
        srcPath.toUri().getPath(), 0, srcStat.getLen());
    int index = 0;
    LOG.info("Look for block " + blockId + " in file " + srcPath);
    for (LocatedBlock lb : located.getLocatedBlocks()) {
        if (lb.getBlock().getBlockId() == blockId) {
            return index;
        }
        index++;
    }
    return -1;
}
java
{ "resource": "" }
q160129
StripeReader.getBlockLocation
train
// Maps a block index within a file to its (stripe index, index within stripe)
// for the given codec. For directory RAID the per-file index is first
// translated into a directory-wide block number: the sibling files of the
// parent directory (listed via RaidNode when 'lfs' was not supplied by the
// caller) are walked in order, summing their block counts until srcFile is
// reached, at which point the in-file index is added. Stripe index and
// in-stripe index are then simple div/mod by the codec's stripe length. The
// possibly freshly computed file listing is returned inside the LocationPair
// so callers can reuse it. Throws IOException when the parent directory
// cannot be listed.
public static LocationPair getBlockLocation(Codec codec, FileSystem srcFs, Path srcFile, int blockIdxInFile, Configuration conf, List<FileStatus> lfs) throws IOException { int stripeIdx = 0; int blockIdxInStripe = 0; int blockIdx = blockIdxInFile; if (codec.isDirRaid) { Path parentPath = srcFile.getParent(); if (lfs == null) { lfs = RaidNode.listDirectoryRaidFileStatus(conf, srcFs, parentPath); } if (lfs == null) { throw new IOException("Couldn't list files under " + parentPath); } int blockNum = 0; Path qSrcFile = srcFs.makeQualified(srcFile); for (FileStatus fsStat: lfs) { if (!fsStat.getPath().equals(qSrcFile)) { blockNum += RaidNode.getNumBlocks(fsStat); } else { blockNum += blockIdxInFile; break; } } blockIdx = blockNum; } stripeIdx = blockIdx / codec.stripeLength; blockIdxInStripe = blockIdx % codec.stripeLength; return new LocationPair(stripeIdx, blockIdxInStripe, lfs); }
java
{ "resource": "" }
q160130
ServerView.createPartControl
train
// Builds the server view UI. Creates a single-selection, full-row-selection
// Tree with scrollbars and four resizable columns (Location 300px, Master
// node 185px, State 95px, Status 300px), then wraps it in a TreeViewer that
// uses this part as both content provider and label provider, with
// CONTENT_ROOT as the (ignored) input. The viewer is registered as the view
// site's selection provider; the global DELETE action is bound to
// deleteAction and the edit-server / new-location actions are added to the
// toolbar. Local actions and the context menu are created last. The creation
// order is significant for SWT (columns before viewer, viewer before action
// wiring), so do not reorder casually.
@Override public void createPartControl(Composite parent) { Tree main = new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL | SWT.V_SCROLL); main.setHeaderVisible(true); main.setLinesVisible(false); main.setLayoutData(new GridData(GridData.FILL_BOTH)); TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE); serverCol.setText("Location"); serverCol.setWidth(300); serverCol.setResizable(true); TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE); locationCol.setText("Master node"); locationCol.setWidth(185); locationCol.setResizable(true); TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE); stateCol.setText("State"); stateCol.setWidth(95); stateCol.setResizable(true); TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE); statusCol.setText("Status"); statusCol.setWidth(300); statusCol.setResizable(true); viewer = new TreeViewer(main); viewer.setContentProvider(this); viewer.setLabelProvider(this); viewer.setInput(CONTENT_ROOT); // don't care getViewSite().setSelectionProvider(viewer); getViewSite().getActionBars().setGlobalActionHandler( ActionFactory.DELETE.getId(), deleteAction); getViewSite().getActionBars().getToolBarManager().add(editServerAction); getViewSite().getActionBars().getToolBarManager().add(newLocationAction); createActions(); createContextMenu(); }
java
{ "resource": "" }
q160131
WrappedRecordReader.skip
train
/**
 * Advances this reader past every record whose key is less than or equal to
 * the given key, leaving the head positioned at the first key greater than
 * {@code key} (or at end of input).
 *
 * @param key the key to skip up to and including
 * @throws IOException if reading the underlying records fails
 */
public void skip(K key) throws IOException {
    if (!hasNext()) {
        return;
    }
    // Keep consuming while the head key has not passed 'key'; next() both
    // advances and reports whether more input remains.
    while (cmp.compare(khead, key) <= 0) {
        if (!next()) {
            break;
        }
    }
}
java
{ "resource": "" }
q160132
WrappedRecordReader.accept
train
// JoinCollector callback: if 'key' equals this reader's current head key,
// drains every consecutive record carrying that key into vjoin (the do/while
// adds the current head value, then advances with next() until either input
// ends or the head key changes); otherwise vjoin stays empty. Either way the
// (possibly empty) value collection is registered with the collector under
// this reader's id, so the join sees an explicit empty contribution rather
// than a missing one.
@SuppressWarnings("unchecked") // no static type for the slot this sits in public void accept(CompositeRecordReader.JoinCollector i, K key) throws IOException { vjoin.clear(); if (0 == cmp.compare(key, khead)) { do { vjoin.add(vhead); } while (next() && 0 == cmp.compare(key, khead)); } i.add(id, vjoin); }
java
{ "resource": "" }
q160133
WrappedRecordReader.next
train
/**
 * Copies the current head key/value into the caller-supplied objects and
 * advances the reader.
 *
 * @param key   destination for the current key
 * @param value destination for the current value
 * @return {@code true} if a record was delivered, {@code false} at end of input
 * @throws IOException if cloning or advancing fails
 */
public boolean next(K key, U value) throws IOException {
    if (!hasNext()) {
        return false;
    }
    WritableUtils.cloneInto(key, khead);
    WritableUtils.cloneInto(value, vhead);
    next();
    return true;
}
java
{ "resource": "" }
q160134
BlockPlacementPolicy.getInstance
train
/**
 * Instantiates and initializes the configured block placement policy.
 * The implementation class is read from "dfs.block.replicator.classname",
 * defaulting to {@link BlockPlacementPolicyDefault}.
 *
 * @return a fully initialized policy instance
 */
public static BlockPlacementPolicy getInstance(Configuration conf,
    FSClusterStats stats, NetworkTopology clusterMap,
    HostsFileReader hostsReader, DNSToSwitchMapping dnsToSwitchMapping,
    FSNamesystem namesystem) {
    Class<? extends BlockPlacementPolicy> policyClass = conf.getClass(
        "dfs.block.replicator.classname",
        BlockPlacementPolicyDefault.class,
        BlockPlacementPolicy.class);
    BlockPlacementPolicy policy =
        (BlockPlacementPolicy) ReflectionUtils.newInstance(policyClass, conf);
    policy.initialize(conf, stats, clusterMap, hostsReader,
        dnsToSwitchMapping, namesystem);
    return policy;
}
java
{ "resource": "" }
q160135
SequenceFileInputFilter.setFilterClass
train
/**
 * Records the filter implementation class in the configuration under
 * {@code FILTER_CLASS}.
 *
 * @param conf        configuration to update
 * @param filterClass the filter class whose name is stored
 */
public static void setFilterClass(Configuration conf, Class<?> filterClass) {
    // Class<?> instead of the raw Class type of the original: same erasure
    // and source-compatible for all callers, but no raw-type warning.
    conf.set(FILTER_CLASS, filterClass.getName());
}
java
{ "resource": "" }
q160136
IPCLoggerChannel.createExecutor
train
// Creates the single-threaded executor used to serialize work sent to this
// journal node. The worker thread is a daemon (it will not keep the JVM
// alive), carries the remote address in its name for debuggability, and any
// uncaught throwable terminates the process via
// UncaughtExceptionHandlers.systemExit(). Overridable for tests.
@VisibleForTesting protected ExecutorService createExecutor() { return Executors.newSingleThreadExecutor( new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("Logger channel to " + addr) .setUncaughtExceptionHandler( UncaughtExceptionHandlers.systemExit()) .build()); }
java
{ "resource": "" }
q160137
IPCLoggerChannel.heartbeatIfNecessary
train
// Sends a heartbeat RPC when none has been sent within
// HEARTBEAT_INTERVAL_MILLIS — or ever, since a stopwatch that is not running
// means no heartbeat has been attempted yet. The stopwatch is reset and
// restarted in a finally block, so even a failed heartbeat counts toward the
// rate limit and failures cannot cause a heartbeat storm.
private void heartbeatIfNecessary() throws IOException { if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) { try { getProxy().heartbeat(createReqInfo()); } finally { // Don't send heartbeats more often than the configured interval, // even if they fail. lastHeartbeatStopwatch.reset().start(); } } }
java
{ "resource": "" }
q160138
IPCLoggerChannel.buildURLToFetchImage
train
@Override public URL buildURLToFetchImage(long txid) { Preconditions.checkArgument(txid >= -1, "Invalid segment: %s", txid); Preconditions.checkState(httpPort != -1, "HTTP port not set yet"); try { // for now we disable throttling for image downloads String path = GetJournalImageServlet.buildPath(journalId, txid, nsInfo, true); return new URL("http", addr.getAddress().getHostAddress(), httpPort, path.toString()); } catch (MalformedURLException e) { // should never get here. throw new IllegalStateException(e); } }
java
{ "resource": "" }
q160139
MRAsyncDiskService.moveAndDeleteRelativePath
train
// Asynchronously deletes volume/pathName: the path is first renamed (cheap)
// into the volume's TOBEDELETED directory under a unique timestamped name,
// then a DeleteTask is queued on the volume's executor to do the real work.
// Returns false when the source does not exist — rename can return false for
// that reason, and a FileNotFoundException is mapped to false as well. If the
// first rename fails while the source does exist, the TOBEDELETED parent is
// (re)created (throwing IOException when that fails) and the rename retried
// once; a second failure is an IOException.
public boolean moveAndDeleteRelativePath(String volume, String pathName) throws IOException { volume = normalizePath(volume); // Move the file right now, so that it can be deleted later String newPathName = format.format(new Date()) + "_" + uniqueId.getAndIncrement(); newPathName = TOBEDELETED + Path.SEPARATOR_CHAR + newPathName; Path source = new Path(volume, pathName); Path target = new Path(volume, newPathName); try { if (!localFileSystem.rename(source, target)) { // If the source does not exists, return false. // This is necessary because rename can return false if the source // does not exists. if (!localFileSystem.exists(source)) { return false; } // Try to recreate the parent directory just in case it gets deleted. if (!localFileSystem.mkdirs(new Path(volume, TOBEDELETED))) { throw new IOException("Cannot create " + TOBEDELETED + " under " + volume); } // Try rename again. If it fails, return false. if (!localFileSystem.rename(source, target)) { throw new IOException("Cannot rename " + source + " to " + target); } } } catch (FileNotFoundException e) { // Return false in case that the file is not found. return false; } DeleteTask task = new DeleteTask(volume, pathName, newPathName); execute(volume, task); return true; }
java
{ "resource": "" }
q160140
MRAsyncDiskService.moveAndDeleteFromEachVolume
train
/**
 * Schedules deletion of the given relative path on every volume.
 *
 * @param pathName path relative to each volume root
 * @return {@code true} only if the move-and-delete succeeded on all volumes
 * @throws IOException if any per-volume deletion fails with an I/O error
 */
public boolean moveAndDeleteFromEachVolume(String pathName) throws IOException {
    // BUG FIX: the original wrote "result = result && moveAndDelete...",
    // whose short-circuit skipped all remaining volumes as soon as one
    // returned false. Every volume must be attempted regardless of earlier
    // failures, so the call is evaluated unconditionally first.
    boolean result = true;
    for (int i = 0; i < volumes.length; i++) {
        boolean deleted = moveAndDeleteRelativePath(volumes[i], pathName);
        result = result && deleted;
    }
    return result;
}
java
{ "resource": "" }
q160141
MRAsyncDiskService.getRelativePathName
train
private static String getRelativePathName(String absolutePathName, String volume) { absolutePathName = normalizePath(absolutePathName); // Get the file names if (!absolutePathName.startsWith(volume)) { return null; } // Get rid of the volume prefix String fileName = absolutePathName.substring(volume.length()); if (fileName.charAt(0) == Path.SEPARATOR_CHAR) { fileName = fileName.substring(1); } return fileName; }
java
{ "resource": "" }
q160142
MRAsyncDiskService.moveAndDeleteAbsolutePath
train
/**
 * Schedules deletion of an absolute path by locating the volume that
 * contains it and delegating to the relative-path variant.
 *
 * @param absolutePathName absolute path to delete
 * @return the result of the per-volume deletion
 * @throws IOException when the path lies outside every known volume, or the
 *         delegated deletion fails
 */
public boolean moveAndDeleteAbsolutePath(String absolutePathName) throws IOException {
    for (String volume : volumes) {
        String relativePath = getRelativePathName(absolutePathName, volume);
        if (relativePath != null) {
            // First matching volume wins.
            return moveAndDeleteRelativePath(volume, relativePath);
        }
    }
    throw new IOException("Cannot delete " + absolutePathName
        + " because it's outside of all volumes.");
}
java
{ "resource": "" }
q160143
GetJournalImageServlet.buildPath
train
public static String buildPath(String journalId, long txid, NamespaceInfo nsInfo, boolean throttle) { StringBuilder path = new StringBuilder("/getImage?getimage=1&"); try { path.append(JOURNAL_ID_PARAM).append("=") .append(URLEncoder.encode(journalId, "UTF-8")); path.append("&" + TXID_PARAM).append("=") .append(txid); path.append("&" + THROTTLE_PARAM).append("=") .append(throttle); path.append("&" + STORAGEINFO_PARAM).append("=") .append(URLEncoder.encode(nsInfo.toColonSeparatedString(), "UTF-8")); } catch (UnsupportedEncodingException e) { // Never get here -- everyone supports UTF-8 throw new RuntimeException(e); } return path.toString(); }
java
{ "resource": "" }
q160144
GenericWritable.set
train
/**
 * Stores the wrapped instance and records its type index within the
 * registered type array.
 *
 * @param obj the writable to wrap; its exact class must be registered
 * @throws RuntimeException if the instance's class is not among getTypes()
 */
public void set(Writable obj) {
    instance = obj;
    Class<? extends Writable> instanceClazz = instance.getClass();
    Class<? extends Writable>[] registered = getTypes();
    // Linear scan: the registered type array is expected to be small.
    for (int i = 0; i < registered.length; i++) {
        if (registered[i].equals(instanceClazz)) {
            type = (byte) i;
            return;
        }
    }
    throw new RuntimeException("The type of instance is: "
        + instance.getClass() + ", which is NOT registered.");
}
java
{ "resource": "" }
q160145
FileJournalManager.getEditLogManifest
train
// Builds the manifest of edit-log segments starting at firstTxId. All log
// files in the storage directory's current dir are matched; corrupt segments
// are skipped. Segments whose first txid is at or after firstTxId are
// included; a segment whose range strictly contains firstTxId (starts before
// it, ends at or after it) is an error, because a manifest may only begin at
// a segment boundary. Entries are sorted before being wrapped in a
// RemoteEditLogManifest.
public RemoteEditLogManifest getEditLogManifest(long firstTxId) throws IOException { File currentDir = sd.getCurrentDir(); List<EditLogFile> allLogFiles = matchEditLogs( FileUtil.listFiles(currentDir)); if (LOG.isDebugEnabled()) { LOG.debug(allLogFiles); } List<RemoteEditLog> ret = new ArrayList<RemoteEditLog>( allLogFiles.size()); for (EditLogFile elf : allLogFiles) { if (elf.isCorrupt()) continue; if (elf.getFirstTxId() >= firstTxId) { ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId, elf.isInProgress)); } else if ((firstTxId > elf.getFirstTxId()) && (firstTxId <= elf.getLastTxId())) { throw new IOException("Asked for firstTxId " + firstTxId + " which is in the middle of file " + elf.file); } } Collections.sort(ret); return new RemoteEditLogManifest(ret); }
java
{ "resource": "" }
q160146
FileJournalManager.getLogFiles
train
/**
 * Returns the edit-log segments whose first txid is at or after
 * {@code fromTxId}, sorted by start txid.
 *
 * @param fromTxId        lowest transaction id of interest
 * @param enforceBoundary when {@code true}, a segment that contains
 *                        {@code fromTxId} strictly inside its range is an error
 * @return the matching segments, sorted by start txid
 * @throws IOException when {@code enforceBoundary} is set and {@code fromTxId}
 *         falls in the middle of a segment
 */
public List<EditLogFile> getLogFiles(long fromTxId, boolean enforceBoundary)
    throws IOException {
    File currentDir = sd.getCurrentDir();
    List<EditLogFile> candidates = matchEditLogs(currentDir.listFiles());
    List<EditLogFile> selected = new ArrayList<EditLogFile>();
    for (EditLogFile elf : candidates) {
        if (enforceBoundary
            && fromTxId > elf.getFirstTxId()
            && fromTxId <= elf.getLastTxId()) {
            throw new IOException("Asked for fromTxId " + fromTxId
                + " which is in middle of file " + elf.file);
        }
        if (fromTxId <= elf.getFirstTxId()) {
            selected.add(elf);
        }
    }
    Collections.sort(selected, EditLogFile.COMPARE_BY_START_TXID);
    return selected;
}
java
{ "resource": "" }
q160147
FileJournalManager.findMaxTransaction
train
// Scans every log segment (from txid 0) and returns the highest transaction
// id observed so far, folding results into the maxSeenTransaction field as a
// side effect (so repeated calls are monotonic). NOTE(review): for
// in-progress segments the first txid is also folded in — presumably because
// an in-progress segment's last txid may not be meaningful yet; verify
// against EditLogFile.getLastTxId().
private long findMaxTransaction() throws IOException { for (EditLogFile elf : getLogFiles(0)) { if (elf.isInProgress()) { maxSeenTransaction = Math.max(elf.getFirstTxId(), maxSeenTransaction); } maxSeenTransaction = Math.max(elf.getLastTxId(), maxSeenTransaction); } return maxSeenTransaction; }
java
{ "resource": "" }
q160148
TaskInProgress.init
train
// One-time initialization after construction: stamps the scheduling start
// time from the JobTracker clock, builds the TaskID from the job id,
// map-vs-reduce flag and partition, initializes skip mode, and captures the
// speculation settings (lag and duration) for the task's kind. Speculation is
// then limited to tasks whose progress rate is below
// 1/(1000*speculativeDuration); a non-positive duration disables that check
// by setting the threshold to -1.0. Finally reads whether processing rate
// (rather than progress rate) should drive speculation.
void init(JobID jobId) { this.startTime = JobTracker.getClock().getTime(); this.id = new TaskID(jobId, isMapTask(), partition); this.skipping = startSkipping(); long speculativeDuration; if (isMapTask()) { this.speculativeLag = conf.getMapSpeculativeLag(); speculativeDuration = conf.getMapSpeculativeDuration(); } else { this.speculativeLag = conf.getReduceSpeculativeLag(); speculativeDuration = conf.getReduceSpeculativeDuration(); } // speculate only if 1/(1000 * progress_rate) > speculativeDuration // ie. : // speculate only if progress_rate < 1/(1000 * speculativeDuration) if (speculativeDuration > 0) { this.maxProgressRateForSpeculation = 1.0/(1000.0*speculativeDuration); } else { // disable this check for durations <= 0 this.maxProgressRateForSpeculation = -1.0; } this.useProcessingRateForSpeculation = conf.getBoolean("mapreduce.job.speculative.using.processing.rate", false); }
java
{ "resource": "" }
q160149
TaskInProgress.getDispatchTime
train
/**
 * Returns the recorded dispatch time of the given attempt, or 0 when no
 * dispatch time was recorded for it.
 *
 * @param taskid the attempt to look up
 * @return the dispatch time in clock ticks, or 0 if unknown
 */
public long getDispatchTime(TaskAttemptID taskid){
    Long dispatchTime = dispatchTimeMap.get(taskid);
    return (dispatchTime == null) ? 0 : dispatchTime.longValue();
}
java
{ "resource": "" }
q160150
TaskInProgress.setDispatchTime
train
/**
 * Records the dispatch time for an attempt and remembers it as the TIP's
 * most recent dispatch time.
 *
 * @param taskid  the attempt being dispatched
 * @param disTime dispatch timestamp
 */
public void setDispatchTime(TaskAttemptID taskid, long disTime){
    // The two updates are independent; order does not matter.
    this.lastDispatchTime = disTime;
    dispatchTimeMap.put(taskid, disTime);
}
java
{ "resource": "" }
q160151
TaskInProgress.shouldClose
train
// Decides whether the tracker should close (tear down) this attempt.
// An attempt already reported closed returns true only if it is also marked
// for kill. Otherwise the attempt is recorded as closed and true is returned
// when any of these hold: the TIP has failed or the job has left the
// RUNNING/PREP states; the TIP already has a completed attempt and this
// attempt is not the completed map attempt itself (per the original comment,
// the attempt that completed a map is deliberately kept open); or the attempt
// is commit-pending but was not chosen to commit. Failing all that, the
// attempt closes only if it is explicitly marked in tasksToKill.
public boolean shouldClose(TaskAttemptID taskid) { /** * If the task hasn't been closed yet, and it belongs to a completed * TaskInProgress close it. * * However, for completed map tasks we do not close the task which * actually was the one responsible for _completing_ the TaskInProgress. */ if (tasksReportedClosed.contains(taskid)) { if (tasksToKill.keySet().contains(taskid)) return true; else return false; } boolean close = false; TaskStatus ts = taskStatuses.get(taskid); if ((ts != null) && ((this.failed) || ((job.getStatus().getRunState() != JobStatus.RUNNING && (job.getStatus().getRunState() != JobStatus.PREP))))) { tasksReportedClosed.add(taskid); close = true; } else if ((completes > 0) && // isComplete() is synchronized! !(isMapTask() && !jobSetup && !jobCleanup && isComplete(taskid))) { tasksReportedClosed.add(taskid); close = true; } else if (isCommitPending(taskid) && !shouldCommit(taskid)) { tasksReportedClosed.add(taskid); close = true; } else { close = tasksToKill.keySet().contains(taskid); } return close; }
java
{ "resource": "" }
q160152
TaskInProgress.generateSingleReport
train
// Builds a single TaskReport for this TIP: diagnostics aggregated from every
// attempt, a TIPStatus derived from the running/complete/killed/failed flags
// checked in that priority order (PENDING when none apply), plus progress,
// state string, counters and the exec start/finish times. RUNNING reports
// additionally carry the active attempt ids; COMPLETE reports carry the
// successful attempt id. Synchronized so the snapshot is internally
// consistent.
synchronized TaskReport generateSingleReport() { ArrayList<String> diagnostics = new ArrayList<String>(); for (List<String> l : taskDiagnosticData.values()) { diagnostics.addAll(l); } TIPStatus currentStatus = null; if (isRunning() && !isComplete()) { currentStatus = TIPStatus.RUNNING; } else if (isComplete()) { currentStatus = TIPStatus.COMPLETE; } else if (wasKilled()) { currentStatus = TIPStatus.KILLED; } else if (isFailed()) { currentStatus = TIPStatus.FAILED; } else if (!(isComplete() || isRunning() || wasKilled())) { currentStatus = TIPStatus.PENDING; } TaskReport report = new TaskReport (getTIPId(), (float)progress, state, diagnostics.toArray(new String[diagnostics.size()]), currentStatus, execStartTime, execFinishTime, counters); if (currentStatus == TIPStatus.RUNNING) { report.setRunningTaskAttempts(activeTasks.keySet()); } else if (currentStatus == TIPStatus.COMPLETE) { report.setSuccessfulAttempt(getSuccessfulTaskid()); } return report; }
java
{ "resource": "" }
q160153
TaskInProgress.addDiagnosticInfo
train
/**
 * Appends a diagnostic message to the per-attempt diagnostic history,
 * creating the history list for the attempt on first use.
 */
public void addDiagnosticInfo(TaskAttemptID taskId, String diagInfo) {
  List<String> history = taskDiagnosticData.get(taskId);
  if (history == null) {
    history = new ArrayList<String>();
    taskDiagnosticData.put(taskId, history);
  }
  history.add(diagInfo);
}
java
{ "resource": "" }
q160154
TaskInProgress.incompleteSubTask
train
/**
 * Marks the given attempt as incomplete (failed or killed) and updates TIP
 * bookkeeping: applies a user-requested kill/fail taken from tasksToKill,
 * normalizes a non-terminal attempt state to FAILED, stamps a finish time if
 * the tracker never reported one, removes the attempt from activeTasks,
 * un-counts a previously successful map attempt (unless the job already
 * SUCCEEDED), records failure/kill counts, failed hosts and skip ranges, and
 * kills the whole TIP once numTaskFailures reaches maxTaskAttempts.
 */
public void incompleteSubTask(TaskAttemptID taskid, JobStatus jobStatus) { // // Note the failure and its location // TaskStatus status = taskStatuses.get(taskid); String trackerName; String trackerHostName = null; TaskStatus.State taskState = TaskStatus.State.FAILED; if (status != null) { trackerName = status.getTaskTracker(); trackerHostName = JobInProgressTraits.convertTrackerNameToHostName(trackerName); // Check if the user manually KILLED/FAILED this task-attempt... Boolean shouldFail = tasksToKill.remove(taskid); if (shouldFail != null) { if (status.getRunState() == TaskStatus.State.FAILED || status.getRunState() == TaskStatus.State.KILLED) { taskState = (shouldFail) ? TaskStatus.State.FAILED : TaskStatus.State.KILLED; } else { taskState = (shouldFail) ? TaskStatus.State.FAILED_UNCLEAN : TaskStatus.State.KILLED_UNCLEAN; } status.setRunState(taskState); addDiagnosticInfo(taskid, "Task has been " + taskState + " by the user" ); } taskState = status.getRunState(); if (taskState != TaskStatus.State.FAILED && taskState != TaskStatus.State.KILLED && taskState != TaskStatus.State.FAILED_UNCLEAN && taskState != TaskStatus.State.KILLED_UNCLEAN) { LOG.info("Task '" + taskid + "' running on '" + trackerName + "' in state: '" + taskState + "' being failed!"); status.setRunState(TaskStatus.State.FAILED); taskState = TaskStatus.State.FAILED; } // tasktracker went down and failed time was not reported. if (0 == status.getFinishTime()){ status.setFinishTime(JobTracker.getClock().getTime()); } } this.activeTasks.remove(taskid); // Since we do not fail completed reduces (whose outputs go to hdfs), we // should note this failure only for completed maps, only if this taskid; // completed this map. 
however if the job is done, there is no need to // manipulate completed maps if (this.isMapTask() && !jobSetup && !jobCleanup && isComplete(taskid) && jobStatus.getRunState() != JobStatus.SUCCEEDED) { this.completes--; // Reset the successfulTaskId since we don't have a SUCCESSFUL task now resetSuccessfulTaskid(); } // Note that there can be failures of tasks that are hosted on a machine // that has not yet registered with restarted jobtracker // recalculate the counts only if its a genuine failure if (tasks.contains(taskid)) { if (taskState == TaskStatus.State.FAILED) { numTaskFailures++; machinesWhereFailed.add(trackerHostName); if(maxSkipRecords>0) { //skipping feature enabled LOG.debug("TaskInProgress adding" + status.getNextRecordRange()); failedRanges.add(status.getNextRecordRange()); skipping = startSkipping(); } } else if (taskState == TaskStatus.State.KILLED) { numKilledTasks++; } } if (numTaskFailures >= maxTaskAttempts) { LOG.info("TaskInProgress " + getTIPId() + " has failed " + numTaskFailures + " times."); kill(); } }
java
{ "resource": "" }
q160155
TaskInProgress.completed
train
/**
 * Records that the given attempt completed successfully: marks it SUCCEEDED,
 * remembers it as the successful attempt, bumps the TIP's completion count,
 * stamps the execution finish time and recomputes progress. Remaining
 * speculative attempts are closed later via shouldClose().
 */
public void completed(TaskAttemptID taskid) { // // Record that this taskid is complete // completedTask(taskid, TaskStatus.State.SUCCEEDED); // Note the successful taskid setSuccessfulTaskid(taskid); // // Now that the TIP is complete, the other speculative // subtasks will be closed when the owning tasktracker // reports in and calls shouldClose() on this object. // this.completes++; this.execFinishTime = JobTracker.getClock().getTime(); recomputeProgress(); }
java
{ "resource": "" }
q160156
TaskInProgress.killTask
train
/**
 * Requests that the given attempt be killed (or failed, when shouldFail is
 * true). Only applies to attempts that are RUNNING, COMMIT_PENDING, in task
 * cleanup, or UNASSIGNED, and not already in the kill list; the diagnostic
 * message is recorded and logged. Returns true if the request was newly
 * registered.
 */
boolean killTask(TaskAttemptID taskId, boolean shouldFail, String diagnosticInfo) { TaskStatus st = taskStatuses.get(taskId); if(st != null && (st.getRunState() == TaskStatus.State.RUNNING || st.getRunState() == TaskStatus.State.COMMIT_PENDING || st.inTaskCleanupPhase() || st.getRunState() == TaskStatus.State.UNASSIGNED) && tasksToKill.put(taskId, shouldFail) == null ) { addDiagnosticInfo(taskId, diagnosticInfo); LOG.info(diagnosticInfo); return true; } return false; }
java
{ "resource": "" }
q160157
TaskInProgress.canBeSpeculated
train
/**
 * Decides whether this TIP is eligible for a speculative attempt now.
 * Ineligible when skipping records, not runnable/running, already completed,
 * only commit-pending, or already at MAX_TASK_EXECS concurrent attempts.
 * Forced speculation short-circuits to true. Otherwise requires the
 * speculative lag to have elapsed since dispatch and the progress rate to be
 * below maxProgressRateForSpeculation (when configured); speculates
 * unconditionally when the job wants all remaining tasks speculated, and
 * finally defers to the processing-rate or progress-rate heuristic.
 */
boolean canBeSpeculated(long currentTime) { if (skipping || !isRunnable() || !isRunning() || completes != 0 || isOnlyCommitPending() || activeTasks.size() > MAX_TASK_EXECS) { if (isMapTask() ? job.shouldLogCannotspeculativeMaps() : job.shouldLogCannotspeculativeReduces()) { LOG.info("Task "+ getTIPId() + " cannot be speculated because of " + "skipping = "+ skipping + " isRunnable() = "+ isRunnable() + " isRunning() = "+ isRunning() + " completes = " + completes + " isOnlyCommitPending() = "+ isOnlyCommitPending() + " activetask-size = "+ activeTasks.size() + " MAX_TASK_EXECS = " + MAX_TASK_EXECS); } return false; } if (isSpeculativeForced()) { return true; } // no speculation for first few seconds if (currentTime - lastDispatchTime < speculativeLag) { if (isMapTask() ? job.shouldLogCannotspeculativeMaps() : job.shouldLogCannotspeculativeReduces()) { LOG.info("Task "+ getTIPId() + " cannot be speculated because of " + "no speculation for first few seconds"); } return false; } // if the task is making progress fast enough to complete within // the acceptable duration allowed for each task - do not speculate if ((maxProgressRateForSpeculation > 0) && (progressRate > maxProgressRateForSpeculation)) { if (isMapTask() ? job.shouldLogCannotspeculativeMaps() : job.shouldLogCannotspeculativeReduces()) { LOG.info("Task "+ getTIPId() +" cannot be speculated because " + "the task progress rate is fast enough to complete." + " maxProgressRateForSpeculation = " + maxProgressRateForSpeculation + " and progressRate = " + progressRate); } return false; } if (isMapTask() ? job.shouldSpeculateAllRemainingMaps() : job.shouldSpeculateAllRemainingReduces()) { if (LOG.isDebugEnabled()) { LOG.debug("Speculate " + getTIPId() + " because the job is almost finished"); } return true; } if (useProcessingRateForSpeculation) { return canBeSpeculatedUsingProcessingRate(currentTime); } else { return canBeSpeculatedUsingProgressRate(currentTime); } }
java
{ "resource": "" }
q160158
TaskInProgress.getTaskToRun
train
/**
 * Allocates a fresh attempt id for this TIP (attempt ids stay unique across
 * jobtracker restarts via NUM_ATTEMPTS_PER_RESTART) and returns a Task bound
 * to the given tracker, or null when the attempt budget (MAX_TASK_EXECS +
 * maxTaskAttempts + killed attempts) is exhausted. Also records the dispatch
 * time (used for progress-rate computation) and the first execution start
 * time.
 */
public Task getTaskToRun(String taskTracker) { // Create the 'taskid'; do not count the 'killed' tasks against the job! TaskAttemptID taskid = null; if (nextTaskId < (MAX_TASK_EXECS + maxTaskAttempts + numKilledTasks)) { // Make sure that the attempts are unqiue across restarts int attemptId = job.getNumRestarts() * NUM_ATTEMPTS_PER_RESTART + nextTaskId; taskid = new TaskAttemptID( id, attemptId); ++nextTaskId; } else { LOG.warn("Exceeded limit of " + (MAX_TASK_EXECS + maxTaskAttempts) + " (plus " + numKilledTasks + " killed)" + " attempts for the tip '" + getTIPId() + "'"); return null; } //keep track of the last time we started an attempt at this TIP //used to calculate the progress rate of this TIP setDispatchTime(taskid, JobTracker.getClock().getTime()); if (0 == execStartTime){ // assume task starts running now execStartTime = JobTracker.getClock().getTime(); } return addRunningTask(taskid, taskTracker); }
java
{ "resource": "" }
q160159
TaskInProgress.addRunningTask
train
/**
 * Creates the concrete MapTask/ReduceTask object for the given attempt and
 * registers it as running on the given tracker. Task-cleanup attempts use a
 * single slot and inherit the attempt's current run state; job setup/cleanup
 * flags, skip ranges and skipping mode are propagated onto the task. A
 * second concurrent attempt is remembered as the speculative attempt, and
 * the first attempt id ever issued for this TIP is recorded.
 */
public Task addRunningTask(TaskAttemptID taskid, String taskTracker, boolean taskCleanup) { // 1 slot is enough for taskCleanup task int numSlotsNeeded = taskCleanup ? 1 : numSlotsRequired; // create the task Task t = null; if (isMapTask()) { LOG.debug("attempt " + numTaskFailures + " sending skippedRecords " + failedRanges.getIndicesCount()); String splitClass = null; BytesWritable split; if (!jobSetup && !jobCleanup) { splitClass = rawSplit.getClassName(); split = rawSplit.getBytes(); } else { split = new BytesWritable(); } t = new MapTask(jobFile, taskid, partition, splitClass, split, numSlotsNeeded, job.getUser()); } else { t = new ReduceTask(jobFile, taskid, partition, numMaps, numSlotsNeeded, job.getUser()); } if (jobCleanup) { t.setJobCleanupTask(); } if (jobSetup) { t.setJobSetupTask(); } if (taskCleanup) { t.setTaskCleanupTask(); t.setState(taskStatuses.get(taskid).getRunState()); cleanupTasks.put(taskid, taskTracker); } t.setConf(conf); LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges()); t.setSkipRanges(failedRanges.getSkipRanges()); t.setSkipping(skipping); if(failedRanges.isTestAttempt()) { t.setWriteSkipRecs(false); } if (activeTasks.size() >= 1) { speculativeTaskId = taskid; } else { speculativeTaskId = null; } activeTasks.put(taskid, taskTracker); tasks.add(taskid); // Ask JobTracker to note that the task exists // jobtracker.createTaskEntry(taskid, taskTracker, this); /* // code to find call paths to createTaskEntry StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); boolean found = false; for (StackTraceElement s: stackTraceElements) { if (s.getMethodName().indexOf("heartbeat") != -1 || s.getMethodName().indexOf("findTask") != -1 || s.getMethodName().indexOf("createAndAddAttempt") != -1 || s.getMethodName().indexOf("processTaskAttempt") != -1) { found = true; break; } } if (!found) { RuntimeException e = new RuntimeException ("calling addRunningTask from outside heartbeat"); 
LOG.info(StringUtils.stringifyException(e)); throw (e); } */ // check and set the first attempt if (firstTaskId == null) { firstTaskId = taskid; } return t; }
java
{ "resource": "" }
q160160
TaskInProgress.hasRunOnMachine
train
/**
 * True when this TIP currently has an attempt running on the given tracker
 * or has previously failed on the given host.
 */
public boolean hasRunOnMachine(String trackerHost, String trackerName) {
  if (activeTasks.containsValue(trackerName)) {
    return true;
  }
  return hasFailedOnMachine(trackerHost);
}
java
{ "resource": "" }
q160161
TaskInProgress.getSplitNodes
train
/**
 * Returns a comma-separated list of the hosts holding this map's input
 * split. Returns an empty string for reduce TIPs, job setup/cleanup TIPs,
 * and splits that report no locations.
 */
public String getSplitNodes() {
  if (!isMapTask() || jobSetup || jobCleanup) {
    return "";
  }
  String[] nodes = rawSplit.getLocations();
  if (nodes == null || nodes.length == 0) {
    return "";
  }
  // StringBuilder instead of StringBuffer: the buffer is method-local, so
  // StringBuffer's per-call synchronization is pure overhead here.
  StringBuilder ret = new StringBuilder(nodes[0]);
  for (int i = 1; i < nodes.length; i++) {
    ret.append(',');
    ret.append(nodes[i]);
  }
  return ret.toString();
}
java
{ "resource": "" }
q160162
TaskInProgress.updateProgressRate
train
/**
 * Recomputes this TIP's progress rate as the best progress/elapsed ratio
 * over all RUNNING/SUCCEEDED/COMMIT_PENDING attempts (elapsed time is
 * measured from each attempt's dispatch time, floored at 1 to avoid division
 * by zero), and folds the old and new rates into the job's running-task
 * statistics for this task type.
 */
public void updateProgressRate(long currentTime) { double bestProgressRate = 0; for (TaskStatus ts : taskStatuses.values()){ if (ts.getRunState() == TaskStatus.State.RUNNING || ts.getRunState() == TaskStatus.State.SUCCEEDED || ts.getRunState() == TaskStatus.State.COMMIT_PENDING) { double tsProgressRate = ts.getProgress()/Math.max(1, currentTime - getDispatchTime(ts.getTaskID())); if (tsProgressRate > bestProgressRate){ bestProgressRate = tsProgressRate; } } } DataStatistics taskStats = job.getRunningTaskStatistics(isMapTask()); taskStats.updateStatistics(progressRate, bestProgressRate); progressRate = bestProgressRate; }
java
{ "resource": "" }
q160163
TaskInProgress.updateJobStats
train
/**
 * Folds this TIP's old and new processing rates for the given phase into
 * the job's per-phase running-task statistics (updateStatistics is presumed
 * to replace the old sample with the new one — confirm in DataStatistics).
 */
private void updateJobStats(Phase phase, ProcessingRates oldRates, ProcessingRates newRates) { DataStatistics stats = job.getRunningTaskStatistics(phase); stats.updateStatistics(oldRates.getRate(phase), newRates.getRate(phase)); }
java
{ "resource": "" }
q160164
FairScheduler.getAvailableSlots
train
/**
 * Number of slots of the given type still free on the tracker: its
 * configured maximum minus the slots it will still occupy after this
 * heartbeat is processed.
 */
private int getAvailableSlots(TaskTrackerStatus tts, TaskType type) {
  int capacity = getMaxSlots(tts, type);
  int occupied = occupiedSlotsAfterHeartbeat(tts, type);
  return capacity - occupied;
}
java
{ "resource": "" }
q160165
FairScheduler.occupiedSlotsAfterHeartbeat
train
/**
 * Slots of the given type the tracker will still occupy after this
 * heartbeat: currently occupied slots minus the slots being released.
 */
private int occupiedSlotsAfterHeartbeat(
    TaskTrackerStatus tts, TaskType type) {
  if (type == TaskType.MAP) {
    return tts.countOccupiedMapSlots() - tts.getMapsReleased();
  }
  return tts.countOccupiedReduceSlots() - tts.getReducesReleased();
}
java
{ "resource": "" }
q160166
FairScheduler.updateLocalityWaitTimes
train
/**
 * Charges the time since the previous heartbeat to every job that was
 * skipped for locality at the last heartbeat (accumulated in
 * timeWaitedForLocalMap) and clears the skipped flag so the same interval is
 * not counted twice. Also refreshes lastHeartbeatTime; the very first call
 * charges nothing.
 */
private void updateLocalityWaitTimes(long currentTime) { long timeSinceLastHeartbeat = (lastHeartbeatTime == 0 ? 0 : currentTime - lastHeartbeatTime); lastHeartbeatTime = currentTime; for (JobInfo info: infos.values()) { if (info.skippedAtLastHeartbeat) { info.timeWaitedForLocalMap += timeSinceLastHeartbeat; // We reset the flag so that timeWaitedForLocalMap is increment only // once. It will be increment again if skippedAtLastHeartbeat is set // to true next time. info.skippedAtLastHeartbeat = false; } } }
java
{ "resource": "" }
q160167
FairScheduler.updateLastMapLocalityLevel
train
/**
 * Records the locality level of the map task just launched for this job
 * and resets the job's accumulated locality wait time.
 */
private void updateLastMapLocalityLevel(JobInProgress job,
    Task mapTaskLaunched, TaskTrackerStatus tracker) {
  JobInfo jobInfo = infos.get(job);
  jobInfo.lastMapLocalityLevel =
      localManager.taskToLocalityLevel(job, mapTaskLaunched, tracker);
  jobInfo.timeWaitedForLocalMap = 0;
}
java
{ "resource": "" }
q160168
FairScheduler.update
train
/**
 * Periodic scheduler refresh. Fetches the cluster status and (optionally)
 * recomputes locality delays from the jobtracker heartbeat interval before
 * taking the scheduler lock, to avoid holding both locks at once. Under the
 * lock: reloads the allocations file when stale (re-checking minimum-slot
 * reservations), drops SUCCEEDED/FAILED/KILLED jobs, then recomputes
 * deficits, runnability, task counts, weights, min/max slots, fair shares
 * and preemption state, re-sorts jobs, updates pool metrics and dumps
 * status.
 */
protected void update() { //Making more granual locking so that clusterStatus can be fetched from Jobtracker. ClusterStatus clusterStatus = taskTrackerManager.getClusterStatus(); // Recompute locality delay from JobTracker heartbeat interval if enabled. // This will also lock the JT, so do it outside of a fair scheduler lock. if (autoComputeLocalityDelay) { JobTracker jobTracker = (JobTracker) taskTrackerManager; localityDelayNodeLocal = Math.min(MAX_AUTOCOMPUTED_LOCALITY_DELAY, (long) (1.5 * jobTracker.getNextHeartbeatInterval())); localityDelayRackLocal = localityDelayNodeLocal; } // Got clusterStatus hence acquiring scheduler lock now // Remove non-running jobs synchronized(this){ // Reload allocations file if it hasn't been loaded in a while if (poolMgr.reloadAllocsIfNecessary()) { // Check if the cluster have enough slots for reserving poolMgr.checkMinimumSlotsAvailable(clusterStatus, TaskType.MAP); poolMgr.checkMinimumSlotsAvailable(clusterStatus, TaskType.REDUCE); } List<JobInProgress> toRemove = new ArrayList<JobInProgress>(); for (JobInProgress job: infos.keySet()) { int runState = job.getStatus().getRunState(); if (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED || runState == JobStatus.KILLED) { toRemove.add(job); } } for (JobInProgress job: toRemove) { infos.remove(job); poolMgr.removeJob(job); } // Update running jobs with deficits since last update, and compute new // slot allocations, weight, shares and task counts long now = clock.getTime(); long timeDelta = now - lastUpdateTime; updateDeficits(timeDelta); updateRunnability(); updateTaskCounts(); updateWeights(); updateMinAndMaxSlots(); updateFairShares(clusterStatus); if (preemptionEnabled) { updatePreemptionVariables(); } sortJobs(); updatePoolMetrics(); dumpStatus(now); lastUpdateTime = now; } }
java
{ "resource": "" }
q160169
FairScheduler.adjustClusterwideReason
train
/**
 * Replaces the job's own blocked-admission reason with a cluster-wide one
 * (soft/hard task limit) when cluster-wide admission control currently
 * applies; system pools are exempt from the cluster-wide limits. When no
 * cluster-wide reason applies, the original reason is returned unchanged.
 */
private BlockedAdmissionReason adjustClusterwideReason(
    AdmissionControlData admissionControlData,
    BlockedAdmissionReason originalReason,
    String poolName) {
  boolean enforceLimits = !poolMgr.isSystemPool(poolName);
  BlockedAdmissionReason clusterwideReason =
      BlockedAdmissionReason.underClusterwideAdmissionControl(
          admissionControlData.getSoftTaskLimit(),
          admissionControlData.getHardTaskLimit(),
          admissionControlData.getTotalTasks(),
          enforceLimits);
  if (clusterwideReason == BlockedAdmissionReason.NONE) {
    return originalReason;
  }
  return clusterwideReason;
}
java
{ "resource": "" }
q160170
FairScheduler.getNotAdmittedJobs
train
/**
 * Returns an info record for every job still waiting to be initialized
 * (needsInitializing), with each job's blocked reason adjusted for
 * cluster-wide soft/hard task limits and annotated with the average wait
 * time per hard-admission job.
 */
synchronized Collection<NotAdmittedJobInfo> getNotAdmittedJobs() { List<NotAdmittedJobInfo> jobInfoList = new ArrayList<NotAdmittedJobInfo>(infos.size()); AdmissionControlData admissionControlData = jobInitializer.getAdmissionControlData(); float averageWaitMsecsPerHardAdmissionJob = jobInitializer.getAverageWaitMsecsPerHardAdmissionJob(); for (Map.Entry<JobInProgress, JobInfo> entry : infos.entrySet()) { JobInProgress job = entry.getKey(); JobInfo jobInfo = entry.getValue(); if (!jobInfo.needsInitializing) { continue; } String poolName = poolMgr.getPoolName(job); // Adjust the not admitted reason with admission control data for // any soft or hard limits BlockedAdmissionReason reason = adjustClusterwideReason( admissionControlData, jobInfo.reason, poolName); jobInfoList.add(new NotAdmittedJobInfo( job.getStartTime(), job.getJobID().toString(), job.getJobConf().getUser(), poolName, job.getPriority().toString(), reason, jobInfo.reasonLimit, jobInfo.actualValue, jobInfo.hardAdmissionPosition, averageWaitMsecsPerHardAdmissionJob)); } return jobInfoList; }
java
{ "resource": "" }
q160171
FairScheduler.getJobNotAdmittedReason
train
/**
 * Returns a human-readable explanation of why the given job has not been
 * admitted, combining the job's own blocked reason (adjusted for
 * cluster-wide admission-control limits) with its limit, actual value,
 * queue position and wait info. Returns a fallback string when the job is
 * unknown to the scheduler.
 */
private synchronized String getJobNotAdmittedReason(JobInProgress job, JobAdmissionWaitInfo waitInfo) { JobInfo jobInfo = infos.get(job); if (jobInfo == null) { return "Unknown, can't find job"; } AdmissionControlData admissionControlData = jobInitializer.getAdmissionControlData(); return NotAdmittedJobInfo.getReasoning( adjustClusterwideReason( admissionControlData, jobInfo.reason, poolMgr.getPoolName(job)), jobInfo.reasonLimit, jobInfo.actualValue, jobInfo.hardAdmissionPosition, waitInfo); }
java
{ "resource": "" }
q160172
FairScheduler.dumpStatus
train
/**
 * Rate-limited status dump: at most once per dumpStatusPeriod, logs job
 * stats for maps and reduces and dumps the speculation status.
 */
private void dumpStatus(long now) {
  long sinceLastDump = now - lastDumpStatusTime;
  if (sinceLastDump < dumpStatusPeriod) {
    return;
  }
  lastDumpStatusTime = now;
  logJobStats(infos.keySet(), TaskType.MAP);
  logJobStats(infos.keySet(), TaskType.REDUCE);
  dumpSpeculationStatus(now);
}
java
{ "resource": "" }
q160173
FairScheduler.slowerButAccurateCountTasks
train
/**
 * Recomputes a job's map/reduce task counts by walking every TIP (accurate
 * but O(#tasks)): tallies finished and running tasks, updates the per-pool
 * running-task counts and the scheduler-wide summary, and derives
 * neededMaps/neededReduces including speculative tasks. Needed reduces are
 * zero until the job is ready to schedule reduces.
 */
private void slowerButAccurateCountTasks(JobInfo info, JobInProgress job) { // Count maps int totalMaps = job.numMapTasks; int finishedMaps = 0; int runningMaps = 0; int runningMapTips = 0; for (TaskInProgress tip : job.getTasks(org.apache.hadoop.mapreduce.TaskType.MAP)) { if (tip.isComplete()) { finishedMaps += 1; } else if (tip.isRunning()) { runningMaps += tip.getActiveTasks().size(); runningMapTips += 1; } } info.totalInitedTasks = job.numMapTasks + job.numReduceTasks; info.runningMaps = runningMaps; infosummary.totalRunningMaps += runningMaps; poolMgr.incRunningTasks(info.poolName, TaskType.MAP, runningMaps); info.neededSpeculativeMaps = taskSelector.neededSpeculativeMaps(job); info.neededMaps = (totalMaps - runningMapTips - finishedMaps + info.neededSpeculativeMaps); // Count reduces int totalReduces = job.numReduceTasks; int finishedReduces = 0; int runningReduces = 0; int runningReduceTips = 0; for (TaskInProgress tip : job.getTasks(org.apache.hadoop.mapreduce.TaskType.REDUCE)) { if (tip.isComplete()) { finishedReduces += 1; } else if (tip.isRunning()) { runningReduces += tip.getActiveTasks().size(); runningReduceTips += 1; } } info.runningReduces = runningReduces; infosummary.totalRunningReduces += runningReduces; poolMgr.incRunningTasks(info.poolName, TaskType.REDUCE, runningReduces); if (job.scheduleReduces()) { info.neededSpeculativeReduces = taskSelector.neededSpeculativeReduces(job); info.neededReduces = (totalReduces - runningReduceTips - finishedReduces + info.neededSpeculativeReduces); } else { info.neededReduces = 0; } }
java
{ "resource": "" }
q160174
FairScheduler.fifoWeightAdjust
train
/**
 * Decays job weights within a pool in FIFO order: the oldest job keeps its
 * full weight and each subsequent job's map/reduce weights are multiplied
 * by one more power of FIFO_WEIGHT_DECAY_FACTOR.
 */
private void fifoWeightAdjust(Pool pool) {
  List<JobInProgress> orderedJobs =
      new ArrayList<JobInProgress>(pool.getJobs());
  Collections.sort(orderedJobs, fifoComparator);
  double decay = 1.0;
  for (JobInProgress job : orderedJobs) {
    JobInfo jobInfo = infos.get(job);
    if (jobInfo == null) {
      throw new IllegalStateException("Couldn't find job " + job.jobId
          + " in pool " + pool.getName());
    }
    jobInfo.mapWeight *= decay;
    jobInfo.reduceWeight *= decay;
    decay *= FIFO_WEIGHT_DECAY_FACTOR;
  }
}
java
{ "resource": "" }
q160175
FairScheduler.incSlotLimit
train
private boolean incSlotLimit(JobInfo info, TaskType type, LimitType limit) { switch (limit) { case MIN: if (type == TaskType.MAP) { if (info.minMaps < runnableTasks(info, type)) { info.minMaps += 1; return true; } } else { if (info.minReduces < runnableTasks(info, type)) { info.minReduces += 1; return true; } } return false; case MAX: if (type == TaskType.MAP) { if (info.maxMaps < runnableTasks(info, type)) { info.maxMaps += 1; return true; } } else { if (info.maxReduces < runnableTasks(info, type)) { info.maxReduces += 1; return true; } } return false; } // Never come here return false; }
java
{ "resource": "" }
q160176
FairScheduler.computeShare
train
/**
 * Computes a job's slot share for the given task type: its map/reduce
 * weight times the weight-to-slots ratio. When considerMinMax is set the
 * share is clamped to the job's [min, max] slot bounds; it is always capped
 * by the number of runnable tasks. Non-runnable jobs get 0.
 */
private double computeShare(JobInfo info, double w2sRatio, TaskType type, boolean considerMinMax) { if (!isRunnable(info)) { return 0; } double share = type == TaskType.MAP ? info.mapWeight : info.reduceWeight; share *= w2sRatio; if (considerMinMax) { int minSlots = type == TaskType.MAP ? info.minMaps : info.minReduces; share = Math.max(share, minSlots); int maxSlots = type == TaskType.MAP ? info.maxMaps : info.maxReduces; share = Math.min(share, maxSlots); } share = Math.min(share, runnableTasks(info, type)); return share; }
java
{ "resource": "" }
q160177
FairScheduler.neededTasks
train
/**
 * Number of tasks of the given type the job still needs launched; 0 for an
 * unknown (null) job info.
 */
protected int neededTasks(JobInfo info, TaskType taskType) {
  if (info == null) {
    return 0;
  }
  if (taskType == TaskType.MAP) {
    return info.neededMaps;
  }
  return info.neededReduces;
}
java
{ "resource": "" }
q160178
FairScheduler.isStarvedForMinShare
train
/**
 * True when the job is running fewer tasks of the given type than 90% of
 * its minimum share (the 10% slack avoids flapping right at the boundary).
 */
boolean isStarvedForMinShare(JobInfo info, TaskType taskType) {
  float starvationThreshold = (float) (minTasks(info, taskType) * 0.9);
  return runningTasks(info, taskType) < starvationThreshold;
}
java
{ "resource": "" }
q160179
FairScheduler.preemptTasksIfNecessary
train
/**
 * Periodically (every preemptionInterval) computes how many map and reduce
 * tasks must be preempted to honor shares and kills them. Skipped entirely
 * when preemption is disabled or the scheduler runs in FIFO mode. Locks the
 * jobtracker (taskTrackerManager) before this scheduler because preemption
 * calls back into the jobtracker (killTask). The per-round kill count can be
 * capped by maxPreemptibleTasks, and jobs that cannot be preempted are
 * optionally excluded from the deficit count (countNonPreemptibleTasks).
 * Tasks are preferentially taken from over-scheduled jobs, preferring
 * recently started attempts to minimize wasted work.
 */
protected void preemptTasksIfNecessary() { if (!preemptionEnabled || jobComparator == JobComparator.FIFO) return; long curTime = clock.getTime(); if (curTime - lastPreemptCheckTime < preemptionInterval) return; lastPreemptCheckTime = curTime; int currentMaxPreemptibleTasks = maxPreemptibleTasks; boolean currentCountNonPreemptibleTasks = countNonPreemptibleTasks; // Acquire locks on both the JobTracker (task tracker manager) and this // because we might need to call some JobTracker methods (killTask). synchronized (taskTrackerManager) { synchronized (this) { List<JobInProgress> jobs = new ArrayList<JobInProgress>(infos.keySet()); for (TaskType type: MAP_AND_REDUCE) { int tasksToPreempt = 0; for (JobInProgress job: jobs) { if (!currentCountNonPreemptibleTasks && !canBePreempted(job)) { continue; } tasksToPreempt += tasksToPreempt(job, type, curTime); } if (tasksToPreempt > 0) { // for debugging purposes log the jobs by scheduling priority // to check whether preemption and scheduling are in sync. logJobStats(sortedJobsByMapNeed, TaskType.MAP); logJobStats(sortedJobsByReduceNeed, TaskType.REDUCE); } // Possibly adjust the maximum number of tasks to preempt. int actualTasksToPreempt = tasksToPreempt; if ((currentMaxPreemptibleTasks >= 0) && (tasksToPreempt > currentMaxPreemptibleTasks)) { actualTasksToPreempt = currentMaxPreemptibleTasks; } LOG.info("preemptTasksIfNecessary: Should preempt " + tasksToPreempt + " " + type + " tasks, actually preempting " + actualTasksToPreempt + " tasks, countNonPreemptibleTasks = " + countNonPreemptibleTasks); // Actually preempt the tasks. The policy for this is to pick // tasks from jobs that are above their min share and have very // negative deficits (meaning they've been over-scheduled). // However, we also want to minimize the amount of computation // wasted by preemption, so prefer tasks that started recently. preemptTasks(jobs, type, tasksToPreempt); } } } }
java
{ "resource": "" }
q160180
DatanodeInfo.getDatanodeReport
train
/**
 * Builds the multi-line, human-readable datanode report: name, optional
 * rack (omitted for the default rack), decommission status, configured
 * capacity, DFS/non-DFS usage, remaining space (absolute and percent) and
 * the time of last contact.
 */
public String getDatanodeReport() { StringBuffer buffer = new StringBuffer(); long c = getCapacity(); long r = getRemaining(); long u = getDfsUsed(); long nonDFSUsed = getNonDfsUsed(); float usedPercent = getDfsUsedPercent(); float remainingPercent = getRemainingPercent(); buffer.append("Name: "+name+"\n"); if (!NetworkTopology.DEFAULT_RACK.equals(location)) { buffer.append("Rack: "+location+"\n"); } buffer.append("Decommission Status : "); if (isDecommissioned()) { buffer.append("Decommissioned\n"); } else if (isDecommissionInProgress()) { buffer.append("Decommission in progress\n"); } else { buffer.append("Normal\n"); } buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n"); buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n"); buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n"); buffer.append("DFS Remaining: " +r+ "("+StringUtils.byteDesc(r)+")"+"\n"); buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n"); buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n"); buffer.append("Last contact: "+new Date(lastUpdate)+"\n"); return buffer.toString(); }
java
{ "resource": "" }
q160181
DatanodeInfo.dumpDatanode
train
/**
 * Builds a compact single-line summary of this datanode: name, optional
 * rack, admin state (DD = decommissioned, DP = decommission in progress,
 * IN = in service), capacity, used (absolute and percent), remaining and
 * last-update time.
 */
public String dumpDatanode() { StringBuffer buffer = new StringBuffer(); long c = getCapacity(); long r = getRemaining(); long u = getDfsUsed(); buffer.append(name); if (!NetworkTopology.DEFAULT_RACK.equals(location)) { buffer.append(" "+location); } if (isDecommissioned()) { buffer.append(" DD"); } else if (isDecommissionInProgress()) { buffer.append(" DP"); } else { buffer.append(" IN"); } buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")"); buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")"); buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%"); buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")"); buffer.append(" " + new Date(lastUpdate)); return buffer.toString(); }
java
{ "resource": "" }
q160182
DatanodeInfo.setAdminState
train
/**
 * Sets the admin state; NORMAL is stored as null so that the field is only
 * non-null for non-default states.
 */
protected void setAdminState(AdminStates newState) {
  adminState = (newState == AdminStates.NORMAL) ? null : newState;
}
java
{ "resource": "" }
q160183
Balancer.parse
train
/**
 * Parses balancer command-line options: threshold (percent, default 10),
 * iter_len (iteration length in minutes, default derived from the current
 * maxIterationTime), node_par_moves (concurrent moves per node) and
 * par_moves (mover thread pool size). Converts the iteration length to
 * milliseconds and validates the threshold via checkThreshold(). On any bad
 * argument the usage is printed and the exception rethrown; parse failures
 * become IllegalArgumentException carrying the raw args.
 */
private void parse(String[] args) { Options cliOpts = setupOptions(); BasicParser parser = new BasicParser(); CommandLine cl = null; try { try { cl = parser.parse(cliOpts, args); } catch (ParseException ex) { throw new IllegalArgumentException("args = " + Arrays.toString(args)); } int newThreshold = Integer.parseInt(cl.getOptionValue("threshold", "10")); int iterationTime = Integer.parseInt(cl.getOptionValue("iter_len", String.valueOf(maxIterationTime/(60 * 1000)))); maxConcurrentMoves = Integer.parseInt(cl.getOptionValue("node_par_moves", String.valueOf(MAX_NUM_CONCURRENT_MOVES))); moveThreads = Integer.parseInt(cl.getOptionValue("par_moves", String.valueOf(MOVER_THREAD_POOL_SIZE))); maxIterationTime = iterationTime * 60 * 1000L; threshold = checkThreshold(newThreshold); System.out.println("Running with threshold of " + threshold + " and iteration time of " + maxIterationTime + " milliseconds"); } catch (RuntimeException e) { printUsage(cliOpts); throw e; } }
java
{ "resource": "" }
q160184
CoronaReleaseManager.getLastTimeStamp
train
/**
 * Returns the latest relevant modification time for the release. When a
 * specific check file is configured, its timestamp wins if positive;
 * otherwise the whole release path is scanned.
 */
private long getLastTimeStamp() {
  if (coronaReleaseFileCheck != null && !coronaReleaseFileCheck.isEmpty()) {
    long checkFileTime =
        getLastTimeStamp(new Path(releasePath, coronaReleaseFileCheck));
    if (checkFileTime > 0) {
      return checkFileTime;
    }
  }
  return getLastTimeStamp(releasePath);
}
java
{ "resource": "" }
q160185
CoronaReleaseManager.getLastTimeStamp
train
/**
 * Returns the newest modification time found under pathToCheck, recursing
 * into subdirectories. Plain files only count when they match
 * release_pattern (if one is configured). IOExceptions are logged, not
 * thrown, and whatever maximum was found so far is returned (-1 when
 * nothing matched).
 */
private long getLastTimeStamp(Path pathToCheck) { long lastTimeStamp = -1; long tmpTimeStamp = -1; try { for (FileStatus fileStat: fs.listStatus(pathToCheck)) { Path srcPath = fileStat.getPath(); if (!fileStat.isDir()) { boolean checkFlag = true; if (release_pattern != null) { // just need to check the files that match the pattern Matcher m = release_pattern.matcher(srcPath.toString()); if (!m.find()) { checkFlag = false; } } if (checkFlag) { tmpTimeStamp = fileStat.getModificationTime(); } else { continue; } } else { tmpTimeStamp = getLastTimeStamp(srcPath); } if (tmpTimeStamp > lastTimeStamp) { lastTimeStamp = tmpTimeStamp; } } } catch (IOException ioe) { LOG.error("IOException when checking timestamp ", ioe); } return lastTimeStamp; }
java
{ "resource": "" }
q160186
CoronaReleaseManager.copyRelease
train
/**
 * Recursively copies the release tree from src to dest: creates dest if
 * missing; at the top level, short-circuits when the RELEASE_TAG_FILE
 * marker already exists (unless isForced). Only files matching
 * release_pattern (when configured) are copied; subdirectories are recursed
 * into. On success at the top level the marker file is written last, so a
 * partially-copied release is not mistaken for a complete one. Returns
 * false on any failure (all errors are logged).
 */
private boolean copyRelease(Path src, Path dest, boolean isTop, boolean isForced) { try { if (!fs.exists(dest)) { if (!fs.mkdirs(dest)) { LOG.error("Unable to make dir " + dest.toString()); return false; } } else { if (isTop && !isForced) { Path donePath = new Path(dest, RELEASE_TAG_FILE); if (fs.exists(donePath)) { LOG.info(donePath + " exists. There is no need to copy again"); return true; } } } for (FileStatus fileStat: fs.listStatus(src)) { Path srcPath = fileStat.getPath(); if (!fileStat.isDir()) { boolean copyFlag = true; if (release_pattern != null) { Matcher m = release_pattern.matcher(srcPath.toString()); if (!m.find()) { copyFlag = false; } } if (copyFlag) { Path destPath = new Path(dest, srcPath.getName()); fs.copyFromLocalFile(srcPath, destPath); } } else { Path destPath = new Path(dest, srcPath.getName()); if (!copyRelease(srcPath, destPath, false, isForced)) { LOG.error("Unable to create link for " + srcPath.toString() + " as " + destPath.toString()); return false; } } } if (isTop) { // create the tag file Path donePath = new Path(dest, RELEASE_TAG_FILE); FSDataOutputStream fos = fs.create(donePath); fos.close(); } } catch (IOException ioe) { LOG.error("IOException when link dir ", ioe); return false; } return true; }
java
{ "resource": "" }
q160187
ClusterManagerMetrics.sessionEnd
train
/**
 * Increments the counter metric for the given session end state.
 *
 * @throws IllegalArgumentException when finishState is not a tracked end
 *         state (the metrics map only contains valid end states and never
 *         maps a key to null).
 */
public void sessionEnd(SessionStatus finishState) {
  MetricsTimeVaryingInt counter = sessionStatusToMetrics.get(finishState);
  if (counter == null) {
    throw new IllegalArgumentException("Invalid end state " + finishState);
  }
  counter.inc();
}
java
{ "resource": "" }
q160188
ClusterManagerMetrics.registerUpdater
train
/**
 * Wires the scheduler and session notifier into this metrics object and
 * registers this object with the metrics context as an updater.
 */
public void registerUpdater(Scheduler scheduler, SessionNotifier sessionNotifier) { this.scheduler = scheduler; this.sessionNotifier = sessionNotifier; context.registerUpdater(this); }
java
{ "resource": "" }
q160189
ClusterManagerMetrics.createSessionStatusToMetricsMap
train
/**
 * Builds the map from each session end state to its counter metric, named
 * "<state>_sessions" in lower case and registered in the metrics registry.
 */
private Map<SessionStatus, MetricsTimeVaryingInt>
    createSessionStatusToMetricsMap() {
  Map<SessionStatus, MetricsTimeVaryingInt> metricsByStatus =
      new HashMap<SessionStatus, MetricsTimeVaryingInt>();
  for (SessionStatus endState : SESSION_END_STATES) {
    String metricName = endState.toString().toLowerCase() + "_sessions";
    metricsByStatus.put(endState,
        new MetricsTimeVaryingInt(metricName, registry));
  }
  return metricsByStatus;
}
java
{ "resource": "" }
q160190
ClusterManagerMetrics.createTypeToCountMap
train
/**
 * Builds a map from each resource type to an int-valued gauge metric, named
 * "<actionType>_<type>" in lower case and registered in the metrics
 * registry.
 */
private Map<ResourceType, MetricsIntValue> createTypeToCountMap(
    Collection<ResourceType> resourceTypes, String actionType) {
  Map<ResourceType, MetricsIntValue> countByType =
      new HashMap<ResourceType, MetricsIntValue>();
  for (ResourceType resourceType : resourceTypes) {
    String metricName = (actionType + "_" + resourceType).toLowerCase();
    countByType.put(resourceType,
        new MetricsIntValue(metricName, registry));
  }
  return countByType;
}
java
{ "resource": "" }
q160191
ClusterManagerMetrics.createTypeToResourceCountMap
train
/**
 * Builds a map from each resource type to a time-varying long counter
 * metric, named "<actionType>_<type>" in lower case and registered in the
 * metrics registry.
 */
private Map<ResourceType, MetricsTimeVaryingLong> createTypeToResourceCountMap(
    Collection<ResourceType> resourceTypes, String actionType) {
  Map<ResourceType, MetricsTimeVaryingLong> countByType =
      new HashMap<ResourceType, MetricsTimeVaryingLong>();
  for (ResourceType resourceType : resourceTypes) {
    String metricName = (actionType + "_" + resourceType).toLowerCase();
    countByType.put(resourceType,
        new MetricsTimeVaryingLong(metricName, registry));
  }
  return countByType;
}
java
{ "resource": "" }
q160192
ZombieJob.makeUpRuntime
train
/**
 * Picks a synthetic runtime by sampling the per-attempt CDFs: draws a
 * uniformly random index over the total number of CDF values, walks the
 * list to find the CDF that index falls into, and delegates to
 * makeUpRuntime(cdf) for that CDF. Returns -1 when all CDFs are empty.
 * The trailing IllegalStateExceptions guard the (impossible) cases of a
 * negative index or the walk running past the total.
 */
private long makeUpRuntime(List<LoggedDiscreteCDF> mapAttemptCDFs) { int total = 0; for (LoggedDiscreteCDF cdf : mapAttemptCDFs) { total += cdf.getNumberValues(); } if (total == 0) { return -1; } int index = random.nextInt(total); for (LoggedDiscreteCDF cdf : mapAttemptCDFs) { if (index >= cdf.getNumberValues()) { index -= cdf.getNumberValues(); } else { if (index < 0) { throw new IllegalStateException("application error"); } return makeUpRuntime(cdf); } } throw new IllegalStateException("not possible to get here"); }
java
{ "resource": "" }
q160193
RccTask.execute
train
/**
 * Ant task entry point: compiles the single {@code src} file (when set) and
 * every file included by the nested filesets. Fails when neither a file
 * attribute nor a fileset child is given.
 */
public void execute() throws BuildException {
  if (src == null && filesets.size() == 0) {
    throw new BuildException("There must be a file attribute or a fileset child element");
  }
  if (src != null) {
    doCompile(src);
  }
  Project myProject = getProject();
  for (FileSet fileset : filesets) {
    DirectoryScanner scanner = fileset.getDirectoryScanner(myProject);
    File baseDir = fileset.getDir(myProject);
    for (String included : scanner.getIncludedFiles()) {
      doCompile(new File(baseDir, included));
    }
  }
}
java
{ "resource": "" }
q160194
VLongWritable.compareTo
train
/**
 * Compares this writable's long value with another VLongWritable's, in
 * natural long order: negative, zero or positive.
 */
public int compareTo(Object o) {
  long lhs = this.value;
  long rhs = ((VLongWritable) o).value;
  if (lhs == rhs) {
    return 0;
  }
  return (lhs < rhs) ? -1 : 1;
}
java
{ "resource": "" }
q160195
DistributedPentomino.createInputDirectory
train
/**
 * Writes the pentomino search prefixes for the given depth into
 * {@code <dir>/part1}, one comma-separated prefix per line, creating the
 * directory first.
 *
 * Fix: the output stream is now closed in a finally block, so it is not
 * leaked when a write fails partway through.
 */
private static void createInputDirectory(FileSystem fs, Path dir, Pentomino pent, int depth) throws IOException {
  fs.mkdirs(dir);
  List<int[]> splits = pent.getSplits(depth);
  PrintStream file = new PrintStream(new BufferedOutputStream(
      fs.create(new Path(dir, "part1")), 64 * 1024));
  try {
    for (int[] prefix : splits) {
      for (int i = 0; i < prefix.length; ++i) {
        if (i != 0) {
          file.print(',');
        }
        file.print(prefix[i]);
      }
      file.print('\n');
    }
  } finally {
    file.close();
  }
}
java
{ "resource": "" }
q160196
CoronaDirectTaskUmbilical.getAddress
train
/**
 * Reads a "host:port" value from the config under the given key and turns
 * it into an InetSocketAddress; returns null when the key is unset or the
 * value is not exactly a host:port pair.
 */
public static InetSocketAddress getAddress(JobConf conf, String key) {
  String value = conf.get(key);
  if (value == null) {
    return null;
  }
  String[] parts = value.split(":");
  if (parts.length != 2) {
    return null;
  }
  return new InetSocketAddress(parts[0], Integer.parseInt(parts[1]));
}
java
{ "resource": "" }
q160197
CoronaDirectTaskUmbilical.setAddress
train
/**
 * Stores the address in the config under the given key as "host:port", or
 * removes the key entirely when the address is null.
 */
public static void setAddress(JobConf conf, String key, InetSocketAddress address) {
  if (address == null) {
    conf.unset(key);
  } else {
    conf.set(key, address.getHostName() + ":" + address.getPort());
  }
}
java
{ "resource": "" }
q160198
PoolGroupSchedulable.snapshotConfig
train
/**
 * Caches this pool group's configured maximum and minimum slot counts for
 * this schedulable's task type from the config manager (presumably so the
 * rest of the scheduling pass sees a consistent snapshot even if the config
 * is reloaded concurrently — confirm against the callers).
 */
private void snapshotConfig() { maximum = configManager.getPoolGroupMaximum(getName(), getType()); minimum = configManager.getPoolGroupMinimum(getName(), getType()); }
java
{ "resource": "" }
q160199
PoolGroupSchedulable.getScheduleQueue
train
/**
 * Returns the pool schedule queue, lazily creating it on first use with the
 * comparator configured for this pool group.
 */
public Queue<PoolSchedulable> getScheduleQueue() {
  if (scheduleQueue != null) {
    return scheduleQueue;
  }
  ScheduleComparator comparator =
      configManager.getPoolGroupComparator(getName());
  scheduleQueue = createPoolQueue(comparator);
  return scheduleQueue;
}
java
{ "resource": "" }