_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q161400
Storage.getFields
train
/**
 * Reads the common storage fields (layout version, storage type,
 * namespace id, creation time) out of a VERSION-file property set and,
 * after validating them against this Storage's current state, commits
 * them to the in-memory fields.
 *
 * @param props properties loaded from the VERSION file
 * @param sd    storage directory the properties came from (for errors)
 * @throws InconsistentFSStateException if a field is missing or clashes
 *         with this storage's type/namespace
 * @throws IncorrectVersionException if the directory was written by a
 *         newer software version
 * @throws IOException on canonical-path resolution failure
 */
protected void getFields(Properties props, StorageDirectory sd ) throws IOException {
  String sv, st, sid, sct;
  sv = props.getProperty(LAYOUT_VERSION);
  st = props.getProperty(STORAGE_TYPE);
  sid = props.getProperty(NAMESPACE_ID);
  sct = props.getProperty(CHECK_TIME);
  // All four fields are mandatory in a well-formed VERSION file.
  if (sv == null || st == null || sid == null || sct == null)
    throw new InconsistentFSStateException(sd.root,
        "file " + STORAGE_FILE_VERSION + " is invalid.");
  int rv = Integer.parseInt(sv);
  NodeType rt = NodeType.valueOf(st);
  int rid = Integer.parseInt(sid);
  long rct = Long.parseLong(sct);
  // Storage type must match ours; namespace ids must match unless either
  // side is still unassigned (0).
  if (!storageType.equals(rt) ||
      !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
    throw new InconsistentFSStateException(sd.root,
        "is incompatible with others. " +
        " namespaceID is " + namespaceID +
        " and rid is " + rid + "," +
        " storage type is " + storageType +
        " but rt is " + rt);
  // Layout versions count downward, so rv below the compiled-in
  // LAYOUT_VERSION means a future layout we cannot read.
  if (rv < FSConstants.LAYOUT_VERSION) // future version
    throw new IncorrectVersionException(rv, "storage directory "
        + sd.root.getCanonicalPath());
  // Commit to the in-memory fields only after every check passed.
  layoutVersion = rv;
  storageType = rt;
  namespaceID = rid;
  cTime = rct;
}
java
{ "resource": "" }
q161401
Storage.writeAll
train
/**
 * Stamps the current layout version and persists the VERSION file of
 * every storage directory.
 *
 * @throws IOException if writing any directory fails
 */
public void writeAll() throws IOException {
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  for (StorageDirectory dir : storageDirs) {
    dir.write();
  }
}
java
{ "resource": "" }
q161402
Storage.unlockAll
train
/**
 * Releases the lock held on every storage directory.
 *
 * @throws IOException if releasing any lock fails
 */
public void unlockAll() throws IOException {
  for (StorageDirectory dir : storageDirs) {
    dir.unlock();
  }
}
java
{ "resource": "" }
q161403
Storage.isLockSupported
train
/**
 * Probes whether the file system backing the idx-th storage directory
 * actually enforces exclusive file locking.
 *
 * Strategy: acquire the directory lock twice. If the second acquisition
 * is refused, the OS enforces the lock (supported, returns true); if
 * both succeed, locking is a no-op on this file system (returns false).
 * Any lock taken by this probe (as opposed to a pre-existing sd.lock)
 * is released before returning.
 *
 * @param idx index of the storage directory to probe
 * @return true if file locking is enforced for this directory
 * @throws IOException if locking or releasing fails
 */
public boolean isLockSupported(int idx) throws IOException {
  StorageDirectory sd = storageDirs.get(idx);
  FileLock firstLock = null;
  FileLock secondLock = null;
  try {
    firstLock = sd.lock;
    if(firstLock == null) {
      firstLock = sd.tryLock();
      // Could not lock at all (someone else holds it): locking works.
      if(firstLock == null) return true;
    }
    // A refused second lock proves the first one is real.
    secondLock = sd.tryLock();
    if(secondLock == null) return true;
  } finally {
    // Release only locks acquired by this probe; never touch sd.lock.
    if(firstLock != null && firstLock != sd.lock) {
      firstLock.release();
      firstLock.channel().close();
    }
    if(secondLock != null) {
      secondLock.release();
      secondLock.channel().close();
    }
  }
  // Both lock attempts were granted: the file system ignores locking.
  return false;
}
java
{ "resource": "" }
q161404
SessionNotifier.write
train
/**
 * Serializes the notifier state (per-thread pending notification
 * contexts and deleted-session handles) as one JSON object so it can be
 * persisted and restored after a safe-mode restart.
 *
 * @param jsonGenerator generator to write the JSON to
 * @throws IOException if the generator fails
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  // First pass: compute totals across all notifier threads.
  int totalSessionsToCtx = 0, totalDeletedSessions = 0;
  for (int i = 0; i < numNotifierThreads; i++) {
    totalSessionsToCtx += notifierThreads[i].sessionsToCtx.size();
    totalDeletedSessions += notifierThreads[i].deletedSessions.size();
  }
  jsonGenerator.writeNumberField("totalSessionsToCtx", totalSessionsToCtx);
  // "sessionsToCtx": { handle -> serialized notification context }
  jsonGenerator.writeFieldName("sessionsToCtx");
  jsonGenerator.writeStartObject();
  for (int i = 0; i < numNotifierThreads; i++) {
    for (ConcurrentMap.Entry<String, SessionNotificationCtx> entry :
        notifierThreads[i].sessionsToCtx.entrySet()) {
      jsonGenerator.writeFieldName(entry.getKey());
      entry.getValue().write(jsonGenerator);
    }
  }
  jsonGenerator.writeEndObject();
  jsonGenerator.writeNumberField("totalDeletedSessions", totalDeletedSessions);
  // "deletedSessions": [ handle, ... ]
  jsonGenerator.writeFieldName("deletedSessions");
  jsonGenerator.writeStartArray();
  for (int i = 0; i < numNotifierThreads; i++) {
    for (String deletedSessionHandle :
        notifierThreads[i].deletedSessions.keySet()) {
      jsonGenerator.writeString(deletedSessionHandle);
    }
  }
  jsonGenerator.writeEndArray();
  jsonGenerator.writeEndObject();
}
java
{ "resource": "" }
q161405
SessionNotifier.restoreAfterSafeModeRestart
train
public void restoreAfterSafeModeRestart() { // Put the sessionsToCtxFromDisk entries into their respective // SessionNotifierThreads instances for (Map.Entry<String, SessionNotificationCtx> entry : sessionsToCtxFromDisk.entrySet()) { // The conf and the conf related properties are missing in the // sessionsToCtx objects entry.getValue().setConf(conf); handleToNotifier(entry.getKey()).sessionsToCtx.put(entry.getKey(), entry.getValue()); sessionsToCtxFromDisk.remove(entry); } // Put the deletedSessions into the the respective SessionNotifierThreads for (String deletedSessionHandle : deletedSessionsFromDisk) { SessionNotifierThread notifierThread = handleToNotifier(deletedSessionHandle); if (notifierThread.sessionsToCtx.get(deletedSessionHandle) != null) { notifierThread.deletedSessions.put(deletedSessionHandle, notifierThread); } deletedSessionsFromDisk.remove(deletedSessionHandle); } // We can now start the notifier threads for (int i = 0; i < numNotifierThreads; i++) { notifierThreads[i].start(); } }
java
{ "resource": "" }
q161406
PermissionChecker.checkSuperuserPrivilege
train
/**
 * Verifies that the given user has superuser privilege, i.e. is the
 * owner or belongs to the supergroup.
 *
 * @param owner      user to check
 * @param supergroup name of the superuser group
 * @throws AccessControlException if the user is not a superuser
 */
public static void checkSuperuserPrivilege(UserGroupInformation owner, String supergroup) throws AccessControlException {
  final PermissionChecker pc =
      new PermissionChecker(owner.getUserName(), supergroup);
  if (pc.isSuper) {
    return;
  }
  throw new AccessControlException("Access denied for user " + pc.user
      + ". Superuser privilege is required");
}
java
{ "resource": "" }
q161407
LoadManager.getMaxSlots
train
/**
 * Returns the tracker's slot capacity for the given task type.
 *
 * @param status tracker status to read capacities from
 * @param type   task type (MAP or REDUCE)
 * @return max map slots for MAP, max reduce slots otherwise
 */
public int getMaxSlots(TaskTrackerStatus status, TaskType type) {
  if (type == TaskType.MAP) {
    return status.getMaxMapSlots();
  }
  return status.getMaxReduceSlots();
}
java
{ "resource": "" }
q161408
MapFile.rename
train
/**
 * Renames an existing MapFile directory.
 *
 * @param fs      file system holding the map file
 * @param oldName current directory name
 * @param newName new directory name
 * @throws IOException if the rename is refused by the file system
 */
public static void rename(FileSystem fs, String oldName, String newName) throws IOException {
  final Path src = new Path(oldName);
  final Path dst = new Path(newName);
  if (fs.rename(src, dst)) {
    return;
  }
  throw new IOException("Could not rename " + src + " to " + dst);
}
java
{ "resource": "" }
q161409
MapFile.delete
train
/**
 * Deletes a MapFile: its data file, its index file, and finally the
 * containing directory itself.
 *
 * @param fs   file system holding the map file
 * @param name directory name of the map file
 * @throws IOException if any delete fails
 */
public static void delete(FileSystem fs, String name) throws IOException {
  final Path mapDir = new Path(name);
  fs.delete(new Path(mapDir, DATA_FILE_NAME), true);
  fs.delete(new Path(mapDir, INDEX_FILE_NAME), true);
  fs.delete(mapDir, true);
}
java
{ "resource": "" }
q161410
MapFile.fix
train
public static long fix(FileSystem fs, Path dir, Class<? extends Writable> keyClass, Class<? extends Writable> valueClass, boolean dryrun, Configuration conf) throws Exception { String dr = (dryrun ? "[DRY RUN ] " : ""); Path data = new Path(dir, DATA_FILE_NAME); Path index = new Path(dir, INDEX_FILE_NAME); int indexInterval = 128; if (!fs.exists(data)) { // there's nothing we can do to fix this! throw new Exception(dr + "Missing data file in " + dir + ", impossible to fix this."); } if (fs.exists(index)) { // no fixing needed return -1; } SequenceFile.Reader dataReader = new SequenceFile.Reader(fs, data, conf); if (!dataReader.getKeyClass().equals(keyClass)) { throw new Exception(dr + "Wrong key class in " + dir + ", expected" + keyClass.getName() + ", got " + dataReader.getKeyClass().getName()); } if (!dataReader.getValueClass().equals(valueClass)) { throw new Exception(dr + "Wrong value class in " + dir + ", expected" + valueClass.getName() + ", got " + dataReader.getValueClass().getName()); } long cnt = 0L; Writable key = ReflectionUtils.newInstance(keyClass, conf); Writable value = ReflectionUtils.newInstance(valueClass, conf); SequenceFile.Writer indexWriter = null; if (!dryrun) indexWriter = SequenceFile.createWriter(fs, conf, index, keyClass, LongWritable.class); try { long pos = 0L; LongWritable position = new LongWritable(); while(dataReader.next(key, value)) { cnt++; if (cnt % indexInterval == 0) { position.set(pos); if (!dryrun) indexWriter.append(key, position); } pos = dataReader.getPosition(); } } catch(Throwable t) { // truncated data file. swallow it. } dataReader.close(); if (!dryrun) indexWriter.close(); return cnt; }
java
{ "resource": "" }
q161411
ConfigManager.findConfigFiles
train
/**
 * Locates the cluster configuration file and the pools configuration
 * file, preferring the materialized JSON config over the plain xml one;
 * each candidate is tried first as a plain file and then through the
 * class loader. Results go into configFileName / poolsConfigFileName;
 * fields that are already set are left untouched.
 */
private void findConfigFiles() {
  // Find the materialized_JSON configuration file.
  if (configFileName == null) {
    String jsonConfigFileString = conf.getConfigFile().replace(
        CoronaConf.DEFAULT_CONFIG_FILE, Configuration.MATERIALIZEDJSON);
    File jsonConfigFile = new File(jsonConfigFileString);
    String jsonConfigFileName = null;
    if (jsonConfigFile.exists()) {
      jsonConfigFileName = jsonConfigFileString;
    } else {
      // Not on disk: fall back to the class loader's view of the path.
      URL u = classLoader.getResource(jsonConfigFileString);
      jsonConfigFileName = (u != null) ? u.getPath() : null;
    }
    // Check that the materialized_JSON contains the resources
    // of corona.xml
    if (jsonConfigFileName != null) {
      try {
        jsonConfigFile = new File(jsonConfigFileName);
        // NOTE(review): this stream is never closed — likely fd leak;
        // confirm whether instantiateJsonObject closes it.
        InputStream in = new BufferedInputStream(new FileInputStream(
            jsonConfigFile));
        JSONObject json = conf.instantiateJsonObject(in);
        // Only accept the JSON file if it embeds the corona config key.
        if (json.has(conf.xmlToThrift(CoronaConf.DEFAULT_CONFIG_FILE))) {
          configFileName = jsonConfigFileName;
          LOG.info("Attempt to find config file " + jsonConfigFileString +
              " as a file and in class loader returned " + configFileName);
        }
      } catch (IOException e) {
        LOG.warn("IOException: " + "while parsing corona JSON configuration");
      } catch (JSONException e) {
        LOG.warn("JSONException: " + "while parsing corona JSON configuration");
      }
    }
  }
  if (configFileName == null) {
    // Parsing the JSON configuration failed. Look for
    // the xml configuration.
    String configFileString = conf.getConfigFile();
    File configFile = new File(configFileString);
    if (configFile.exists()) {
      configFileName = configFileString;
    } else {
      URL u = classLoader.getResource(configFileString);
      configFileName = (u != null) ? u.getPath() : null;
    }
    LOG.info("Attempt to find config file " + configFileString +
        " as a file and in class loader returned " + configFileName);
  }
  // Resolve the pools config the same way: file first, then class loader.
  if (poolsConfigFileName == null) {
    String poolsConfigFileString = conf.getPoolsConfigFile();
    File poolsConfigFile = new File(poolsConfigFileString);
    if (poolsConfigFile.exists()) {
      poolsConfigFileName = poolsConfigFileString;
    } else {
      URL u = classLoader.getResource(poolsConfigFileString);
      poolsConfigFileName = (u != null) ? u.getPath() : null;
    }
    LOG.info("Attempt to find pools config file " + poolsConfigFileString +
        " as a file and in class loader returned " + poolsConfigFileName);
  }
}
java
{ "resource": "" }
q161412
ConfigManager.getWeight
train
/**
 * Returns the configured scheduling weight for a pool.
 *
 * @param poolInfo pool to look up
 * @return the pool's weight, or 1.0 when none is configured
 */
public synchronized double getWeight(PoolInfo poolInfo) {
  if (poolInfoToWeight == null) {
    return 1.0;
  }
  Double configured = poolInfoToWeight.get(poolInfo);
  if (configured == null) {
    return 1.0;
  }
  return configured;
}
java
{ "resource": "" }
q161413
ConfigManager.getPriority
train
/**
 * Returns the configured scheduling priority for a pool.
 *
 * @param poolInfo pool to look up
 * @return the pool's priority, or 0 when none is configured
 */
public synchronized int getPriority(PoolInfo poolInfo) {
  if (poolInfoToPriority == null) {
    return 0;
  }
  Integer configured = poolInfoToPriority.get(poolInfo);
  if (configured == null) {
    return 0;
  }
  return configured;
}
java
{ "resource": "" }
q161414
ConfigManager.getPoolComparator
train
/**
 * Returns the schedule comparator configured for a pool.
 *
 * @param poolInfo pool to look up
 * @return the pool's comparator, or the default comparator when none is
 *         configured
 */
public synchronized ScheduleComparator getPoolComparator(PoolInfo poolInfo) {
  if (poolInfoToComparator == null) {
    return defaultPoolComparator;
  }
  ScheduleComparator configured = poolInfoToComparator.get(poolInfo);
  if (configured == null) {
    return defaultPoolComparator;
  }
  return configured;
}
java
{ "resource": "" }
q161415
ConfigManager.getLocalityWait
train
/**
 * Returns how long the scheduler should wait for locality at the given
 * level before settling for a worse placement.
 *
 * @param type  resource type whose wait is wanted
 * @param level locality level (ANY never waits)
 * @return wait time in ms
 * @throws IllegalArgumentException if the type has no configured wait
 */
public synchronized long getLocalityWait(ResourceType type, LocalityLevel level) {
  // ANY placement needs no locality, hence no wait.
  if (level == LocalityLevel.ANY) {
    return 0L;
  }
  Long wait;
  if (level == LocalityLevel.NODE) {
    wait = typeToNodeWait.get(type);
  } else {
    wait = typeToRackWait.get(type);
  }
  if (wait == null) {
    throw new IllegalArgumentException("Unknown type:" + type);
  }
  return wait;
}
java
{ "resource": "" }
q161416
ConfigManager.generatePoolsConfigIfClassSet
train
public String generatePoolsConfigIfClassSet() { if (poolsConfigDocumentGenerator == null) { return null; } Document document = poolsConfigDocumentGenerator.generatePoolsDocument(); if (document == null) { LOG.warn("generatePoolsConfig: Did not generate a valid pools xml file"); return null; } // Write the content into a temporary xml file and rename to the // expected file. File tempXmlFile; try { TransformerFactory transformerFactory = TransformerFactory.newInstance(); transformerFactory.setAttribute("indent-number", new Integer(2)); Transformer transformer = transformerFactory.newTransformer(); transformer.setOutputProperty( "{http://xml.apache.org/xslt}indent-amount", "2"); transformer.setOutputProperty(OutputKeys.INDENT, "yes"); DOMSource source = new DOMSource(document); tempXmlFile = File.createTempFile("tmpPoolsConfig", "xml"); if (LOG.isDebugEnabled()) { StreamResult stdoutResult = new StreamResult(System.out); transformer.transform(source, stdoutResult); } StreamResult result = new StreamResult(tempXmlFile); transformer.transform(source, result); String md5 = org.apache.commons.codec.digest.DigestUtils.md5Hex( new FileInputStream(tempXmlFile)); File destXmlFile = new File(conf.getPoolsConfigFile()); boolean success = tempXmlFile.renameTo(destXmlFile); LOG.info("generatePoolConfig: Renamed generated file " + tempXmlFile.getAbsolutePath() + " to " + destXmlFile.getAbsolutePath() + " returned " + success + " with md5sum " + md5); return md5; } catch (TransformerConfigurationException e) { LOG.warn("generatePoolConfig: Failed to write file", e); } catch (IOException e) { LOG.warn("generatePoolConfig: Failed to write file", e); } catch (TransformerException e) { LOG.warn("generatePoolConfig: Failed to write file", e); } return null; }
java
{ "resource": "" }
q161417
ConfigManager.reloadAllConfig
train
/**
 * Reloads both the cluster config and the pools config when either file
 * changed on disk, recording the reload time on success.
 *
 * @param init true during startup, when missing config files are fatal
 * @return true if a reload actually happened, false if nothing changed
 * @throws IOException, SAXException, ParserConfigurationException,
 *         JSONException if reading or parsing a config file fails
 */
public synchronized boolean reloadAllConfig(boolean init) throws IOException, SAXException, ParserConfigurationException, JSONException {
  if (!isConfigChanged(init)) {
    return false;
  }
  reloadConfig();
  reloadPoolsConfig();
  // Timestamp only after both reloads succeed, so a failed reload is
  // retried on the next call.
  this.lastSuccessfulReload = ClusterManager.clock.getTime();
  return true;
}
java
{ "resource": "" }
q161418
ConfigManager.isConfigChanged
train
/**
 * Decides whether a config reload is needed by comparing file mtimes
 * against the last successful reload.
 *
 * @param init true during startup, when missing files are fatal
 * @return true if either config file is missing an mtime or is newer
 *         than the last successful reload
 * @throws IOException if required config files are absent at startup
 */
private boolean isConfigChanged(boolean init) throws IOException {
  if (init && (configFileName == null ||
      (poolsConfigFileName == null && conf.onlyAllowConfiguredPools()))) {
    throw new IOException(
        "ClusterManager needs a config and a pools file to start");
  }
  if (configFileName == null && poolsConfigFileName == null) {
    return false;
  }
  boolean changed = false;
  if (configFileName != null) {
    // lastModified() == 0 means the file is gone/unreadable; treat that
    // as changed so the reload path surfaces the problem.
    long mtime = new File(configFileName).lastModified();
    changed = (mtime == 0 || mtime > lastSuccessfulReload);
  }
  if (poolsConfigFileName != null) {
    long mtime = new File(poolsConfigFileName).lastModified();
    changed = changed || mtime == 0 || mtime > lastSuccessfulReload;
  }
  return changed;
}
java
{ "resource": "" }
q161419
ConfigManager.getRootElement
train
/**
 * Parses the given file as XML and returns its root element, which must
 * carry the expected configuration tag name.
 *
 * @param fileName path of the xml file
 * @return the document's root element
 * @throws IOException if the root tag is not the configuration tag
 * @throws SAXException, ParserConfigurationException on parse failures
 */
private Element getRootElement(String fileName) throws IOException, SAXException, ParserConfigurationException {
  DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
  factory.setIgnoringComments(true);
  DocumentBuilder parser = factory.newDocumentBuilder();
  Document document = parser.parse(new File(fileName));
  Element root = document.getDocumentElement();
  if (!matched(root, CONFIGURATION_TAG_NAME)) {
    throw new IOException("Bad " + fileName);
  }
  return root;
}
java
{ "resource": "" }
q161420
ConfigManager.getText
train
/**
 * Returns the trimmed text content of the element's first child, or the
 * empty string when the element has no children.
 *
 * @param element element whose text is wanted
 * @return trimmed text, possibly empty
 */
private static String getText(Element element) {
  if (element.getFirstChild() == null) {
    return "";
  }
  Text textNode = (Text) element.getFirstChild();
  return textNode.getData().trim();
}
java
{ "resource": "" }
q161421
LoadManager.incrementLoad
train
/**
 * Bumps the total load counter for the given resource type by one.
 * The type is expected to have been registered already.
 *
 * @param type resource type whose load increases
 */
public synchronized void incrementLoad(ResourceType type) {
  final Integer current = typeToTotalLoad.get(type);
  // Every known type must already have a counter entry.
  assert current != null;
  typeToTotalLoad.put(type, current + 1);
}
java
{ "resource": "" }
q161422
HadoopLogsAnalyzer.readBalancedLine
train
/**
 * Reads one logically complete item from the log stream.
 *
 * Returns a pair whose first element is non-null only when the item is
 * an embedded conf-file section (header line + accumulated XML body);
 * otherwise the second element holds one complete (possibly
 * multi-physical-line) log line. Returns null at end of stream.
 *
 * @return (confHeader, body) for conf sections, (null, line) otherwise,
 *         or null at EOF
 * @throws IOException on read failure
 */
private Pair<String, String> readBalancedLine() throws IOException {
  String line = readCountedLine();
  if (line == null) {
    return null;
  }
  // Drop everything before an embedded form feed, keeping the '\f'.
  while (line.indexOf('\f') > 0) {
    line = line.substring(line.indexOf('\f'));
  }
  if (line.length() != 0 && line.charAt(0) == '\f') {
    String subjectLine = readCountedLine();
    if (subjectLine != null && subjectLine.length() != 0
        && apparentConfFileHeader(line)
        && apparentXMLFileStart(subjectLine)) {
      // A conf-file header followed by XML: accumulate the XML body
      // until the next form-feed-prefixed line.
      StringBuilder sb = new StringBuilder();
      while (subjectLine != null && subjectLine.indexOf('\f') > 0) {
        subjectLine = subjectLine.substring(subjectLine.indexOf('\f'));
      }
      while (subjectLine != null
          && (subjectLine.length() == 0 || subjectLine.charAt(0) != '\f')) {
        sb.append(subjectLine);
        subjectLine = readCountedLine();
      }
      if (subjectLine != null) {
        // Push the section terminator back for the next read.
        unreadCountedLine(subjectLine);
      }
      return new Pair<String, String>(line, sb.toString());
    }
    // here we had a file line, but it introduced a log segment, not
    // a conf file. We want to just ignore the file line.
    return readBalancedLine();
  }
  // A complete log line ends with " " (version 0) or " ." (later).
  String endlineString = (version == 0 ? " " : " .");
  if (line.length() < endlineString.length()) {
    return new Pair<String, String>(null, line);
  }
  if (!endlineString.equals(line.substring(line.length()
      - endlineString.length()))) {
    // Continuation: keep appending physical lines until one carries the
    // end-of-line marker or introduces a new file section.
    StringBuilder sb = new StringBuilder(line);
    String addedLine;
    do {
      addedLine = readCountedLine();
      if (addedLine == null) {
        return new Pair<String, String>(null, sb.toString());
      }
      while (addedLine.indexOf('\f') > 0) {
        addedLine = addedLine.substring(addedLine.indexOf('\f'));
      }
      if (addedLine.length() > 0 && addedLine.charAt(0) == '\f') {
        unreadCountedLine(addedLine);
        return new Pair<String, String>(null, sb.toString());
      }
      sb.append("\n");
      sb.append(addedLine);
    } while (addedLine.length() >= endlineString.length()
        && !endlineString.equals(addedLine.substring(addedLine.length()
            - endlineString.length())));
    line = sb.toString();
  }
  return new Pair<String, String>(null, line);
}
java
{ "resource": "" }
q161423
StripeStore.initializeConf
train
/**
 * Builds a copy of conf in which, for each given key, an
 * authority-qualified override ("&lt;key&gt;.&lt;authority&gt;") takes the
 * place of the plain key when present.
 *
 * @param keys keys eligible for per-authority overrides
 * @param conf base configuration (not modified)
 * @param fs   file system whose URI authority selects the overrides;
 *             resolved from conf when null
 * @return the merged configuration copy
 * @throws IOException if the default file system cannot be resolved
 */
public Configuration initializeConf(String[] keys, Configuration conf, FileSystem fs) throws IOException {
  Configuration merged = new Configuration(conf);
  if (fs == null) {
    fs = FileSystem.get(conf);
  }
  String authority = fs.getUri().getAuthority();
  for (String key : keys) {
    String override = conf.get(key + "." + authority);
    if (override != null) {
      merged.set(key, override);
    }
  }
  return merged;
}
java
{ "resource": "" }
q161424
OfflineEditsViewer.go
train
/**
 * Runs the viewer: builds an edits loader for the given visitor and
 * streams the edits log through it.
 *
 * @param visitor visitor that receives each parsed edits record
 * @throws IOException if loading the edits fails
 */
public void go(EditsVisitor visitor) throws IOException {
  setEditsLoader(EditsLoader.LoaderFactory.getLoader(visitor));
  editsLoader.loadEdits();
}
java
{ "resource": "" }
q161425
OfflineEditsViewer.printHelp
train
/**
 * Prints the oev command-line usage summary to stdout, followed by the
 * generic Hadoop tool options.
 */
private void printHelp() {
  String summary =
    "Usage: bin/hdfs oev [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE\n" +
    "Offline edits viewer\n" +
    "Parse a Hadoop edits log file INPUT_FILE and save results\n" +
    "in OUTPUT_FILE.\n" +
    "Required command line arguments:\n" +
    "-i,--inputFile <arg>   edits file to process, xml (case\n" +
    "                       insensitive) extension means XML format,\n" +
    "                       any other filename means binary format\n" +
    "-o,--outputFile <arg>  Name of output file. If the specified\n" +
    "                       file exists, it will be overwritten,\n" +
    "                       format of the file is determined\n" +
    "                       by -p option\n" +
    "\n" +
    "Optional command line arguments:\n" +
    "-p,--processor <arg>   Select which type of processor to apply\n" +
    "                       against image file, currently supported\n" +
    "                       processors are: binary (native binary format\n" +
    "                       that Hadoop uses), xml (default, XML\n" +
    "                       format), stats (prints statistics about\n" +
    "                       edits file)\n" +
    "-h,--help              Display usage information and exit\n" +
    "-v,--verbose           More verbose output, prints the input and\n" +
    "                       output filenames, for processors that write\n" +
    "                       to a file, also output to screen. On large\n" +
    "                       image files this will dramatically increase\n" +
    "                       processing time (default is false).\n";
  System.out.println(summary);
  System.out.println();
  // Append the standard generic-option help (e.g. -conf, -D).
  ToolRunner.printGenericCommandUsage(System.out);
}
java
{ "resource": "" }
q161426
ServletUtil.initHTML
train
/**
 * Starts an HTML response: sets the content type and emits the page
 * preamble (stylesheet link, title, opening body and heading).
 *
 * @param response servlet response to write to
 * @param title    page title, also used as the h1 heading
 * @return the response's writer, for the caller to continue the page
 * @throws IOException if the writer cannot be obtained
 */
public static PrintWriter initHTML(ServletResponse response, String title ) throws IOException {
  response.setContentType("text/html");
  PrintWriter out = response.getWriter();
  out.println("<html>\n"
      + "<link rel='stylesheet' type='text/css' href='/static/hadoop.css'>\n"
      + "<title>" + title + "</title>\n"
      + "<body>\n"
      + "<h1>" + title + "</h1>\n");
  return out;
}
java
{ "resource": "" }
q161427
ServletUtil.getParameter
train
/**
 * Fetches a request parameter, normalizing absent or whitespace-only
 * values to null.
 *
 * @param request request to read from
 * @param name    parameter name
 * @return the trimmed value, or null when missing or blank
 */
public static String getParameter(ServletRequest request, String name) {
  final String raw = request.getParameter(name);
  if (raw == null) {
    return null;
  }
  final String trimmed = raw.trim();
  return trimmed.length() == 0 ? null : trimmed;
}
java
{ "resource": "" }
q161428
ServletUtil.percentageGraph
train
/**
 * Renders a one-row HTML percentage bar: a filled cell of perc% and an
 * unfilled cell for the remainder, inside a table of the given pixel
 * width.
 *
 * @param perc  percentage filled, expected in [0, 100]
 * @param width total bar width in pixels
 * @return the HTML snippet
 * @throws IOException declared for caller compatibility
 */
public static String percentageGraph(int perc, int width) throws IOException {
  assert perc >= 0;
  assert perc <= 100;
  final StringBuilder html = new StringBuilder();
  html.append("<table border=\"1px\" width=\"")
      .append(width)
      .append("px\"><tr>");
  if (perc > 0) {
    html.append("<td cellspacing=\"0\" class=\"perc_filled\" width=\"")
        .append(perc)
        .append("%\"></td>");
  }
  if (perc < 100) {
    html.append("<td cellspacing=\"0\" class=\"perc_nonfilled\" width=\"")
        .append(100 - perc)
        .append("%\"></td>");
  }
  html.append("</tr></table>");
  return html.toString();
}
java
{ "resource": "" }
q161429
SessionNotificationCtx.write
train
/**
 * Serializes this notification context (handle, endpoint, and the queue
 * of pending thrift calls) as one JSON object.
 *
 * @param jsonGenerator generator to write the JSON to
 * @throws IOException if the generator fails
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  jsonGenerator.writeStringField("handle", handle);
  jsonGenerator.writeStringField("host", host);
  jsonGenerator.writeNumberField("port", port);
  jsonGenerator.writeNumberField("numPendingCalls", pendingCalls.size());
  jsonGenerator.writeFieldName("pendingCalls");
  jsonGenerator.writeStartArray();
  for (TBase call : pendingCalls) {
    jsonGenerator.writeStartObject();
    // TBase is an abstract class. While reading back, we want to know
    // what kind of object we actually wrote. Jackson does provide two methods
    // to do it automatically, but one of them adds types at a lot of places
    // where we don't need it, and hence our parsing would be required to be
    // changed. The other required adding an annotation to the TBase class,
    // which we can't do, since it is auto-generated by Thrift.
    String callType = call.getClass().getName();
    jsonGenerator.writeStringField("callType", callType);
    jsonGenerator.writeObjectField("call", call);
    jsonGenerator.writeEndObject();
  }
  jsonGenerator.writeEndArray();
  jsonGenerator.writeEndObject();
}
java
{ "resource": "" }
q161430
SessionNotificationCtx.makeCalls
train
/**
 * Attempts to drain this session's queue of pending thrift calls.
 * On failure the transport is closed, a retry is scheduled with
 * exponential back-off, and the method returns early.
 *
 * @param now current time in ms, used for retry scheduling
 * @return true to keep the context alive (all drained, or retry
 *         scheduled); false when the retry budget is exhausted
 */
public boolean makeCalls(long now) {
  // Back-off: do nothing until the scheduled retry time arrives.
  if (now < nextDispatchTime)
    return true;
  // we make calls in a loop until all pending calls are drained
  // if any call hits an error - we stop
  while (!pendingCalls.isEmpty()) {
    TBase call = pendingCalls.get(0);
    try {
      // initialize the client/transport unless already done
      init();
      // make one thrift call
      dispatchCall(call);
      // if we made a call successfully, reset any retry state
      nextDispatchTime = -1;
      numRetries = 0;
      currentRetryInterval = retryIntervalStart;
      // Only dequeue after a successful dispatch.
      pendingCalls.remove(0);
    } catch (TException e) {
      boolean logged = false;
      if (e instanceof TTransportException) {
        TTransportException tte = (TTransportException) e;
        Throwable cause = tte.getCause();
        if (cause != null && cause instanceof SocketTimeoutException) {
          // Got a socket timeout while waiting for a response from the
          // client. The session is stuck.
          logged = true;
          LOG.error("Call to session: " + handle + " for call: " +
              call.getClass().getName() + ", numRetry: " + numRetries +
              "(retryCountMax=" + retryCountMax + ")" +
              " failed with SocketTimeoutException, will retry it");
        }
      }
      if (!logged) {
        LOG.warn("Call to session: " + handle + " for call: " +
            call.getClass().getName() + ", numRetry: " + numRetries +
            "(retryCountMax=" + retryCountMax + ")" +
            " failed with TException", e);
      }
      // close the transport/client on any exception
      // will be reopened on next try
      close();
      if (numRetries > retryCountMax)
        return false;
      numRetries++;
      // Exponential back-off before the next dispatch attempt.
      nextDispatchTime = now + currentRetryInterval;
      currentRetryInterval *= retryIntervalFactor;
      // no more calls for now
      return true;
    }
  }
  // Queue drained: release the transport until it is needed again.
  close();
  return true;
}
java
{ "resource": "" }
q161431
CompressionCodecFactory.getCodecClasses
train
/**
 * Parses the comma-separated codec class list from the configuration
 * key "io.compression.codecs".
 *
 * @param conf configuration to read
 * @return the codec classes in configuration order, or null when the
 *         property is unset
 * @throws IllegalArgumentException if a listed class is missing or is
 *         not a CompressionCodec
 */
public static List<Class<? extends CompressionCodec>> getCodecClasses(Configuration conf) {
  String prop = conf.get("io.compression.codecs");
  if (prop == null) {
    // Unset: callers interpret null as "use the default codecs".
    return null;
  }
  List<Class<? extends CompressionCodec>> found =
      new ArrayList<Class<? extends CompressionCodec>>();
  for (String name : prop.split(",")) {
    if (name.length() == 0) {
      continue; // tolerate stray commas
    }
    try {
      Class<?> cls = conf.getClassByName(name);
      if (!CompressionCodec.class.isAssignableFrom(cls)) {
        throw new IllegalArgumentException("Class " + name +
            " is not a CompressionCodec");
      }
      found.add(cls.asSubclass(CompressionCodec.class));
    } catch (ClassNotFoundException ex) {
      throw new IllegalArgumentException("Compression codec " + name +
          " not found.", ex);
    }
  }
  return found;
}
java
{ "resource": "" }
q161432
CompressionCodecFactory.setCodecClasses
train
/**
 * Stores the ordered codec class list into the configuration key
 * "io.compression.codecs" as a comma-separated class-name string.
 *
 * @param conf    configuration to modify
 * @param classes codec classes; order is preserved
 */
public static void setCodecClasses(Configuration conf, List<Class> classes) {
  // FIX(idiom): StringBuilder instead of StringBuffer — the builder is a
  // local, so the StringBuffer's synchronization was pure overhead.
  StringBuilder buf = new StringBuilder();
  Iterator<Class> itr = classes.iterator();
  if (itr.hasNext()) {
    buf.append(itr.next().getName());
    while (itr.hasNext()) {
      buf.append(',');
      buf.append(itr.next().getName());
    }
  }
  conf.set("io.compression.codecs", buf.toString());
}
java
{ "resource": "" }
q161433
CompressionCodecFactory.getCodec
train
/**
 * Finds the codec matching a file's extension. The codecs map is keyed
 * by reversed suffix, so the longest matching suffix is the last key in
 * the head map of the reversed filename.
 *
 * @param file file whose extension selects the codec
 * @return the matching codec, or null when none matches
 */
public CompressionCodec getCodec(Path file) {
  if (codecs == null) {
    return null;
  }
  String reversed = new StringBuffer(file.getName()).reverse().toString();
  SortedMap<String, CompressionCodec> head = codecs.headMap(reversed);
  if (head.isEmpty()) {
    return null;
  }
  String candidate = head.lastKey();
  return reversed.startsWith(candidate) ? codecs.get(candidate) : null;
}
java
{ "resource": "" }
q161434
CompressionCodecFactory.getCodecByClassName
train
/**
 * Looks up a codec by its fully qualified class name.
 *
 * @param classname class name of the codec
 * @return the codec instance, or null when unknown or no codecs loaded
 */
public CompressionCodec getCodecByClassName(String classname) {
  return codecsByClassName == null ? null
      : codecsByClassName.get(classname);
}
java
{ "resource": "" }
q161435
CompressionCodecFactory.removeSuffix
train
/**
 * Strips a suffix from a filename when present; otherwise returns the
 * filename unchanged.
 *
 * @param filename filename to trim
 * @param suffix   suffix to remove
 * @return filename without the trailing suffix
 */
public static String removeSuffix(String filename, String suffix) {
  if (!filename.endsWith(suffix)) {
    return filename;
  }
  return filename.substring(0, filename.length() - suffix.length());
}
java
{ "resource": "" }
q161436
CompressionCodecFactory.main
train
/**
 * Command-line driver: for each path argument, the codec is chosen from
 * the file extension, then the file is either compressed ("-in": reads
 * the sibling file without the codec extension and writes args[i]) or
 * decompressed ("-out", the default: streams decoded bytes to stdout).
 *
 * @param args flags (-in / -out) interleaved with file paths
 * @throws Exception on any I/O or codec failure
 */
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  boolean encode = false;
  for(int i=0; i < args.length; ++i) {
    if ("-in".equals(args[i])) {
      encode = true;
    } else if ("-out".equals(args[i])) {
      encode = false;
    } else {
      CompressionCodec codec = factory.getCodec(new Path(args[i]));
      if (codec == null) {
        System.out.println("Codec for " + args[i] + " not found.");
      } else {
        if (encode) {
          // Compress: source is the argument minus the codec extension.
          CompressionOutputStream out = codec.createOutputStream(
              new java.io.FileOutputStream(args[i]));
          byte[] buffer = new byte[100];
          String inFilename = removeSuffix(args[i],
              codec.getDefaultExtension());
          java.io.InputStream in = new java.io.FileInputStream(inFilename);
          int len = in.read(buffer);
          while (len > 0) {
            out.write(buffer, 0, len);
            len = in.read(buffer);
          }
          in.close();
          out.close();
        } else {
          // Decompress: stream the decoded bytes to stdout.
          CompressionInputStream in = codec.createInputStream(
              new java.io.FileInputStream(args[i]));
          byte[] buffer = new byte[100];
          int len = in.read(buffer);
          while (len > 0) {
            System.out.write(buffer, 0, len);
            len = in.read(buffer);
          }
          in.close();
        }
      }
    }
  }
}
java
{ "resource": "" }
q161437
DocumentAndOp.setInsert
train
/**
 * Marks this operation as an INSERT of the given document; any delete
 * term is cleared.
 *
 * @param doc document to insert
 */
public void setInsert(Document doc) {
  this.doc = doc;
  this.term = null;
  this.op = Op.INSERT;
}
java
{ "resource": "" }
q161438
DocumentAndOp.setDelete
train
/**
 * Marks this operation as a DELETE of documents matching the given
 * term; any held document is cleared.
 *
 * @param term term selecting the documents to delete
 */
public void setDelete(Term term) {
  this.term = term;
  this.doc = null;
  this.op = Op.DELETE;
}
java
{ "resource": "" }
q161439
DocumentAndOp.setUpdate
train
/**
 * Marks this operation as an UPDATE: documents matching the term are
 * replaced by the given document.
 *
 * @param doc  replacement document
 * @param term term selecting the documents to replace
 */
public void setUpdate(Document doc, Term term) {
  this.doc = doc;
  this.term = term;
  this.op = Op.UPDATE;
}
java
{ "resource": "" }
q161440
RPC.getProtocolProxy
train
/**
 * Builds a protocol proxy for the current login user, delegating to the
 * ticket-based overload.
 *
 * @param protocol      protocol interface to proxy
 * @param clientVersion client's protocol version
 * @param addr          server address
 * @param conf          configuration used for login and transport
 * @param factory       socket factory for the connection
 * @return the protocol proxy
 * @throws IOException if proxy construction fails
 * @throws RuntimeException if login fails (original cause attached)
 */
public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, SocketFactory factory) throws IOException {
  UserGroupInformation ugi = null;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    // FIX: preserve the LoginException as the cause; the old code threw
    // a bare RuntimeException and discarded the failure details.
    throw new RuntimeException("Couldn't login!", le);
  }
  return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
}
java
{ "resource": "" }
q161441
RPC.getProtocolProxy
train
/**
 * Builds an RPC proxy for the given protocol and verifies compatibility
 * with the server: first via the full protocol-signature handshake, and
 * — when the server predates getProtocolSignature — via a plain version
 * number comparison.
 *
 * @param protocol      protocol interface to proxy
 * @param clientVersion client's protocol version
 * @param addr          server address
 * @param ticket        user performing the calls
 * @param conf          configuration for the connection
 * @param factory       socket factory
 * @param rpcTimeout    per-call timeout in ms
 * @return proxy plus the server's supported method set (null when only
 *         the legacy version check was possible)
 * @throws IOException (incl. VersionMismatch) on incompatible versions
 */
@SuppressWarnings("unchecked") public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout) throws IOException {
  T proxy = (T) Proxy.newProxyInstance(
      protocol.getClassLoader(),
      new Class[] { protocol },
      new Invoker(addr, ticket, conf, factory, rpcTimeout, protocol));
  String protocolName = protocol.getName();
  try {
    // Preferred path: ask the server for its full protocol signature.
    ProtocolSignature serverInfo = proxy
        .getProtocolSignature(protocolName, clientVersion,
            ProtocolSignature.getFingerprint(protocol.getMethods()));
    return new ProtocolProxy<T>(protocol, proxy, serverInfo.getMethods());
  } catch (RemoteException re) {
    IOException ioe = re.unwrapRemoteException(IOException.class);
    // NOTE(review): ioe.getMessage() may be null for some remote errors,
    // which would NPE here — confirm against the server's error format.
    if (ioe.getMessage().startsWith(IOException.class.getName() + ": " +
        NoSuchMethodException.class.getName())) {
      // Method getProtocolSignature not supported
      // Fall back to the legacy version-number handshake.
      long serverVersion = proxy.getProtocolVersion(protocol.getName(),
          clientVersion);
      if (serverVersion == clientVersion) {
        return new ProtocolProxy<T>(protocol, proxy, null);
      }
      throw new VersionMismatch(protocolName, clientVersion, serverVersion,
          proxy);
    }
    throw re;
  }
}
java
{ "resource": "" }
q161442
LocalFileSystem.reportChecksumFailure
train
/**
 * Moves a file that failed checksum verification (and its checksum
 * file) into a "bad_files" directory on the same device, keeping it
 * available for inspection while getting it out of the way.
 *
 * @param p       path of the corrupt file
 * @param in      open stream over the corrupt file (closed here)
 * @param inPos   position of the failure in the data stream (unused)
 * @param sums    open stream over the checksum file (not closed here)
 * @param sumsPos position of the failure in the checksum stream (unused)
 * @return false always, so the caller does not retry
 */
public boolean reportChecksumFailure(Path p, FSDataInputStream in, long inPos, FSDataInputStream sums, long sumsPos) {
  try {
    // canonicalize f
    File f = ((RawLocalFileSystem)fs).pathToFile(p).getCanonicalFile();
    // find highest writable parent dir of f on the same device
    String device = new DF(f, getConf()).getMount();
    File parent = f.getParentFile();
    File dir = null;
    while (parent!=null && parent.canWrite() &&
           parent.toString().startsWith(device)) {
      dir = parent;
      parent = parent.getParentFile();
    }
    if (dir==null) {
      throw new IOException(
          "not able to find the highest writable parent dir");
    }
    // move the file there
    File badDir = new File(dir, "bad_files");
    if (!badDir.mkdirs()) {
      if (!badDir.isDirectory()) {
        throw new IOException("Mkdirs failed to create " +
                              badDir.toString());
      }
    }
    // Random suffix avoids clobbering an earlier bad copy of the same name.
    String suffix = "." + rand.nextInt();
    File badFile = new File(badDir, f.getName()+suffix);
    LOG.warn("Moving bad file " + f + " to " + badFile);
    in.close(); // close it first
    // NOTE(review): renameTo results are ignored; a failed rename is
    // silently dropped — confirm this best-effort behavior is intended.
    f.renameTo(badFile); // rename it
    // move checksum file too
    File checkFile = ((RawLocalFileSystem)fs).pathToFile(getChecksumFile(p));
    checkFile.renameTo(new File(badDir, checkFile.getName()+suffix));
  } catch (IOException e) {
    LOG.warn("Error moving bad file " + p + ": " + e);
  }
  return false;
}
java
{ "resource": "" }
q161443
SpillRecord.getIndex
train
/**
 * Returns the index record for a partition.
 *
 * @param partition partition number to look up
 * @return a fresh IndexRecord (start offset, raw length, part length)
 */
public IndexRecord getIndex(int partition) {
  // Record length is in bytes; /8 converts it to an offset into the
  // long-valued buffer. Each record spans three consecutive longs.
  final int base = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
  return new IndexRecord(entries.get(base),
                         entries.get(base + 1),
                         entries.get(base + 2));
}
java
{ "resource": "" }
q161444
SpillRecord.putIndex
train
/**
 * Stores an index record at a partition's slot.
 *
 * @param rec       record to store
 * @param partition partition number whose slot is written
 */
public void putIndex(IndexRecord rec, int partition) {
  // Record length is in bytes; /8 converts it to an offset into the
  // long-valued buffer. Each record spans three consecutive longs.
  final int base = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
  entries.put(base, rec.startOffset);
  entries.put(base + 1, rec.rawLength);
  entries.put(base + 2, rec.partLength);
}
java
{ "resource": "" }
q161445
SpillRecord.writeToFile
train
/**
 * Writes this spill record to the given path, checksummed with a fresh
 * PureJavaCrc32.
 *
 * @param loc path of the index file to create
 * @param job job configuration used by the underlying writer
 * @throws IOException if the write fails
 */
public void writeToFile(Path loc, JobConf job) throws IOException {
  writeToFile(loc, job, new PureJavaCrc32());
}
java
{ "resource": "" }
q161446
FairSchedulerServlet.printFilterInfo
train
/**
 * Prints an h3 heading describing the active pool/user filters together with
 * a link that clears them. Prints nothing when no filter is active.
 *
 * @param out destination writer
 * @param poolFilter pool filter text, or null when pools are unfiltered
 * @param userFilter user filter text, or null when users are unfiltered
 * @param showAllLink href that shows all pools and users
 */
static void printFilterInfo(PrintWriter out, String poolFilter,
    String userFilter, String showAllLink) {
  if (userFilter == null && poolFilter == null) {
    return;
  }
  StringBuilder customizedInfo = new StringBuilder("Only showing ");
  if (poolFilter != null) {
    customizedInfo.append("pool(s) " + poolFilter);
  }
  if (userFilter != null) {
    // FIX: only emit the " and " separator when a pool clause was actually
    // appended. The old check (customizedInfo.length() != 0) was always true
    // because the builder is seeded with "Only showing ", producing
    // "Only showing and user(s) ..." when only a user filter was set.
    if (poolFilter != null) {
      customizedInfo.append(" and ");
    }
    customizedInfo.append("user(s) " + userFilter);
  }
  out.printf("<h3>%s <a href=\"%s\">(show all pools and users)</a></h3>",
      customizedInfo.toString(), showAllLink);
}
java
{ "resource": "" }
q161447
FairSchedulerServlet.showAdminFormPreemption
train
/** Renders the task-preemption On/Off selector for the admin page. */
private void showAdminFormPreemption(PrintWriter out, boolean advancedView) {
  out.print("<h2>Task Preemption</h2>\n");
  // Current state drives which option is pre-selected.
  final String current;
  if (scheduler.isPreemptionEnabled()) {
    current = "On";
  } else {
    current = "Off";
  }
  // Preserve the advanced-view flag across the form submission.
  String submitUrl = "/fairscheduler?setPreemptionEnabled=<CHOICE>";
  if (advancedView) {
    submitUrl += "&advanced";
  }
  out.print(generateSelect(Arrays.asList("On,Off".split(",")), current, submitUrl));
}
java
{ "resource": "" }
q161448
FairSchedulerServlet.showAdminFormMemBasedLoadMgr
train
// Renders the memory-based-scheduling section of the admin form. Shown only
// when the configured load manager is a MemBasedLoadManager.
private void showAdminFormMemBasedLoadMgr(PrintWriter out, boolean advancedView) {
  if (!(loadMgr instanceof MemBasedLoadManager)) {
    return;
  }
  out.print("<h2>Memory Based Scheduling</h2>\n");
  MemBasedLoadManager memLoadMgr = (MemBasedLoadManager)loadMgr;
  // Drop-down choices, in GB; 1000 presumably acts as "no limit" — confirm.
  Collection<String> possibleThresholds =
      Arrays.asList(("0,1,2,3,4,5,6,7,8,9,10,1000").split(","));
  // Convert the reserved amount from MB to whole GB, rounding to nearest.
  long reservedMemGB =
      (long)(memLoadMgr.getReservedPhysicalMemoryOnTT() / 1024D + 0.5);
  out.printf("<p>Reserve %s GB memory on one node.",
      generateSelect(possibleThresholds, "" + reservedMemGB,
          "/fairscheduler?setTtThreshold=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
}
java
{ "resource": "" }
q161449
FairSchedulerServlet.showCluster
train
// Prints the cluster-wide resource table. Intentionally best-effort: when the
// table is empty or generating it throws, the section is simply omitted.
static void showCluster(PrintWriter out, boolean advancedView,
    JobTracker jobTracker) {
  String cluster = "";
  try {
    cluster = JSPUtil.generateClusterResTable(jobTracker);
    if (cluster.equals("")) {
      return;
    }
  } catch (IOException e) {
    // swallow: omit the section rather than fail the whole page
    return;
  }
  out.print("<h2>Cluster Resource</h2>\n");
  out.print(cluster);
}
java
{ "resource": "" }
q161450
FairSchedulerServlet.showNumTaskPerHeartBeatOption
train
// Renders two selectors controlling how many map and reduce tasks are
// assigned per TaskTracker heartbeat (choices 1..10).
private void showNumTaskPerHeartBeatOption(PrintWriter out,
    boolean advancedView) {
  out.print("<h2>Number of Assigned Tasks Per HeartBeat</h2>\n");
  out.printf("<p>Number of map tasks assigned per heartbeat:%s",
      generateSelect(Arrays.asList("1,2,3,4,5,6,7,8,9,10".split(",")),
          scheduler.getMapPerHeartBeat() + "",
          "/fairscheduler?setMapPerHeartBeat=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
  out.printf("<p>Number of reduce tasks assigned per heartbeat:%s",
      generateSelect(Arrays.asList("1,2,3,4,5,6,7,8,9,10".split(",")),
          scheduler.getReducePerHeartBeat() + "",
          "/fairscheduler?setReducePerHeartBeat=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
}
java
{ "resource": "" }
q161451
FairSchedulerServlet.getInitedJobs
train
/**
 * Returns the tracker's running jobs with any not-yet-initialized jobs
 * removed (mutates and returns the collection from the JobTracker).
 */
private Collection<JobInProgress> getInitedJobs() {
  Collection<JobInProgress> running = jobTracker.getRunningJobs();
  Iterator<JobInProgress> it = running.iterator();
  while (it.hasNext()) {
    // drop jobs whose initialization has not completed yet
    if (!it.next().inited()) {
      it.remove();
    }
  }
  return running;
}
java
{ "resource": "" }
q161452
Submitter.setIfUnset
train
/**
 * Sets {@code key} to {@code value} in the configuration only when the key
 * has no value yet (does not overwrite existing settings).
 */
private static void setIfUnset(JobConf conf, String key, String value) {
  final String existing = conf.get(key);
  if (existing == null) {
    conf.set(key, value);
  }
}
java
{ "resource": "" }
q161453
Submitter.main
train
/**
 * Command-line entry point: runs the Submitter tool on the given arguments
 * and exits the JVM with the tool's return code.
 */
public static void main(String[] args) throws Exception {
  int exitCode = new Submitter().run(args);
  System.exit(exitCode);
}
java
{ "resource": "" }
q161454
FSNamesystem.loadEnabledPermissionCheckingDirs
train
// Resolves the configured "dfs.permissions.checking.paths" (default "/") to
// INodes and caches them in permissionEnabled; only paths under these roots
// get permission checks. No-op when permissions are globally disabled.
// Throws if a configured path does not exist.
private void loadEnabledPermissionCheckingDirs(Configuration conf)
    throws IOException {
  if (this.isPermissionEnabled) {
    String[] permissionCheckingDirs =
        conf.getStrings("dfs.permissions.checking.paths", "/");
    int numDirs = permissionCheckingDirs.length;
    if (numDirs == 0) {
      return;
    }
    this.permissionEnabled = new INode[numDirs];
    int i = 0;
    for (String src : permissionCheckingDirs) {
      INode permissionEnabledNode = this.dir.getINode(src);
      if (permissionEnabledNode == null) {
        // NOTE(review): message says "disabling" although this list enables
        // checking — confirm wording before changing the runtime string.
        throw new IOException(
            "Non-existent path for disabling permission Checking: " + src);
      }
      permissionEnabled[i++] = permissionEnabledNode;
    }
  }
}
java
{ "resource": "" }
q161455
FSNamesystem.isPermissionCheckingEnabled
train
/**
 * Returns true when permission checking applies to the given resolved path:
 * permissions must be globally enabled and some node on the path must be one
 * of the cached permission-enabled root INodes (identity comparison).
 */
private boolean isPermissionCheckingEnabled(INode[] pathNodes) {
  // Guard clauses: globally disabled, or no enabled roots loaded.
  if (!this.isPermissionEnabled || permissionEnabled == null) {
    return false;
  }
  for (INode enabledRoot : this.permissionEnabled) {
    for (INode pathNode : pathNodes) {
      if (pathNode == enabledRoot) {
        return true;
      }
    }
  }
  return false;
}
java
{ "resource": "" }
q161456
FSNamesystem.setHeartbeatInterval
train
// Updates the heartbeat timing parameters and the values derived from them.
private void setHeartbeatInterval(long heartbeatInterval,
    long heartbeatRecheckInterval) {
  this.heartbeatInterval = heartbeatInterval;
  this.heartbeatRecheckInterval = heartbeatRecheckInterval;
  // A datanode is considered expired after two recheck periods plus ten
  // missed heartbeats.
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
      + 10 * heartbeatInterval;
  // Raise (never lower) the per-heartbeat block-invalidation limit to at
  // least 20 blocks per second of heartbeat interval.
  ReplicationConfigKeys.blockInvalidateLimit =
      Math.max(ReplicationConfigKeys.blockInvalidateLimit,
          20 * (int) (heartbeatInterval/1000L));
}
java
{ "resource": "" }
q161457
FSNamesystem.stopLeaseMonitor
train
// Stops the lease monitor: halts the monitor object, then interrupts the
// monitor thread under the write lock (so the interrupt cannot land inside
// LeaseManager.checkLeases()) and joins it.
public void stopLeaseMonitor() throws InterruptedException {
  if (lmmonitor != null) {
    lmmonitor.stop();
    InjectionHandler
        .processEvent(InjectionEvent.FSNAMESYSTEM_STOP_LEASEMANAGER);
  }
  if (lmthread != null) {
    // interrupt the lease monitor thread. We need to make sure the
    // interruption does not happen during
    // LeaseManager.checkLeases(), which would possibly interfere with writing
    // to edit log, and hence with clean shutdown. It essential during
    // failover, as it could exclude edit log streams!!!
    writeLock();
    try {
      lmthread.interrupt();
    } finally {
      writeUnlock();
    }
    // join outside the lock so the thread can finish its current pass
    lmthread.join();
  }
}
java
{ "resource": "" }
q161458
FSNamesystem.close
train
// Shuts down the namesystem: flags fsRunning=false, stops/interrupts the
// background daemons (pending replication, heartbeat, under/over-replication,
// raid encoding, decommission, automatic edits roller, safe mode), then — in
// finally, so it always runs — stops the lease monitor and closes the
// FSDirectory (fsimage/edits). Shutdown exceptions are logged, not rethrown.
public void close() {
  fsRunning = false;
  try {
    if (pendingReplications != null) {
      pendingReplications.stop();
    }
    if (hbthread != null) {
      hbthread.interrupt();
    }
    if (underreplthread != null) {
      underreplthread.interrupt();
    }
    if (overreplthread != null) {
      overreplthread.interrupt();
    }
    if (raidEncodingTaskThread != null) {
      raidEncodingTaskThread.interrupt();
    }
    if (dnthread != null) {
      dnthread.interrupt();
    }
    if (automaticEditsRollingThread != null) {
      automaticEditsRoller.stop();
      // We cannot interrupt roller thread. For manual failover, we want
      // the edits file operations to finish.
      automaticEditsRollingThread.join();
    }
    if (safeMode != null) {
      safeMode.shutdown();
    }
  } catch (Exception e) {
    LOG.warn("Exception shutting down FSNamesystem", e);
  } finally {
    // using finally to ensure we also wait for lease daemon
    try {
      LOG.info("Stopping LeaseManager");
      stopLeaseMonitor();
      if (InjectionHandler
          .trueCondition(InjectionEvent.FSNAMESYSTEM_CLOSE_DIRECTORY)) {
        if (dir != null) {
          LOG.info("Stopping directory (fsimage, fsedits)");
          dir.close();
        }
      }
    } catch (InterruptedException ie) {
    } catch (IOException ie) {
      LOG.error("Error closing FSDirectory", ie);
      IOUtils.cleanup(LOG, dir);
    }
  }
}
java
{ "resource": "" }
q161459
FSNamesystem.metaSave
train
// Dumps NameNode metadata (superuser only) to <hadoop.log.dir>/<filename>,
// appending: blocks waiting for replication with per-replica state, pending
// replications, blocks awaiting deletion, excess replicas, and all datanodes.
// Holds the read lock for the whole dump.
void metaSave(String filename) throws IOException {
  readLock();
  try {
    checkSuperuserPrivilege();
    File file = new File(System.getProperty("hadoop.log.dir"), filename);
    PrintWriter out = new PrintWriter(new BufferedWriter(
        new FileWriter(file, true)));
    //
    // Dump contents of neededReplication
    //
    synchronized (neededReplications) {
      out.println("Metasave: Blocks waiting for replication: "
          + neededReplications.size());
      for (Block block : neededReplications) {
        List<DatanodeDescriptor> containingNodes =
            new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        // source node returned is not used
        chooseSourceDatanode(block, containingNodes, numReplicas);
        int usableReplicas = numReplicas.liveReplicas()
            + numReplicas.decommissionedReplicas();
        // l: == live:, d: == decommissioned c: == corrupt e: == excess
        out.print(block + ((usableReplicas > 0) ? "" : " MISSING")
            + " size: " + block.getNumBytes()
            + " (replicas:"
            + " l: " + numReplicas.liveReplicas()
            + " d: " + numReplicas.decommissionedReplicas()
            + " c: " + numReplicas.corruptReplicas()
            + " e: " + numReplicas.excessReplicas() + ") ");
        Collection<DatanodeDescriptor> corruptNodes =
            corruptReplicas.getNodes(block);
        for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);
            jt.hasNext();) {
          DatanodeDescriptor node = jt.next();
          String state = "";
          if (corruptNodes != null && corruptNodes.contains(node)) {
            state = "(corrupt)";
          } else if (node.isDecommissioned()
              || node.isDecommissionInProgress()) {
            state = "(decommissioned)";
          }
          out.print(" " + node + state + " : ");
        }
        out.println("");
      }
    }
    //
    // Dump blocks from pendingReplication
    //
    pendingReplications.metaSave(out);
    //
    // Dump blocks that are waiting to be deleted
    //
    dumpRecentInvalidateSets(out);
    //
    // Dump blocks that are excess and waiting to be deleted
    //
    dumpExcessReplicasSets(out);
    //
    // Dump all datanodes
    //
    datanodeDump(out);
    out.flush();
    out.close();
  } finally {
    readUnlock();
  }
}
java
{ "resource": "" }
q161460
FSNamesystem.addBlock
train
private long addBlock(Block block, List<BlockWithLocations> results) { ArrayList<String> machineSet = new ArrayList<String>(blocksMap.numNodes(block)); for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) { String storageID = it.next().getStorageID(); // filter invalidate replicas LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID); if (blocks == null || !blocks.contains(block)) { machineSet.add(storageID); } } if (machineSet.size() == 0) { return 0; } else { results.add(new BlockWithLocations(block, machineSet.toArray(new String[machineSet.size()]))); return block.getNumBytes(); } }
java
{ "resource": "" }
q161461
FSNamesystem.setPermission
train
// Sets the permission bits of src. Rejected in safe mode; when permission
// checking applies to the path, the caller must be the owner. The edit log
// is synced and the audit event logged after the lock is released.
public void setPermission(String src, FsPermission permission)
    throws IOException {
  INode[] inodes = null;
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot set permission for " + src, safeMode);
    }
    inodes = dir.getExistingPathINodes(src);
    if (isPermissionCheckingEnabled(inodes)) {
      checkOwner(src, inodes);
    }
    dir.setPermission(src, permission);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(),
        "setPermission", src, null, getLastINode(inodes));
  }
}
java
{ "resource": "" }
q161462
FSNamesystem.setOwner
train
// Sets owner and/or group of src (null leaves a field unchanged). Non-super
// callers may not change the owner and may only set a group they belong to;
// in permissionAuditOnly mode these violations are logged instead of thrown.
// Edit-log sync and audit logging happen after the lock is released.
public void setOwner(String src, String username, String group)
    throws IOException {
  INode[] inodes = null;
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot set permission for " + src, safeMode);
    }
    inodes = dir.getExistingPathINodes(src);
    if (isPermissionCheckingEnabled(inodes)) {
      FSPermissionChecker pc = checkOwner(src, inodes);
      if (!pc.isSuper) {
        if (username != null && !pc.user.equals(username)) {
          if (this.permissionAuditOnly) {
            // do not throw the exception, we would like to only log.
            LOG.warn("PermissionAudit failed on " + src
                + ": non-super user cannot change owner.");
          } else {
            throw new AccessControlException(
                "Non-super user cannot change owner.");
          }
        }
        if (group != null && !pc.containsGroup(group)) {
          if (this.permissionAuditOnly) {
            // do not throw the exception, we would like to only log.
            LOG.warn("PermissionAudit failed on " + src
                + ": user does not belong to " + group + " .");
          } else {
            throw new AccessControlException(
                "User does not belong to " + group + " .");
          }
        }
      }
    }
    dir.setOwner(src, username, group);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(),
        "setOwner", src, null, getLastINode(inodes));
  }
}
java
{ "resource": "" }
q161463
FSNamesystem.updateDatanodeInfo
train
// Refreshes the datanode locations of every block in locatedBlocks from the
// current blocks map, filtering out corrupt replicas (unless ALL replicas are
// corrupt, in which case all are returned and the block is flagged corrupt).
// Blocks are defensively copied before the read lock is released so clients
// never see a block mutated mid-serialization.
LocatedBlocksWithMetaInfo updateDatanodeInfo(LocatedBlocks locatedBlocks)
    throws IOException {
  if (locatedBlocks.getLocatedBlocks().size() == 0)
    return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
        locatedBlocks.getLocatedBlocks(), false,
        DataTransferProtocol.DATA_TRANSFER_VERSION, getNamespaceId(),
        this.nameNode.getClientProtocolMethodsFingerprint());
  List<LocatedBlock> newBlocks = new ArrayList<LocatedBlock>();
  readLock();
  try {
    for (LocatedBlock locBlock: locatedBlocks.getLocatedBlocks()) {
      Block block = locBlock.getBlock();
      int numNodes = blocksMap.numNodes(block);
      int numCorruptNodes = countNodes(block).corruptReplicas();
      int numCorruptReplicas = corruptReplicas.numCorruptReplicas(block);
      if (numCorruptNodes != numCorruptReplicas) {
        LOG.warn("Inconsistent number of corrupt replicas for " + block
            + "blockMap has " + numCorruptNodes
            + " but corrupt replicas map has " + numCorruptReplicas);
      }
      boolean blockCorrupt = numCorruptNodes == numNodes;
      int numMachineSet = blockCorrupt ? numNodes :
          (numNodes - numCorruptNodes);
      DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet];
      if (numMachineSet > 0) {
        numNodes = 0;
        for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
            it.hasNext();) {
          DatanodeDescriptor dn = it.next();
          boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(block, dn);
          if (blockCorrupt || (!blockCorrupt && !replicaCorrupt))
            machineSet[numNodes++] = dn;
        }
      }
      // We need to make a copy of the block object before releasing the lock
      // to prevent the state of block is changed after that and before the
      // object is serialized to clients, to avoid potential inconsistency.
      // Further optimization is possible to avoid some object copy. Since it
      // is so far not a critical path. We leave a safe approach here.
      //
      Block blockCopy = null;
      if (block != null) {
        blockCopy = new Block(block);
      }
      LocatedBlock newBlock = new LocatedBlock(blockCopy, machineSet, 0,
          blockCorrupt);
      newBlocks.add(newBlock);
    }
  } finally {
    readUnlock();
  }
  return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
      newBlocks, false, DataTransferProtocol.DATA_TRANSFER_VERSION,
      getNamespaceId(), this.nameNode.getClientProtocolMethodsFingerprint());
}
java
{ "resource": "" }
q161464
FSNamesystem.setTimes
train
// Sets modification/access time on src. atime == -1 means "leave the access
// time unchanged", so only a real atime value is rejected when access-time
// updates are frozen (accessTimeTouchable == false).
public void setTimes(String src, long mtime, long atime) throws IOException {
  if ( !accessTimeTouchable && atime != -1) {
    throw new AccessTimeException("setTimes is not allowed for accessTime");
  }
  setTimesInternal(src, mtime, atime);
  getEditLog().logSync(false);
}
java
{ "resource": "" }
q161465
FSNamesystem.verifyReplication
train
/**
 * Validates that the requested replication factor lies within
 * [minReplication, maxReplication].
 *
 * @param src file the request is for (used in the error message)
 * @param replication requested replication factor
 * @param clientName requesting client, or null (used in the error message)
 * @throws IOException when the factor is out of range
 */
private void verifyReplication(String src, short replication,
    String clientName) throws IOException {
  // Build the common message prefix once.
  StringBuilder text = new StringBuilder("file ").append(src);
  if (clientName != null) {
    text.append(" on client ").append(clientName);
  }
  text.append(".\n").append("Requested replication ").append(replication);
  if (replication > maxReplication) {
    throw new IOException(text + " exceeds maximum " + maxReplication);
  }
  if (replication < minReplication) {
    throw new IOException(
        text + " is less than the required minimum " + minReplication);
  }
}
java
{ "resource": "" }
q161466
FSNamesystem.startFile
train
// Creates a new file (delegating to startFileInternal with append=false),
// syncs the edit log, and records an audit event on success.
void startFile(String src, PermissionStatus permissions, String holder,
    String clientMachine, boolean overwrite, boolean createParent,
    short replication, long blockSize) throws IOException {
  INodeFileUnderConstruction file = startFileInternal(src, permissions, holder,
      clientMachine, overwrite, false, createParent, replication, blockSize);
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(),
        "create", src, null, file);
  }
}
java
{ "resource": "" }
q161467
FSNamesystem.recoverLease
train
// Starts lease recovery for src on behalf of holder. Returns true when the
// file is already closed (nothing to recover); otherwise delegates to
// recoverLeaseInternal. Requires WRITE access when permission checking
// applies to the path.
boolean recoverLease(String src, String holder, String clientMachine,
    boolean discardLastBlock) throws IOException {
  // convert names to array of bytes w/o holding lock
  byte[][] components = INodeDirectory.getPathComponents(src);
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot recover the lease of " + src, safeMode);
    }
    INode inode = dir.getFileINode(components);
    if (inode == null) {
      throw new FileNotFoundException("File not found " + src);
    }
    if (!inode.isUnderConstruction()) {
      // already closed: nothing to recover
      return true;
    }
    if (isPermissionEnabled) {
      INode[] inodes = dir.getExistingPathINodes(src);
      if (isPermissionCheckingEnabled(inodes)) {
        checkPathAccess(src, inodes, FsAction.WRITE);
      }
    }
    return recoverLeaseInternal(inode, src, holder, clientMachine,
        true, discardLastBlock);
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161468
FSNamesystem.getAdditionalBlock
train
// Convenience overload: delegates to the three-argument getAdditionalBlock
// with a null third argument (presumably a node hint list — confirm against
// the full overload's signature).
public LocatedBlock getAdditionalBlock(String src, String clientName)
    throws IOException {
  return getAdditionalBlock(src, clientName, null);
}
java
{ "resource": "" }
q161469
FSNamesystem.findBestDatanodeInCluster
train
// Maps a client-supplied list of favored datanodes to live DatanodeDescriptors.
// Lookup order per node: exact datanode, ip:port, (test hook) by host, then
// fallbacks: machine-local node in the same rack, random rack-local node,
// finally any random node in the cluster (recording that the rack changed).
// If all chosen nodes would land on one rack (and the cluster has more than
// one), the rack-changed fallbacks are re-picked on other racks.
private DatanodeDescriptor[] findBestDatanodeInCluster(
    List<DatanodeInfo> infos, int replication) throws IOException {
  int targetReplication = Math.min(infos.size(), replication);
  DatanodeDescriptor[] dns = new DatanodeDescriptor[targetReplication];
  boolean[] changedRacks = new boolean[targetReplication];
  boolean isOnSameRack = (clusterMap.getNumOfRacks() > 1
      && targetReplication > 1);
  for (int i=0; i<targetReplication; i++) {
    DatanodeInfo info = infos.get(i);
    DatanodeDescriptor node = getDatanode(info);
    if (node == null) {
      node = host2DataNodeMap.getDataNodeByIpPort(toHostPort(info));
    }
    if (node == null && InjectionHandler.trueCondition(
        InjectionEvent.FSNAMESYSTEM_STOP_LEASEMANAGER, info.getHost())) {
      node = host2DataNodeMap.getDatanodeByHost(info.getHost());
    }
    if (node == null) {
      if (info.getNetworkLocation() == null
          || info.getNetworkLocation().equals(NetworkTopology.DEFAULT_RACK)) {
        resolveNetworkLocation(info);
      }
      // If the current cluster doesn't contain the node, fallback to
      // something machine local and then rack local.
      List<Node> rackNodes = clusterMap.getDatanodesInRack(
          info.getNetworkLocation());
      if (rackNodes != null) {
        // Try something machine local.
        for (Node rackNode : rackNodes) {
          if (((DatanodeDescriptor) rackNode).getHost().equals(info.getHost())) {
            node = (DatanodeDescriptor) rackNode;
            break;
          }
        }
        // Try something rack local.
        if (node == null && !rackNodes.isEmpty()) {
          node = (DatanodeDescriptor) (rackNodes
              .get(r.nextInt(rackNodes.size())));
        }
      }
      // If we can't even choose rack local, just choose any node in the
      // cluster.
      if (node == null) {
        node = (DatanodeDescriptor) clusterMap.chooseRandom(NodeBase.ROOT);
        LOG.info("ChooseTarget for favored nodes: " + toString(infos)
            + ". " + "Node " + info + " changed its rack location to " + node);
        changedRacks[i] = true;
      } else {
        changedRacks[i] = false;
      }
    }
    if (node == null) {
      throw new IOException("Could not find any node in the cluster for : "
          + info);
    }
    dns[i] = node;
    if (i!=0 && isOnSameRack && !clusterMap.isOnSameRack(dns[i], dns[i-1])) {
      isOnSameRack = false;
    }
  }
  // Make sure that the returning nodes are not on the same rack
  if (isOnSameRack) {
    for (int i=0; i<targetReplication; i++) {
      if (changedRacks[i]) {
        dns[i] = (DatanodeDescriptor) clusterMap.chooseRandom(NodeBase.ROOT,
            dns[i].getNetworkLocation());
      }
    }
  }
  return dns;
}
java
{ "resource": "" }
q161470
FSNamesystem.setLastBlockSize
train
/**
 * Sets the size of the file's last block to the file's preferred block size;
 * no-op when the file has no blocks.
 */
private void setLastBlockSize(INodeFileUnderConstruction pendingFile) {
  final Block last = pendingFile.getLastBlock();
  if (last == null) {
    return;
  }
  last.setNumBytes(pendingFile.getPreferredBlockSize());
}
java
{ "resource": "" }
q161471
FSNamesystem.replicateLastBlock
train
// Ensures the last block of a just-written file reaches its target
// replication: records any shortfall in pendingReplications, queues the block
// in neededReplications when targets are short or replicas are
// decommissioned/corrupt, and updates the new-block metrics.
private void replicateLastBlock(String src, INodeFileUnderConstruction file) {
  BlockInfo[] blks = file.getBlocks();
  if (blks == null || blks.length == 0)
    return;
  BlockInfo block = blks[blks.length-1];
  DatanodeDescriptor[] targets = file.getValidTargets();
  final int numOfTargets = targets == null ? 0 : targets.length;
  NumberReplicas status = countNodes(block);
  int totalReplicas = status.getTotal();
  if (numOfTargets > totalReplicas) {
    // replicas not yet reported by their datanodes
    pendingReplications.add(block, numOfTargets-totalReplicas);
  }
  int expectedReplicas = file.getReplication();
  if (numOfTargets < expectedReplicas
      || status.decommissionedReplicas != 0
      || status.corruptReplicas != 0) {
    LOG.info("Add " + block + " of " + src + " to needReplication queue: "
        + " numOfTargets = " + numOfTargets
        + " decomissionedReplicas = " + status.decommissionedReplicas
        + " corruptReplicas = " + status.corruptReplicas);
    neededReplications.add(block, status.liveReplicas,
        status.decommissionedReplicas, expectedReplicas);
  }
  // update metrics
  if (numOfTargets < expectedReplicas) {
    if (numOfTargets == 1) {
      myFSMetrics.numNewBlocksWithOneReplica.inc();
    }
  } else {
    myFSMetrics.numNewBlocksWithoutFailure.inc();
  }
  myFSMetrics.numNewBlocks.inc();
}
java
{ "resource": "" }
q161472
FSNamesystem.abandonBlock
train
public boolean abandonBlock(Block b, String src, String holder ) throws IOException { writeLock(); try { // // Remove the block from the pending creates list // if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b + "of file " + src); } if (isInSafeMode()) { throw new SafeModeException("Cannot abandon block " + b + " for fle" + src, safeMode); } INodeFileUnderConstruction file = checkLease(src, holder); dir.removeBlock(src, file, b); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b + " is removed from pendingCreates"); } return true; } finally { writeUnlock(); } }
java
{ "resource": "" }
q161473
FSNamesystem.checkLease
train
// Convenience overload: resolves src to its INodeFile and delegates the
// actual lease validation to the three-argument checkLease.
private INodeFileUnderConstruction checkLease(String src, String holder)
    throws IOException {
  INodeFile file = dir.getFileINode(src);
  return checkLease(src, holder, file);
}
java
{ "resource": "" }
q161474
FSNamesystem.allocateBlock
train
// Allocates a fresh block for src: regenerates the id until it does not
// collide with an existing block, stamps the current generation stamp, and
// registers the block with the directory.
private Block allocateBlock(String src, INode[] inodes) throws IOException {
  Block b = new Block(generateBlockId(), 0, 0);
  while (isValidBlock(b)) {
    // id already in use — pick another
    b.setBlockId(generateBlockId());
  }
  b.setGenerationStamp(getGenerationStamp());
  b = dir.addBlock(src, inodes, b);
  return b;
}
java
{ "resource": "" }
q161475
FSNamesystem.allocateParityBlocks
train
// Allocates numParityBlocks fresh blocks (parity blocks, presumably for RAID
// encoding — confirm callers), each with a unique id and the current
// generation stamp. Unlike allocateBlock, they are NOT added to the
// directory here.
private Block[] allocateParityBlocks(int numParityBlocks) throws IOException {
  Block[] blocks = new Block[numParityBlocks];
  for (int i = 0; i < numParityBlocks; i++) {
    Block b = new Block(generateBlockId(), 0, 0);
    while (isValidBlock(b)) {
      // id already in use — pick another
      b.setBlockId(generateBlockId());
    }
    b.setGenerationStamp(getGenerationStamp());
    blocks[i] = b;
  }
  return blocks;
}
java
{ "resource": "" }
q161476
FSNamesystem.checkFileProgress
train
// Returns true when the file's blocks are sufficiently replicated to proceed:
// with checkall, every block must reach min(file replication,
// minCloseReplication); otherwise only the penultimate block is checked
// against minReplication. Only supported for regular-storage files.
boolean checkFileProgress(INodeFile v, boolean checkall) throws IOException {
  INode.enforceRegularStorageINode(v,
      "checkFileProgress is not supported for non-regular files");
  if (checkall) {
    //
    // check all blocks of the file.
    //
    int closeFileReplicationMin =
        Math.min(v.getReplication(), this.minCloseReplication);
    for (Block block : v.getBlocks()) {
      if (!checkBlockProgress(v, block, closeFileReplicationMin)) {
        return false;
      }
    }
    return true;
  } else {
    //
    // check the penultimate block of this file
    //
    Block b = v.getStorage().getPenultimateBlock();
    return checkBlockProgress(v, b, minReplication);
  }
}
java
{ "resource": "" }
q161477
FSNamesystem.removeFromInvalidates
train
/**
 * Drops the entire pending-invalidation set for the given storage and
 * decrements the pending-deletion counter accordingly; no-op when the
 * storage has no pending invalidations.
 */
void removeFromInvalidates(String storageID) {
  final LightWeightHashSet<Block> removed =
      recentInvalidateSets.remove(storageID);
  if (removed == null) {
    return;
  }
  pendingDeletionBlocksCount -= removed.size();
}
java
{ "resource": "" }
q161478
FSNamesystem.addToInvalidates
train
// Queues block b for deletion on datanode n and logs the action (logging is
// suppressed during startup / safe mode).
void addToInvalidates(Block b, DatanodeInfo n, boolean ackRequired) {
  addToInvalidatesNoLog(b, n, ackRequired);
  if (isInitialized && !isInSafeModeInternal()) {
    // do not log in startup phase
    NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
        + b.getBlockName() + " is added to invalidSet of " + n.getName());
  }
}
java
{ "resource": "" }
q161479
FSNamesystem.addToInvalidatesNoLog
train
// Queues block b for deletion on datanode n without logging. No-op on a
// standby avatar. When no ack is required, the block's numBytes field is
// overwritten with the NO_ACK sentinel (note: this mutates the caller's
// Block object).
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // We are the standby avatar and we don't want to add blocks to the
  // invalidates list.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }
  LightWeightHashSet<Block> invalidateSet =
      recentInvalidateSets.get(n.getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  if(!ackRequired){
    // sentinel telling the datanode no deletion ack is expected
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  if (invalidateSet.add(b)) {
    // only count blocks newly added to the set
    pendingDeletionBlocksCount++;
  }
}
java
{ "resource": "" }
q161480
FSNamesystem.addToInvalidates
train
// Queues block b for deletion on EVERY datanode currently holding it, then
// logs the full list of nodes (logging suppressed during startup/safe mode).
private void addToInvalidates(Block b, boolean ackRequired) {
  StringBuilder sb = new StringBuilder();
  for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(b);
      it.hasNext();) {
    DatanodeDescriptor node = it.next();
    addToInvalidatesNoLog(b, node, ackRequired);
    sb.append(node.getName());
    sb.append(' ');
  }
  if (isInitialized && !isInSafeMode()) {
    // do not log in startup phase
    NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
        + b.getBlockName() + " is added to invalidSet of " + sb);
  }
}
java
{ "resource": "" }
q161481
FSNamesystem.markBlockAsCorrupt
train
// Marks the replica of blk on datanode dn as corrupt. Unknown blocks are
// ignored (block report may not have arrived yet); blocks without a file are
// invalidated outright. Otherwise the replica is recorded in corruptReplicas
// and the block is either invalidated immediately (over-replicated) or queued
// for re-replication. Locking: takes the write lock only for regular calls;
// parallel initial block reports use the dedicated parallel-BR lock instead,
// and the finally block releases exactly what was taken.
public void markBlockAsCorrupt(Block blk, DatanodeInfo dn,
    final boolean parallelInitialBlockReport) throws IOException {
  if (!parallelInitialBlockReport) {
    // regular call, not through parallel block report
    writeLock();
  }
  lockParallelBRLock(parallelInitialBlockReport);
  try {
    DatanodeDescriptor node = getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot mark block" + blk.getBlockName()
          + " as corrupt because datanode " + dn.getName()
          + " does not exist. ");
    }
    final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk);
    if (storedBlockInfo == null) {
      // Check if the replica is in the blockMap, if not
      // ignore the request for now. This could happen when BlockScanner
      // thread of Datanode reports bad block before Block reports are sent
      // by the Datanode on startup
      NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
          + "block " + blk + " could not be marked "
          + "as corrupt as it does not exists in "
          + "blocksMap");
    } else {
      INodeFile inode = storedBlockInfo.getINode();
      if (inode == null) {
        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
            + "block " + blk + " could not be marked "
            + "as corrupt as it does not belong to "
            + "any file");
        addToInvalidates(storedBlockInfo, node, false);
        return;
      }
      // Add this replica to corruptReplicas Map
      if (!corruptReplicas.addToCorruptReplicasMap(storedBlockInfo, node)) {
        // already recorded as corrupt on this node
        return;
      }
      NumberReplicas num = countNodes(storedBlockInfo);
      short blockReplication = inode.getBlockReplication(storedBlockInfo);
      if (num.liveReplicas() > blockReplication) {
        // the block is over-replicated so invalidate the replicas immediately
        invalidateBlock(storedBlockInfo, node, true);
      } else if (isPopulatingReplQueuesInternal()) {
        // add the block to neededReplication
        int numCurrentReplicas = num.liveReplicas()
            + pendingReplications.getNumReplicas(storedBlockInfo);
        updateNeededReplicationQueue(storedBlockInfo, -1, numCurrentReplicas,
            num.decommissionedReplicas, node, blockReplication);
      }
    }
  } finally {
    if (!parallelInitialBlockReport) {
      writeUnlock();
    }
    unlockParallelBRLock(parallelInitialBlockReport);
  }
}
java
{ "resource": "" }
q161482
FSNamesystem.invalidateBlock
train
// Invalidates the replica of blk on datanode dn — but only when more than one
// live replica exists, so the last copy is never deleted.
private void invalidateBlock(Block blk, DatanodeInfo dn, boolean ackRequired)
    throws IOException {
  NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: " + blk
      + " on " + dn.getName());
  DatanodeDescriptor node = getDatanode(dn);
  if (node == null) {
    throw new IOException("Cannot invalidate block " + blk
        + " because datanode " + dn.getName() + " does not exist.");
  }
  // Check how many copies we have of the block. If we have at least one
  // copy on a live node, then we can delete it.
  // NOTE(review): the code requires MORE than one live replica (count > 1),
  // not "at least one" as the comment above says — confirm intent.
  int count = countNodes(blk).liveReplicas();
  if (count > 1) {
    // delete with ACK
    addToInvalidates(blk, dn, ackRequired);
    removeStoredBlock(blk, node);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: "
          + blk + " on " + dn.getName() + " listed for deletion.");
    }
  } else {
    NameNode.stateChangeLog.info("BLOCK* NameSystem.invalidateBlocks: " + blk
        + " on " + dn.getName()
        + " is the only copy and was not deleted.");
  }
}
java
{ "resource": "" }
q161483
FSNamesystem.hardLinkTo
train
// Creates a hard link from dst to src (via hardLinkToInternal), syncs the
// edit log, and on success records an audit event. Returns true on success.
public boolean hardLinkTo(String src, String dst) throws IOException {
  INode dstNode = hardLinkToInternal(src, dst);
  getEditLog().logSync(false);
  if (dstNode != null && auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(),
        "hardlink", src, dst, dstNode);
  }
  return dstNode != null;
}
java
{ "resource": "" }
q161484
FSNamesystem.deleteInternal
train
// Deletes src. Under the write lock: checks safe mode, permissions, the
// never-delete whitelist, and the non-recursive/non-empty case, then detaches
// the subtree. Afterwards the subtree's blocks are collected and removed in
// increments of BLOCK_DELETION_INCREMENT, re-acquiring the lock per round
// (with a 1 ms sleep) so other operations can interleave. Returns false when
// nothing was deleted.
boolean deleteInternal(String src, INode[] inodes, boolean recursive,
    boolean enforcePermission) throws IOException {
  ArrayList<BlockInfo> collectedBlocks = new ArrayList<BlockInfo>();
  INode targetNode = null;
  byte[][] components = inodes == null
      ? INodeDirectory.getPathComponents(src) : null;
  writeLock();
  try {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
    }
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot delete " + src, safeMode);
    }
    if (inodes == null) {
      inodes = new INode[components.length];
      dir.rootDir.getExistingPathINodes(components, inodes);
    }
    if (enforcePermission && isPermissionEnabled
        && isPermissionCheckingEnabled(inodes)) {
      checkPermission(src, inodes, false, null, FsAction.WRITE, null,
          FsAction.ALL);
    }
    if (neverDeletePaths.contains(src)) {
      NameNode.stateChangeLog.warn("DIR* NameSystem.delete: "
          + " Trying to delete a whitelisted path " + src + " by user "
          + getCurrentUGI() + " from server " + Server.getRemoteIp());
      throw new IOException(
          "Deleting a whitelisted directory is not allowed. " + src);
    }
    if ((!recursive) && (!dir.isDirEmpty(inodes[inodes.length-1]))) {
      throw new IOException(src + " is non empty");
    }
    targetNode = dir.delete(src, inodes, collectedBlocks,
        BLOCK_DELETION_INCREMENT);
    if (targetNode == null) {
      return false;
    }
  } finally {
    writeUnlock();
  }
  List<INode> removedINodes = new ArrayList<INode>();
  while (targetNode.name != null) {
    // Iteratively remove blocks
    collectedBlocks.clear();
    // sleep to make sure that the lock can be grabbed by another thread
    try {
      Thread.sleep(1);
    } catch (InterruptedException e) {
      throw new InterruptedIOException(e.getMessage());
    }
    writeLock();
    try {
      int filesRemoved = targetNode.collectSubtreeBlocksAndClear(
          collectedBlocks, BLOCK_DELETION_INCREMENT, removedINodes);
      incrDeletedFileCount(this, filesRemoved);
      removeBlocks(collectedBlocks);
      // remove from inodeMap
      dir.removeFromInodeMap(removedINodes);
    } finally {
      writeUnlock();
    }
  }
  collectedBlocks.clear();
  removedINodes.clear();
  return true;
}
java
{ "resource": "" }
q161485
FSNamesystem.removeBlocks
train
// Purges each block from every replication-related structure (excess map,
// needed-replications queue, corrupt map, pending replications), queues it
// for deletion on all holders, and removes it from the blocks map.
private void removeBlocks(List<BlockInfo> blocks) {
  if (blocks == null) {
    return;
  }
  for (BlockInfo b : blocks) {
    removeFromExcessReplicateMap(b);
    neededReplications.remove(b, -1);
    corruptReplicas.removeFromCorruptReplicasMap(b);
    if (pendingReplications != null) {
      // drain every pending-replication entry for this block
      int replicas = pendingReplications.getNumReplicas(b);
      for (int i = 0; i < replicas; i++) {
        pendingReplications.remove(b);
      }
    }
    addToInvalidates(b, false);
    blocksMap.removeBlock(b);
  }
}
java
{ "resource": "" }
q161486
FSNamesystem.removePathAndBlocks
train
// Removes any leases whose paths start with src, then purges the given
// blocks. Caller must hold the write lock once the RPC server is running.
void removePathAndBlocks(String src, List<BlockInfo> blocks)
    throws IOException {
  // No need for lock until we start accepting requests from clients.
  assert (!nameNode.isRpcServerRunning() || hasWriteLock());
  leaseManager.removeLeaseWithPrefixPath(src);
  removeBlocks(blocks);
}
java
{ "resource": "" }
q161487
FSNamesystem.fsync
train
void fsync(String src, String clientName) throws IOException { NameNode.stateChangeLog.info("BLOCK* NameSystem.fsync: file " + src + " for " + clientName); writeLock(); try { if (isInSafeMode()) { throw new SafeModeException("Cannot fsync file " + src, safeMode); } INodeFileUnderConstruction pendingFile = checkLease(src, clientName); // If the block has a length of zero, set it to size 1 so // that lease recovery will not discard it. Block last = pendingFile.getLastBlock(); if (last.getNumBytes() == 0) { last.setNumBytes(1); } dir.persistBlocks(src, pendingFile); } finally { writeUnlock(); } getEditLog().logSync(); }
java
{ "resource": "" }
q161488
FSNamesystem.internalReleaseLease
train
void internalReleaseLease(Lease lease, String src, INodeFileUnderConstruction pendingFile) throws IOException { if (lease.hasPath()) { // make a copy of the paths because internalReleaseLeaseOne removes // pathnames from the lease record. String[] leasePaths = new String[lease.getPaths().size()]; lease.getPaths().toArray(leasePaths); LOG.info("Recovering lease: " + lease + " for paths " + Arrays.toString(leasePaths)); for (String p: leasePaths) { internalReleaseLeaseOne(lease, p); } } else { internalReleaseLeaseOne(lease, src, pendingFile, false); } }
java
{ "resource": "" }
q161489
FSNamesystem.discardDone
train
/**
 * Discards a trailing zero-length block of a file under construction and
 * finalizes the file, instead of attempting lease recovery on it.
 *
 * @param pendingFile file under construction being examined
 * @param src         path of the file
 * @return true if the empty last block was discarded and the file finalized;
 *         false if the file has no blocks or the last block is non-empty
 * @throws IOException if block removal or finalization fails
 */
private boolean discardDone(INodeFileUnderConstruction pendingFile, String src) throws IOException {
  Block[] blocks = pendingFile.getBlocks();
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  Block tail = blocks[blocks.length - 1];
  if (tail.getNumBytes() != 0) {
    return false;
  }
  dir.removeBlock(src, pendingFile, tail);
  finalizeINodeFileUnderConstruction(src, pendingFile);
  NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: discarded last block " + tail + " , lease removed for " + src);
  return true;
}
java
{ "resource": "" }
q161490
FSNamesystem.updatePipeline
train
/**
 * Updates the last (under-construction) block of a file to a new generation
 * stamp, length and datanode pipeline, as requested by the lease-holding
 * client during pipeline recovery.
 *
 * Idempotent with respect to retries: a request carrying the block's current
 * generation stamp is logged and ignored. A request that would move the block
 * to an older generation stamp or a shorter length is rejected.
 *
 * @param clientName lease holder requesting the update
 * @param oldBlock   the block as currently recorded in the namesystem
 * @param newBlock   same block id with the new generation stamp and length
 * @param newNodes   datanodes forming the recovered pipeline
 * @throws IOException if the lease/block check fails or newBlock is stale
 */
void updatePipeline(String clientName, Block oldBlock, Block newBlock, List<DatanodeID> newNodes) throws IOException {
  LOG.info("updatePipeline(block=" + oldBlock + ", newGenerationStamp=" + newBlock.getGenerationStamp() + ", newLength=" + newBlock.getNumBytes() + ", newNodes=" + newNodes + ")");
  writeLock();
  try {
    // check the validity of the block and lease holder name
    final INodeFileUnderConstruction pendingFile = checkUCBlock(oldBlock, clientName);
    final Block oldBlockInfo = pendingFile.getLastBlock();
    // check new GS & length: this is not expected
    if (newBlock.getGenerationStamp() == oldBlockInfo.getGenerationStamp()) {
      // we will do nothing if the GS is the same, to make this method
      // idempotent, this should come from the avatar failover retry.
      String msg = "Update " + oldBlock + " (len = " + oldBlockInfo.getNumBytes() + ") to a same generation stamp: " + newBlock + " (len = " + newBlock.getNumBytes() + ")";
      LOG.warn(msg);
      return;
    }
    if (newBlock.getGenerationStamp() < oldBlockInfo.getGenerationStamp() || newBlock.getNumBytes() < oldBlockInfo.getNumBytes()) {
      // Reject regressions: recovery must never shrink the block or roll
      // its generation stamp backwards.
      String msg = "Update " + oldBlock + " (len = " + oldBlockInfo.getNumBytes() + ") to an older state: " + newBlock + " (len = " + newBlock.getNumBytes() + ")";
      LOG.warn(msg);
      throw new IOException(msg);
    }
    // Remove old block from blocks map. This always has to be done
    // because the generation stamp of this block is changing.
    blocksMap.removeBlock(oldBlockInfo);
    // update last block, add it to the blocks map
    BlockInfo newBlockInfo = blocksMap.addINode(newBlock, pendingFile, pendingFile.getReplication());
    // find the DatanodeDescriptor objects for the new pipeline
    DatanodeDescriptor[] descriptors = null;
    if (!newNodes.isEmpty()) {
      descriptors = new DatanodeDescriptor[newNodes.size()];
      for (int i = 0; i < newNodes.size(); i++) {
        descriptors[i] = getDatanode(newNodes.get(i));
      }
    }
    // add locations into the INodeUnderConstruction
    pendingFile.setLastBlock(newBlockInfo, descriptors);
    // persist blocks only if append is supported
    String src = leaseManager.findPath(pendingFile);
    if (supportAppends) {
      dir.persistBlocks(src, pendingFile);
    }
  } finally {
    writeUnlock();
  }
  // logSync is done outside the lock to avoid holding it across edit-log I/O.
  if (supportAppends) {
    getEditLog().logSync();
  }
  LOG.info("updatePipeline(" + oldBlock + ") successfully to " + newBlock);
}
java
{ "resource": "" }
q161491
FSNamesystem.newStorageID
train
/**
 * Generates a fresh datanode storage ID of the form {@code "DS" + random int},
 * retrying until the candidate does not collide with an ID already present in
 * {@code datanodeMap}.
 *
 * @return a storage ID not currently registered
 */
private String newStorageID() {
  String candidate;
  do {
    candidate = "DS" + Integer.toString(r.nextInt());
  } while (datanodeMap.get(candidate) != null);
  return candidate;
}
java
{ "resource": "" }
q161492
FSNamesystem.computeDatanodeWork
train
public int computeDatanodeWork() throws IOException { int workFound = 0; int blocksToProcess = 0; int nodesToProcess = 0; // blocks should not be replicated or removed if safe mode is on if (isInSafeMode()) { updateReplicationCounts(workFound); return workFound; } synchronized (heartbeats) { blocksToProcess = (int) (heartbeats.size() * ReplicationConfigKeys.replicationWorkMultiplier); nodesToProcess = (int) Math.ceil((double) heartbeats.size() * ReplicationConfigKeys.INVALIDATE_WORK_PCT_PER_ITERATION / 100); } workFound = computeReplicationWork(blocksToProcess); // Update FSNamesystemMetrics counters updateReplicationCounts(workFound); workFound += computeInvalidateWork(nodesToProcess); return workFound; }
java
{ "resource": "" }
q161493
FSNamesystem.computeInvalidateWork
train
/**
 * Schedules block-invalidation work on up to {@code nodesToProcess}
 * datanodes, chosen uniformly at random among the nodes that currently have
 * pending invalidations.
 *
 * @param nodesToProcess upper bound on the number of datanodes to task
 * @return total number of blocks scheduled for invalidation this round
 */
int computeInvalidateWork(int nodesToProcess) {
  int numOfNodes = 0;
  ArrayList<String> keyArray = null;
  readLock();
  try {
    numOfNodes = recentInvalidateSets.size();
    // get an array of the keys
    keyArray = new ArrayList<String>(recentInvalidateSets.keySet());
  } finally {
    readUnlock();
  }
  nodesToProcess = Math.min(numOfNodes, nodesToProcess);
  // randomly pick up <i>nodesToProcess</i> nodes
  // and put them at [0, nodesToProcess) via a partial Fisher-Yates shuffle.
  // Whichever of "picked" and "remaining" is smaller is the set that gets
  // swapped, to minimize the number of swaps performed.
  int remainingNodes = numOfNodes - nodesToProcess;
  if (nodesToProcess < remainingNodes) {
    for (int i = 0; i < nodesToProcess; i++) {
      int keyIndex = r.nextInt(numOfNodes - i) + i;
      Collections.swap(keyArray, keyIndex, i); // swap to front
    }
  } else {
    // Shuffle the (fewer) non-picked nodes to the end; the random
    // selection is implicitly what remains at the front.
    for (int i = 0; i < remainingNodes; i++) {
      int keyIndex = r.nextInt(numOfNodes - i);
      Collections.swap(keyArray, keyIndex, numOfNodes - i - 1); // swap to end
    }
  }
  int blockCnt = 0;
  for (int nodeCnt = 0; nodeCnt < nodesToProcess; nodeCnt++) {
    blockCnt += invalidateWorkForOneNode(keyArray.get(nodeCnt));
  }
  return blockCnt;
}
java
{ "resource": "" }
q161494
FSNamesystem.getQuotaForThisPriority
train
private int getQuotaForThisPriority(int totalQuota, int blocksForThisPriority, int blocksForLowerPriorities) { // reserve at most 20% for lower priority blocks int quotaForLowerPriorities = Math.min(totalQuota/5, blocksForLowerPriorities); return Math.min( blocksForThisPriority, totalQuota-quotaForLowerPriorities); }
java
{ "resource": "" }
q161495
FSNamesystem.chooseUnderReplicatedBlocks
train
/**
 * Selects up to {@code blocksToProcess} under-replicated blocks to schedule
 * for replication, grouped by priority level. Iteration resumes from the
 * per-priority cursor {@code replIndex} so that successive calls round-robin
 * through each priority queue instead of restarting at the front.
 *
 * @param blocksToProcess upper bound on the total number of blocks returned
 * @return one list of blocks per priority level (UnderReplicatedBlocks.LEVEL
 *         lists), each possibly empty
 */
List<List<BlockInfo>> chooseUnderReplicatedBlocks(int blocksToProcess) {
  // initialize data structure for the return value
  List<List<BlockInfo>> blocksToReplicate = new ArrayList<List<BlockInfo>>(UnderReplicatedBlocks.LEVEL);
  for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
    blocksToReplicate.add(new ArrayList<BlockInfo>());
  }
  writeLock();
  try {
    synchronized (neededReplications) {
      if (neededReplications.size() == 0) {
        return blocksToReplicate;
      }
      for (int priority = 0; priority<UnderReplicatedBlocks.LEVEL; priority++) {
        // Go through all blocks that need replications of priority
        BlockIterator neededReplicationsIterator = neededReplications.iterator(priority);
        int numBlocks = neededReplications.size(priority);
        // Reset the cursor if the queue shrank below it since last call.
        if (replIndex[priority] > numBlocks) {
          replIndex[priority] = 0;
        }
        // skip to the first unprocessed block, which is at replIndex
        for (int i = 0; i < replIndex[priority] && neededReplicationsIterator.hasNext(); i++) {
          neededReplicationsIterator.next();
        }
        // # of blocks to process for this priority; part of the remaining
        // quota is reserved for the lower-priority levels.
        int blocksToProcessIter = getQuotaForThisPriority(blocksToProcess, numBlocks, neededReplications.getSize(priority+1));
        blocksToProcess -= blocksToProcessIter;
        for (int blkCnt = 0; blkCnt < blocksToProcessIter; blkCnt++, replIndex[priority]++) {
          if (!neededReplicationsIterator.hasNext()) {
            // start from the beginning
            replIndex[priority] = 0;
            neededReplicationsIterator = neededReplications.iterator(priority);
            assert neededReplicationsIterator.hasNext() : "neededReplications should not be empty.";
          }
          BlockInfo block = neededReplicationsIterator.next();
          blocksToReplicate.get(priority).add(block);
        } // end for
      }
    } // end try
    return blocksToReplicate;
  } finally {
    writeUnlock();
  }
}
java
{ "resource": "" }
q161496
FSNamesystem.updateReplicationMetrics
train
/**
 * Updates replication metrics for a batch of scheduled replication work,
 * classifying each source-to-target transfer as same-rack or cross-rack.
 *
 * @param work scheduled replication work items; items without targets are skipped
 */
private void updateReplicationMetrics(List<ReplicationWork> work) {
  for (ReplicationWork item : work) {
    if (item.targets == null) {
      continue;
    }
    for (DatanodeDescriptor target : item.targets) {
      // Count each scheduled transfer by its topology relative to the source.
      if (clusterMap.isOnSameRack(item.srcNode, target)) {
        myFSMetrics.numLocalRackReplications.inc();
      } else {
        myFSMetrics.numAcrossRackReplications.inc();
      }
    }
  }
}
java
{ "resource": "" }
q161497
FSNamesystem.chooseTarget
train
/**
 * Chooses replication targets for one unit of replication work, or returns
 * null when the work item is no longer valid (block no longer needs
 * replication, or it has been invalidated/deleted in the meantime).
 *
 * @param work the replication work item to place
 * @return chosen target datanodes, or null if the work should be dropped
 */
private DatanodeDescriptor[] chooseTarget(ReplicationWork work) {
  // The block may have been satisfied or removed since it was queued.
  if (!neededReplications.contains(work.block)) {
    return null;
  }
  if (work.blockSize == BlockFlags.NO_ACK) {
    LOG.warn("Block " + work.block.getBlockId() + " of the file " + getFullPathName(work.fileINode) + " is invalidated and cannot be replicated.");
    return null;
  }
  if (work.blockSize == BlockFlags.DELETED) {
    LOG.warn("Block " + work.block.getBlockId() + " of the file " + getFullPathName(work.fileINode) + " is a deleted block and cannot be replicated.");
    return null;
  }
  return replicator.chooseTarget(work.fileINode, work.numOfReplicas, work.srcNode, work.containingNodes, null, work.blockSize);
}
java
{ "resource": "" }
q161498
FSNamesystem.isGoodReplica
train
private boolean isGoodReplica(DatanodeDescriptor node, Block block) { Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID()); Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block); return (nodesCorrupt == null || !nodesCorrupt.contains(node)) // not corrupt // not over scheduling for replication && (node.getNumberOfBlocksToBeReplicated() < maxReplicationStreams) // not alredy scheduled for removal && (excessBlocks == null || !excessBlocks.contains(block)) && !node.isDecommissioned(); // not decommissioned }
java
{ "resource": "" }
q161499
FSNamesystem.processPendingReplications
train
/**
 * Re-queues blocks whose pending replication requests have timed out, so the
 * replication monitor will schedule them again with fresh replica counts.
 */
void processPendingReplications() {
  BlockInfo[] timedOut = pendingReplications.getTimedOutBlocks();
  if (timedOut == null) {
    return;
  }
  writeLock();
  try {
    for (BlockInfo block : timedOut) {
      NumberReplicas num = countNodes(block);
      neededReplications.add(block, num.liveReplicas(), num.decommissionedReplicas(), getReplication(block));
    }
  } finally {
    writeUnlock();
  }
  /* If we knew the target datanodes where the replication timed out,
   * we could invoke decBlocksScheduled() on them. It's OK for now. */
}
java
{ "resource": "" }