_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q161800
LinuxUtilizationGauger.getPS
train
protected String[] getPS() { ShellCommandExecutor shellExecutor = new ShellCommandExecutor(CMD); try { shellExecutor.execute(); } catch (IOException e) { LOG.error(StringUtils.stringifyException(e)); return null; } return shellExecutor.getOutput().split("\n"); }
java
{ "resource": "" }
q161801
LinuxUtilizationGauger.parsePS
train
private String[][] parsePS(String[] psStrings) { String[][] result = new String[psStrings.length-1][NUM_FIELDS]; for (int i = 1; i < psStrings.length; i++) { Matcher matcher = psPattern.matcher(psStrings[i]); if (matcher.find()) { for (int j = 0; j < NUM_FIELDS; j++) { result[i-1][j] = matcher.group(j+1); } } } return result; }
java
{ "resource": "" }
q161802
LinuxUtilizationGauger.percentageToGHz
train
private double percentageToGHz(double cpuUsage) { cpuUsage /= 100; cpuUsage /= ttUtilization.getNumCpu(); cpuUsage *= ttUtilization.getCpuTotalGHz(); return cpuUsage; }
java
{ "resource": "" }
q161803
LinuxUtilizationGauger.getSubProcessUsage
train
private double[] getSubProcessUsage(String pid, Map<String, String[]> pidToContent, Map<String, LinkedList<String>> pidToChildPid) { double cpuMemUsage[] = new double[2]; Queue<String> pidQueue = new LinkedList<String>(); pidQueue.add(pid); while (!pidQueue.isEmpty()) { pid = pidQueue.poll(); for (String child : pidToChildPid.get(pid)) { pidQueue.add(child); } String[] psContent = pidToContent.get(pid); double cpuUsage = Double.parseDouble(psContent[PCPU]); cpuUsage = percentageToGHz(cpuUsage); double memUsage = Double.parseDouble(psContent[RSS]); // "ps -eo rss" gives memory in kB. We convert it in GB memUsage /= 1000000d; cpuMemUsage[0] += cpuUsage; cpuMemUsage[1] += memUsage; } return cpuMemUsage; }
java
{ "resource": "" }
q161804
UnderReplicatedBlocks.clear
train
void clear() { for(int i=0; i<LEVEL; i++) { priorityQueues.get(i).clear(); } raidQueue.clear(); }
java
{ "resource": "" }
q161805
UnderReplicatedBlocks.size
train
synchronized int size( int priority) { if (priority < 0 || priority >= LEVEL) { throw new IllegalArgumentException("Unsupported priority: " + priority); } return priorityQueues.get(priority).size(); }
java
{ "resource": "" }
q161806
UnderReplicatedBlocks.getSize
train
synchronized int getSize( int priority ) { int size = 0; for (int i=priority; i<LEVEL; i++) { size += priorityQueues.get(i).size(); } return size; }
java
{ "resource": "" }
q161807
NodeSnapshot.removeNode
train
public void removeNode(ClusterNode node) { String host = node.getHost(); NodeContainer container = hostToRunnableNode.get(host); if (container != null) { if (container.removeNode(node)) { runnableNodeCount--; } if (container.isEmpty()) { hostToRunnableNode.remove(host); } } Node rack = topologyCache.getNode(host).getParent(); container = rackToRunnableNode.get(rack); if (container != null) { container.removeNode(node); if (container.isEmpty()) { rackToRunnableNode.remove(rack); } } }
java
{ "resource": "" }
q161808
ImageVisitor.visit
train
void visit(ImageElement element, int value) throws IOException { visit(element, Integer.toString(value)); }
java
{ "resource": "" }
q161809
ImageVisitor.visitEnclosingElement
train
void visitEnclosingElement(ImageElement element, ImageElement key, int value) throws IOException { visitEnclosingElement(element, key, Integer.toString(value)); }
java
{ "resource": "" }
q161810
Scheduler.addSession
train
public void addSession(String id, Session session) { for (SchedulerForType scheduleThread : schedulersForTypes.values()) { scheduleThread.addSession(id, session); } }
java
{ "resource": "" }
q161811
Scheduler.start
train
public void start() { for (Thread schedulerForType : schedulersForTypes.values()) { LOG.info("Starting " + schedulerForType.getName()); schedulerForType.start(); } configManager.start(); }
java
{ "resource": "" }
q161812
Scheduler.close
train
public void close() { for (SchedulerForType scheduleThread : schedulersForTypes.values()) { scheduleThread.close(); } for (Thread scheduleThread : schedulersForTypes.values()) { Utilities.waitThreadTermination(scheduleThread); } configManager.close(); }
java
{ "resource": "" }
q161813
Scheduler.getPoolInfoMetrics
train
public Map<PoolInfo, PoolInfoMetrics> getPoolInfoMetrics(ResourceType type) { return schedulersForTypes.get(type).getPoolInfoMetrics(); }
java
{ "resource": "" }
q161814
Scheduler.getPoolInfos
train
public List<PoolInfo> getPoolInfos() { Set<PoolInfo> poolInfos = new HashSet<PoolInfo>(); for (ResourceType type : types) { poolInfos.addAll(getPoolInfoMetrics(type).keySet()); } List<PoolInfo> result = new ArrayList<PoolInfo>(); result.addAll(poolInfos); Collections.sort(result); return result; }
java
{ "resource": "" }
q161815
Scheduler.submitMetrics
train
public void submitMetrics(MetricsRecord metricsRecord) { List<PoolMetadata> poolMetadatas = getPoolMetadataList(); PoolFairnessCalculator.calculateFairness(poolMetadatas, metricsRecord); for (SchedulerForType scheduler: schedulersForTypes.values()) { scheduler.submitMetrics(); } }
java
{ "resource": "" }
q161816
TransferFsImage.downloadImageToStorage
train
static MD5Hash downloadImageToStorage(String fsName, long imageTxId, FSImage fsImage, boolean needDigest) throws IOException { // by default do not disable throttling return downloadImageToStorage(fsName, imageTxId, fsImage, needDigest, false); }
java
{ "resource": "" }
q161817
TransferFsImage.downloadImageToStorage
train
static MD5Hash downloadImageToStorage(String fsName, long imageTxId, FSImage dstImage, boolean needDigest, boolean disableThrottle) throws IOException { String fileid = GetImageServlet.getParamStringForImage( imageTxId, dstImage.storage, disableThrottle); List<OutputStream> outputStreams = dstImage.getCheckpointImageOutputStreams(imageTxId); if (outputStreams.size() == 0) { throw new IOException("No targets in destination storage!"); } MD5Hash hash = getFileClient(fsName, fileid, outputStreams, dstImage.storage, needDigest); LOG.info("Downloaded image files for txid: " + imageTxId); return hash; }
java
{ "resource": "" }
q161818
TransferFsImage.uploadImageFromStorage
train
static void uploadImageFromStorage(String fsName, String machine, int port, NNStorage storage, long txid) throws IOException { String fileid = GetImageServlet.getParamStringToPutImage( txid, machine, port, storage); LOG.info("Image upload: Posted URL " + fsName + fileid); // this doesn't directly upload an image, but rather asks the NN // to connect back to the 2NN to download the specified image. TransferFsImage.getFileClient(fsName, fileid, null, storage, false); LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName); }
java
{ "resource": "" }
q161819
TransferFsImage.getFileServer
train
public static void getFileServer(OutputStream outstream, File localfile, DataTransferThrottler throttler) throws IOException { byte buf[] = new byte[BUFFER_SIZE]; InputStream infile = null; try { infile = new BufferedInputStream(new FileInputStream(localfile), BUFFER_SIZE); if (InjectionHandler.falseCondition(InjectionEvent.TRANSFERFSIMAGE_GETFILESERVER0) && localfile.getAbsolutePath().contains("secondary")) { // throw exception only when the secondary sends its image throw new IOException("If this exception is not caught by the " + "name-node fs image will be truncated."); } if (InjectionHandler.falseCondition(InjectionEvent.TRANSFERFSIMAGE_GETFILESERVER1) && localfile.getAbsolutePath().contains("fsimage")) { // Test sending image shorter than localfile long len = localfile.length(); buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)]; // This will read at most half of the image // and the rest of the image will be sent over the wire infile.read(buf); } int num = 1; while (num > 0) { num = infile.read(buf); if (num <= 0) { break; } if (InjectionHandler.falseCondition(InjectionEvent.TRANSFERFSIMAGE_GETFILESERVER2)) { // Simulate a corrupted byte on the wire LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!"); buf[0]++; } InjectionHandler.processEvent(InjectionEvent.TRANSFERFSIMAGE_GETFILESERVER3); outstream.write(buf, 0, num); if (throttler != null) { throttler.throttle(num); } } } finally { if (infile != null) { infile.close(); } } }
java
{ "resource": "" }
q161820
TransferFsImage.getFileServerForPartialFiles
train
public static void getFileServerForPartialFiles(OutputStream outstream, String filename, InputStream infile, DataTransferThrottler throttler, long startPosition, long lengthToSend) throws IOException { byte buf[] = new byte[BUFFER_SIZE]; try { int num = 1; while (num > 0) { num = infile.read(buf, 0, Math.min(BUFFER_SIZE, (int) Math.min(lengthToSend, Integer.MAX_VALUE))); lengthToSend -= num; if (num <= 0) { break; } try { outstream.write(buf, 0, num); } catch (Exception e) { // silently ignore. connection might have been closed break; } if (throttler != null) { throttler.throttle(num); } } if (lengthToSend > 0) { LOG.warn("Could not serve requested number of bytes. Left with " + lengthToSend + " bytes for file: " + filename); } } finally { if (infile != null) { infile.close(); } } }
java
{ "resource": "" }
q161821
TransferFsImage.getHttpTimeout
train
private static int getHttpTimeout(Storage st) { if (!(st instanceof NNStorage)) return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT; NNStorage storage = (NNStorage) st; if (storage == null || storage.getConf() == null) { return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT; } return storage.getConf().getInt(DFS_IMAGE_TRANSFER_TIMEOUT_KEY, DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT); }
java
{ "resource": "" }
q161822
TransferFsImage.printProgress
train
private static int printProgress(String str, long received, long advertisedSize, int lastPrinted) { if (advertisedSize == 0) return 0; int currentPercent = (int) ((received * 100) / advertisedSize); if (currentPercent != lastPrinted) FLOG.info("Downloading: " + str + ", completed: " + currentPercent); return currentPercent; }
java
{ "resource": "" }
q161823
DNS.reverseDns
train
@Deprecated public static String reverseDns(InetAddress hostIp, String ns) throws NamingException { // // Builds the reverse IP lookup form // This is formed by reversing the IP numbers and appending in-addr.arpa // String[] parts = hostIp.getHostAddress().split("\\."); String reverseIP = parts[3] + "." + parts[2] + "." + parts[1] + "." + parts[0] + ".in-addr.arpa"; DirContext ictx = new InitialDirContext(); Attributes attribute = ictx.getAttributes("dns://" // Use "dns:///" if the default + ((ns == null) ? "" : ns) + // nameserver is to be used "/" + reverseIP, new String[] { "PTR" }); ictx.close(); return attribute.get("PTR").get().toString(); }
java
{ "resource": "" }
q161824
DNS.getIPs
train
public static String[] getIPs(String strInterface) throws UnknownHostException { try { boolean toGetIpv4 = true; if (System.getProperty(PREFER_IPV6_ADDRESS_PROPERTY) != null) { toGetIpv4 = "false".equals(System .getProperty(PREFER_IPV6_ADDRESS_PROPERTY)); } NetworkInterface netIF = NetworkInterface.getByName(strInterface); if (netIF == null) { return new String[] { InetAddress.getLocalHost() .getHostAddress() }; } else { Vector<String> ips = new Vector<String>(); Enumeration<InetAddress> e = netIF.getInetAddresses(); while (e.hasMoreElements()) { InetAddress curr = e.nextElement(); if (toGetIpv4 && !(curr instanceof Inet4Address)) { continue; } ips.add(curr.getHostAddress()); } return ips.toArray(new String[] {}); } } catch (SocketException e) { return new String[] { InetAddress.getLocalHost().getHostAddress() }; } }
java
{ "resource": "" }
q161825
DNS.getDefaultIP
train
public static String getDefaultIP(String strInterface) throws UnknownHostException { String[] ips = getIPs(strInterface); return ips[0]; }
java
{ "resource": "" }
q161826
DNS.getHosts
train
@Deprecated public static String[] getHosts(String strInterface, String nameserver) throws UnknownHostException { String[] ips = getIPs(strInterface); Vector<String> hosts = new Vector<String>(); for (int ctr = 0; ctr < ips.length; ctr++) try { hosts.add(reverseDns(InetAddress.getByName(ips[ctr]), nameserver)); } catch (Exception e) { } if (hosts.size() == 0) return new String[] { InetAddress.getLocalHost().getCanonicalHostName() }; else return hosts.toArray(new String[] {}); }
java
{ "resource": "" }
q161827
Journal.refreshCachedData
train
private synchronized void refreshCachedData() throws IOException { IOUtils.closeStream(committedTxnId); File currentDir = journalStorage.getSingularStorageDir().getCurrentDir(); this.lastPromisedEpoch = new PersistentLongFile( new File(currentDir, LAST_PROMISED_FILENAME), 0); this.lastWriterEpoch = new PersistentLongFile( new File(currentDir, LAST_WRITER_EPOCH), 0); this.committedTxnId = new BestEffortLongFile( new File(currentDir, COMMITTED_TXID_FILENAME), HdfsConstants.INVALID_TXID); metrics.lastWriterEpoch.set(lastWriterEpoch.get()); }
java
{ "resource": "" }
q161828
Journal.copyMetaFilesForUpgrade
train
private synchronized void copyMetaFilesForUpgrade() throws IOException { Configuration conf = new Configuration(); File currentDir = journalStorage.getSingularStorageDir().getCurrentDir(); File prevDir = journalStorage.getSingularStorageDir().getPreviousTmp(); FileSystem fs = FileSystem.getLocal(conf).getRaw(); FileUtil.copy(new File(prevDir, LAST_PROMISED_FILENAME), fs, new File( currentDir, LAST_PROMISED_FILENAME), false, conf); FileUtil.copy(new File(prevDir, LAST_WRITER_EPOCH), fs, new File( currentDir, LAST_WRITER_EPOCH), false, conf); FileUtil.copy(new File(prevDir, COMMITTED_TXID_FILENAME), fs, new File( currentDir, COMMITTED_TXID_FILENAME), false, conf); FileUtil.copy(new File(prevDir, JNStorage.PAXOS_DIR), fs, new File( currentDir, JNStorage.PAXOS_DIR), false, conf); }
java
{ "resource": "" }
q161829
Journal.rollbackImage
train
public void rollbackImage(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getLayoutVersion() != 0, "can't rollback with uninitialized layout version: %s", nsInfo.toColonSeparatedString()); LOG.info("Rolling back image " + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); imageStorage.rollback(nsInfo); }
java
{ "resource": "" }
q161830
Journal.doUpgradeImage
train
private void doUpgradeImage(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't upgrade with uninitialized namespace info: %s", nsInfo.toColonSeparatedString()); LOG.info("Upgrading image " + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); // clear the digest for the most recent image, it might change during // upgrade checkpointImageDigests.remove(mostRecentCheckpointTxid); imageStorage.doUpgrade(nsInfo); }
java
{ "resource": "" }
q161831
Journal.completeUpgradeImage
train
private void completeUpgradeImage(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't upgrade with uninitialized namespace info: %s", nsInfo.toColonSeparatedString()); LOG.info("Completing Upgrading image " + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); // Do something about checkpoint image digests. imageStorage.completeUpgrade(nsInfo); }
java
{ "resource": "" }
q161832
Journal.completeUpgradeJournal
train
private void completeUpgradeJournal(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't upgrade with uninitialized namespace info: %s", nsInfo.toColonSeparatedString()); LOG.info("Completing Upgrading journal" + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); journalStorage.completeUpgrade(nsInfo); }
java
{ "resource": "" }
q161833
Journal.rollbackJournal
train
public void rollbackJournal(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getLayoutVersion() != 0, "can't rollback with uninitialized layout version : %s", nsInfo.toColonSeparatedString()); LOG.info("Rolling back journal " + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); journalStorage.rollback(nsInfo); refreshCachedData(); }
java
{ "resource": "" }
q161834
Journal.doUpgradeJournal
train
private void doUpgradeJournal(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't upgrade with uninitialized namespace info: %s", nsInfo.toColonSeparatedString()); LOG.info("Upgrading journal " + this.getJournalId() + " with namespace info: (" + nsInfo.toColonSeparatedString() + ")"); journalStorage.doUpgrade(nsInfo); copyMetaFilesForUpgrade(); refreshCachedData(); }
java
{ "resource": "" }
q161835
Journal.recoverJournal
train
private void recoverJournal(StartupOption startOpt) throws IOException { LOG.info("Recovering journal " + this.getJournalId()); journalStorage.recover(startOpt); }
java
{ "resource": "" }
q161836
Journal.recoverImage
train
private void recoverImage(StartupOption startOpt) throws IOException { LOG.info("Recovering image" + this.getJournalId()); imageStorage.recover(startOpt); }
java
{ "resource": "" }
q161837
Journal.close
train
@Override // Closeable public void close() throws IOException { journalStorage.close(); imageStorage.close(); IOUtils.closeStream(committedTxnId); }
java
{ "resource": "" }
q161838
Journal.newEpoch
train
synchronized NewEpochResponseProto newEpoch( NamespaceInfo nsInfo, long epoch) throws IOException { checkJournalStorageFormatted(); journalStorage.checkConsistentNamespace(nsInfo); // if we are storing image too, check consistency as well if (imageStorage.isFormatted()) { imageStorage.checkConsistentNamespace(nsInfo); } // Check that the new epoch being proposed is in fact newer than // any other that we've promised. if (epoch <= getLastPromisedEpoch()) { throw new IOException("Proposed epoch " + epoch + " <= last promise " + getLastPromisedEpoch()); } updateLastPromisedEpoch(epoch); abortCurSegment(); NewEpochResponseProto ret = new NewEpochResponseProto(); EditLogFile latestFile = scanStorageForLatestEdits(); if (latestFile != null) { ret.setLastSegmentTxId(latestFile.getFirstTxId()); } return ret; }
java
{ "resource": "" }
q161839
Journal.checkRequest
train
private synchronized void checkRequest(RequestInfo reqInfo) throws IOException { // Invariant 25 from ZAB paper if (reqInfo.getEpoch() < lastPromisedEpoch.get()) { throw new IOException("IPC's epoch " + reqInfo.getEpoch() + " is less than the last promised epoch " + lastPromisedEpoch.get()); } else if (reqInfo.getEpoch() > lastPromisedEpoch.get()) { // A newer client has arrived. Fence any previous writers by updating // the promise. updateLastPromisedEpoch(reqInfo.getEpoch()); } // Ensure that the IPCs are arriving in-order as expected. if (reqInfo.getIpcSerialNumber() <= currentEpochIpcSerial) { checkSync(false, "IPC serial %s from client %s was not higher than prior highest " + "IPC serial %s", reqInfo.getIpcSerialNumber(), Server.getRemoteIp(), currentEpochIpcSerial); } currentEpochIpcSerial = reqInfo.getIpcSerialNumber(); if (reqInfo.hasCommittedTxId()) { if (reqInfo.getCommittedTxId() < committedTxnId.get()) { throw new IllegalArgumentException( "Client trying to move committed txid backward from " + committedTxnId.get() + " to " + reqInfo.getCommittedTxId()); } // persist txid every second, as it is not needed for correctness boolean persist = (now() - lastPersistedCommittedTxId) > 1000; if (persist) { lastPersistedCommittedTxId = now(); } committedTxnId.set(reqInfo.getCommittedTxId(), persist); } }
java
{ "resource": "" }
q161840
Journal.startLogSegment
train
public synchronized void startLogSegment(RequestInfo reqInfo, long txid) throws IOException { assert fjm != null; checkJournalStorageFormatted(); checkRequest(reqInfo); if (curSegment != null) { LOG.warn("Client is requesting a new log segment " + txid + " though we are already writing " + curSegment + ". " + "Aborting the current segment in order to begin the new one."); // The writer may have lost a connection to us and is now // re-connecting after the connection came back. // We should abort our own old segment. abortCurSegment(); } // Paranoid sanity check: we should never overwrite a finalized log file. // Additionally, if it's in-progress, it should have at most 1 transaction. // This can happen if the writer crashes exactly at the start of a segment. EditLogFile existing = fjm.getLogFile(txid); if (existing != null) { if (!existing.isInProgress()) { throw new IllegalStateException("Already have a finalized segment " + existing + " beginning at " + txid); } // If it's in-progress, it should only contain one transaction, // because the "startLogSegment" transaction is written alone at the // start of each segment. existing.validateLog(); if (existing.getLastTxId() != existing.getFirstTxId()) { throw new IllegalStateException("The log file " + existing + " seems to contain valid transactions"); } } long curLastWriterEpoch = lastWriterEpoch.get(); if (curLastWriterEpoch != reqInfo.getEpoch()) { LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch + " to " + reqInfo.getEpoch() + " for client " + Server.getRemoteIp()); lastWriterEpoch.set(reqInfo.getEpoch()); metrics.lastWriterEpoch.set(reqInfo.getEpoch()); } // The fact that we are starting a segment at this txid indicates // that any previous recovery for this same segment was aborted. // Otherwise, no writer would have started writing. So, we can // remove the record of the older segment here. 
purgePaxosDecision(txid); curSegment = fjm.startLogSegment(txid); curSegmentTxId = txid; nextTxId = txid; // the layout version has only been written // plus OP_INVALID currentSegmentWrittenBytes = 5L; // inform the syncer service that we might have some work to do if (journalNode != null) { journalNode.addSyncTask(this, curSegmentTxId); } }
java
{ "resource": "" }
q161841
Journal.finalizeLogSegment
train
public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, long endTxId) throws IOException { checkJournalStorageFormatted(); checkRequest(reqInfo); boolean needsValidation = true; // Finalizing the log that the writer was just writing. if (startTxId == curSegmentTxId) { if (curSegment != null) { curSegment.close(); curSegment = null; curSegmentTxId = HdfsConstants.INVALID_TXID; currentSegmentWrittenBytes = 0L; } checkSync(nextTxId == endTxId + 1, "Trying to finalize in-progress log segment %s to end at " + "txid %s but only written up to txid %s", startTxId, endTxId, nextTxId - 1); // No need to validate the edit log if the client is finalizing // the log segment that it was just writing to. needsValidation = false; } FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId); if (elf == null) { throw new JournalOutOfSyncException("No log file to finalize at " + "transaction ID " + startTxId); } if (elf.isInProgress()) { if (needsValidation) { LOG.info("Validating log segment " + elf.getFile() + " about to be " + "finalized"); elf.validateLog(); checkSync(elf.getLastTxId() == endTxId, "Trying to finalize in-progress log segment %s to end at " + "txid %s but log %s on disk only contains up to txid %s", startTxId, endTxId, elf.getFile(), elf.getLastTxId()); } fjm.finalizeLogSegment(startTxId, endTxId); } else { Preconditions.checkArgument(endTxId == elf.getLastTxId(), "Trying to re-finalize already finalized log " + elf + " with different endTxId " + endTxId); } // Once logs are finalized, a different length will never be decided. // During recovery, we treat a finalized segment the same as an accepted // recovery. Thus, we no longer need to keep track of the previously- // accepted decision. The existence of the finalized log segment is enough. purgePaxosDecision(elf.getFirstTxId()); }
java
{ "resource": "" }
q161842
Journal.purgePaxosDecision
train
private void purgePaxosDecision(long segmentTxId) throws IOException { File paxosFile = journalStorage.getPaxosFile(segmentTxId); if (paxosFile.exists()) { if (!paxosFile.delete()) { throw new IOException("Unable to delete paxos file " + paxosFile); } } }
java
{ "resource": "" }
q161843
Journal.syncLog
train
File syncLog(RequestInfo reqInfo, final SegmentStateProto segment, final URL url) throws IOException { long startTxId = segment.getStartTxId(); long epoch = reqInfo.getEpoch(); return syncLog(epoch, segment.getStartTxId(), url, segment.toString(), journalStorage.getSyncLogTemporaryFile(startTxId, epoch)); }
java
{ "resource": "" }
q161844
Journal.syncLog
train
File syncLog(long stamp, final long startTxId, final URL url, String name, File tmpFile) throws IOException { final File[] localPaths = new File[] { tmpFile }; // TODO add security if needed. LOG.info("Synchronizing log " + name + " from " + url); boolean success = false; try { TransferFsImage.doGetUrl( url, ImageSet.convertFilesToStreams(localPaths, journalStorage, url.toString()), journalStorage, true); assert tmpFile.exists(); success = true; } finally { if (!success) { if (!tmpFile.delete()) { LOG.warn("Failed to delete temporary file " + tmpFile); } } } return tmpFile; }
java
{ "resource": "" }
q161845
Journal.completeHalfDoneAcceptRecovery
train
private void completeHalfDoneAcceptRecovery( PersistedRecoveryPaxosData paxosData) throws IOException { if (paxosData == null) { return; } long segmentId = paxosData.getSegmentState().getStartTxId(); long epoch = paxosData.getAcceptedInEpoch(); File tmp = journalStorage.getSyncLogTemporaryFile(segmentId, epoch); if (tmp.exists()) { File dst = journalStorage.getInProgressEditLog(segmentId); LOG.info("Rolling forward previously half-completed synchronization: " + tmp + " -> " + dst); FileUtil.replaceFile(tmp, dst); } }
java
{ "resource": "" }
q161846
Journal.getPersistedPaxosData
train
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId) throws IOException { File f = journalStorage.getPaxosFile(segmentTxId); if (!f.exists()) { // Default instance has no fields filled in (they're optional) return null; } InputStream in = new FileInputStream(f); try { PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in); Preconditions.checkState(ret != null && ret.getSegmentState().getStartTxId() == segmentTxId, "Bad persisted data for segment %s: %s", segmentTxId, ret); return ret; } finally { IOUtils.closeStream(in); } }
java
{ "resource": "" }
q161847
Journal.persistPaxosData
train
private void persistPaxosData(long segmentTxId, PersistedRecoveryPaxosData newData) throws IOException { File f = journalStorage.getPaxosFile(segmentTxId); boolean success = false; AtomicFileOutputStream fos = new AtomicFileOutputStream(f); try { newData.writeDelimitedTo(fos); fos.write('\n'); // Write human-readable data after the protobuf. This is only // to assist in debugging -- it's not parsed at all. OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8); writer.write(String.valueOf(newData)); writer.write('\n'); writer.flush(); fos.flush(); success = true; } finally { if (success) { IOUtils.closeStream(fos); } else { fos.abort(); } } }
java
{ "resource": "" }
q161848
Journal.saveDigestAndRenameCheckpointImage
train
public void saveDigestAndRenameCheckpointImage(long txid, MD5Hash digest) throws IOException { MD5Hash storedDigest = checkpointImageDigests.get(txid); if (storedDigest == null || !storedDigest.equals(digest)) { throw new IOException("Digest of data written: " + storedDigest + " does not match requested digest: " + digest + " for txid: " + txid + ", journal: " + journalId); } imageManager.saveDigestAndRenameCheckpointImage(txid, digest); checkpointImageDigests.remove(txid); }
java
{ "resource": "" }
q161849
GetImageServlet.getThrottler
train
public static final DataTransferThrottler getThrottler(Configuration conf, boolean disableThrottler) { if (disableThrottler) { return null; } long transferBandwidth = conf.getLong(HdfsConstants.DFS_IMAGE_TRANSFER_RATE_KEY, HdfsConstants.DFS_IMAGE_TRANSFER_RATE_DEFAULT); DataTransferThrottler throttler = null; if (transferBandwidth > 0) { throttler = new DataTransferThrottler(transferBandwidth); } return throttler; }
java
{ "resource": "" }
q161850
GetImageServlet.setVerificationHeaders
train
public static void setVerificationHeaders(HttpServletResponse response, File file) throws IOException { response.setHeader(TransferFsImage.CONTENT_LENGTH, String.valueOf(file.length())); MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file); if (hash != null) { response.setHeader(TransferFsImage.MD5_HEADER, hash.toString()); } }
java
{ "resource": "" }
q161851
InMemoryFileSystem.reserveSpaceWithCheckSum
train
public boolean reserveSpaceWithCheckSum(Path f, long size) { RawInMemoryFileSystem mfs = (RawInMemoryFileSystem)getRawFileSystem(); synchronized(mfs) { boolean b = mfs.reserveSpace(f, size); if (b) { long checksumSize = getChecksumFileLength(f, size); b = mfs.reserveSpace(getChecksumFile(f), checksumSize); if (!b) { mfs.unreserveSpace(f); } } return b; } }
java
{ "resource": "" }
q161852
DnsMonitorSecurityManager.setTheManager
train
public static void setTheManager() { if ("true".equalsIgnoreCase(System.getProperty(ACTIVATE_FLAG))) { if (!(System.getSecurityManager() instanceof DnsMonitorSecurityManager)) { System.setSecurityManager(theManager); } } }
java
{ "resource": "" }
q161853
AvatarNodeZkUtil.checkZooKeeperBeforeFailover
train
static ZookeeperTxId checkZooKeeperBeforeFailover(Configuration startupConf, Configuration confg, boolean noverification) throws IOException { AvatarZooKeeperClient zk = null; String fsname = startupConf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY); int maxTries = startupConf.getInt("dfs.avatarnode.zk.retries", 3); Exception lastException = null; for (int i = 0; i < maxTries; i++) { try { zk = new AvatarZooKeeperClient(confg, null, false); LOG.info("Failover: Checking if the primary is empty"); String zkRegistry = zk.getPrimaryAvatarAddress(fsname, new Stat(), false, i > 0); if (zkRegistry != null) { throw new IOException( "Can't switch the AvatarNode to primary since " + "zookeeper record is not clean. Either use shutdownAvatar to kill " + "the current primary and clean the ZooKeeper entry, " + "or clear out the ZooKeeper entry if the primary is dead"); } if (noverification) { return null; } LOG.info("Failover: Obtaining last transaction id from ZK"); String address = startupConf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY); long sessionId = zk.getPrimarySsId(address, i > 0); ZookeeperTxId zkTxId = zk.getPrimaryLastTxId(address, i > 0); if (sessionId != zkTxId.getSessionId()) { throw new IOException("Session Id in the ssid node : " + sessionId + " does not match the session Id in the txid node : " + zkTxId.getSessionId()); } return zkTxId; } catch (Exception e) { LOG.error("Got Exception reading primary node registration " + "from ZooKeeper. Will retry...", e); lastException = e; } finally { shutdownZkClient(zk); } } throw new IOException(lastException); }
java
{ "resource": "" }
q161854
AvatarNodeZkUtil.writeToZooKeeperAfterFailover
train
static long writeToZooKeeperAfterFailover(Configuration startupConf, Configuration confg) throws IOException { AvatarZooKeeperClient zk = null; // Register client port address. String address = startupConf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY); String realAddress = confg.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY); int maxTries = startupConf.getInt("dfs.avatarnode.zk.retries", 3); for (int i = 0; i < maxTries; i++) { try { zk = new AvatarZooKeeperClient(confg, null, false); LOG.info("Failover: Registering to ZK as primary"); final boolean toOverwrite = true; zk.registerPrimary(address, realAddress, toOverwrite); registerClientProtocolAddress(zk, startupConf, confg, toOverwrite); registerDnProtocolAddress(zk, startupConf, confg, toOverwrite); registerHttpAddress(zk, startupConf, confg, toOverwrite); LOG.info("Failover: Writting session id to ZK"); return writeSessionIdToZK(startupConf, zk); } catch (Exception e) { LOG.error("Got Exception registering the new primary " + "with ZooKeeper. Will retry...", e); } finally { shutdownZkClient(zk); } } throw new IOException("Cannot connect to zk"); }
java
{ "resource": "" }
q161855
AvatarNodeZkUtil.writeLastTxidToZookeeper
train
/**
 * Persists the primary's last transaction id (plus block/inode totals and
 * the current session id) to ZooKeeper so a future failover can verify it.
 * A negative {@code lastTxid} is logged and silently skipped.
 *
 * @param lastTxid last transaction id written by this namenode
 * @param totalBlocks total number of blocks in the namespace
 * @param totalInodes total number of inodes in the namespace
 * @param ssid current failover session id
 * @param startupConf startup configuration (logical address, retry count)
 * @param confg current configuration used to build the ZK client
 * @throws IOException if ZK cannot be updated after all retries; the last
 *         underlying failure is attached as the cause
 */
static void writeLastTxidToZookeeper(long lastTxid, long totalBlocks,
    long totalInodes, long ssid, Configuration startupConf,
    Configuration confg) throws IOException {
  AvatarZooKeeperClient zk = null;
  LOG.info("Writing lastTxId: " + lastTxid + ", total blocks: " + totalBlocks
      + ", total inodes: " + totalInodes);
  if (lastTxid < 0) {
    // Best-effort: do not fail shutdown/checkpoint paths on a bad txid.
    LOG.warn("Invalid last transaction id : " + lastTxid
        + " skipping write to zookeeper.");
    return;
  }
  ZookeeperTxId zkTxid = new ZookeeperTxId(ssid, lastTxid, totalBlocks,
      totalInodes);
  int maxTries = startupConf.getInt("dfs.avatarnode.zk.retries", 3);
  Exception lastException = null;
  for (int i = 0; i < maxTries; i++) {
    try {
      zk = new AvatarZooKeeperClient(confg, null, false);
      zk.registerLastTxId(
          startupConf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY), zkTxid);
      return;
    } catch (Exception e) {
      LOG.error("Got Exception when syncing last txid to zk. Will retry...",
          e);
      // Keep the root cause for the final failure, consistent with
      // checkZooKeeperBeforeFailover().
      lastException = e;
    } finally {
      shutdownZkClient(zk);
    }
  }
  throw new IOException("Cannot connect to zk", lastException);
}
java
{ "resource": "" }
q161856
AvatarNodeZkUtil.getPrimaryRegistration
train
/**
 * Reads the primary AvatarNode's registration (its address) from
 * ZooKeeper, retrying on failure.
 *
 * @param startupConf startup configuration (holds the retry count)
 * @param conf current configuration used to build the ZK client
 * @param fsname logical filesystem name whose primary znode is read
 * @return the registered primary address, or null if the znode is empty
 * @throws IOException if ZK cannot be read after all retries; the last
 *         underlying failure is attached as the cause
 */
static String getPrimaryRegistration(Configuration startupConf,
    Configuration conf, String fsname) throws IOException {
  AvatarZooKeeperClient zk = null;
  int maxTries = startupConf.getInt("dfs.avatarnode.zk.retries", 3);
  Exception lastException = null;
  for (int i = 0; i < maxTries; i++) {
    try {
      zk = new AvatarZooKeeperClient(conf, null, false);
      String zkRegistry = zk.getPrimaryAvatarAddress(fsname, new Stat(),
          false);
      return zkRegistry;
    } catch (Exception e) {
      LOG.error(
          "Got Exception when reading primary registration. Will retry...",
          e);
      // Keep the root cause for the final failure, consistent with
      // checkZooKeeperBeforeFailover().
      lastException = e;
    } finally {
      shutdownZkClient(zk);
    }
  }
  throw new IOException("Cannot connect to zk", lastException);
}
java
{ "resource": "" }
q161857
AvatarNodeZkUtil.registerClientProtocolAddress
train
/**
 * Publishes the client (RPC) protocol address of the new primary in
 * ZooKeeper under the default service address, along with any configured
 * aliases.
 *
 * @param zk open ZooKeeper client used for registration
 * @param originalConf startup configuration holding the default address
 * @param conf current configuration holding the instance's real address
 * @param toOverwrite whether an existing registration may be overwritten
 * @return true if a required address was not configured and registration
 *         was skipped, false on success
 *         NOTE(review): callers in this file ignore the return value —
 *         confirm whether the error should propagate instead.
 * @throws UnsupportedEncodingException if address encoding fails
 * @throws IOException if the ZK update fails
 */
private static boolean registerClientProtocolAddress(AvatarZooKeeperClient zk,
    Configuration originalConf, Configuration conf, boolean toOverwrite)
    throws UnsupportedEncodingException, IOException {
  LOG.info("Updating Client Address information in ZooKeeper");
  InetSocketAddress addr = NameNode.getClientProtocolAddress(conf);
  if (addr == null) {
    // Missing instance address: log and bail out (returns "error" flag).
    LOG.error(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
        + " for primary service is not defined");
    return true;
  }
  InetSocketAddress defaultAddr = NameNode
      .getClientProtocolAddress(originalConf);
  if (defaultAddr == null) {
    // Missing default (logical) address: log and bail out.
    LOG.error(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
        + " for default service is not defined");
    return true;
  }
  registerSocketAddress(zk,
      originalConf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY),
      conf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY), toOverwrite);
  /** TODO later: need to handle alias leave it as it is now */
  registerAliases(zk, conf, FSConstants.FS_NAMENODE_ALIASES,
      conf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY), toOverwrite);
  return false;
}
java
{ "resource": "" }
q161858
AvatarNodeZkUtil.registerDnProtocolAddress
train
/**
 * Publishes the datanode protocol (service) address of the new primary in
 * ZooKeeper under the default service address, plus configured aliases.
 *
 * @param zk open ZooKeeper client used for registration
 * @param originalConf startup configuration holding the default address
 * @param conf current configuration holding the instance's real address
 * @param toOverwrite whether an existing registration may be overwritten
 * @throws UnsupportedEncodingException if address encoding fails
 * @throws IOException if the ZK update fails
 */
private static void registerDnProtocolAddress(AvatarZooKeeperClient zk,
    Configuration originalConf, Configuration conf, boolean toOverwrite)
    throws UnsupportedEncodingException, IOException {
  LOG.info("Updating Service Address information in ZooKeeper");
  registerSocketAddress(zk,
      originalConf.get(NameNode.DATANODE_PROTOCOL_ADDRESS),
      conf.get(NameNode.DATANODE_PROTOCOL_ADDRESS), toOverwrite);
  registerAliases(zk, conf, FSConstants.DFS_NAMENODE_DN_ALIASES,
      conf.get(NameNode.DATANODE_PROTOCOL_ADDRESS), toOverwrite);
}
java
{ "resource": "" }
q161859
AvatarNodeZkUtil.registerHttpAddress
train
/**
 * Publishes the HTTP address of the new primary in ZooKeeper under the
 * default HTTP address, plus configured aliases.
 *
 * @param zk open ZooKeeper client used for registration
 * @param originalConf startup configuration holding the default address
 * @param conf current configuration holding the instance's real address
 * @param toOverwrite whether an existing registration may be overwritten
 * @throws UnsupportedEncodingException if address encoding fails
 * @throws IOException if the ZK update fails
 */
private static void registerHttpAddress(AvatarZooKeeperClient zk,
    Configuration originalConf, Configuration conf, boolean toOverwrite)
    throws UnsupportedEncodingException, IOException {
  LOG.info("Updating Http Address information in ZooKeeper");
  String addr = conf.get(FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  String defaultAddr = originalConf
      .get(FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  registerSocketAddress(zk, defaultAddr, addr, toOverwrite);
  registerAliases(zk, conf, FSConstants.DFS_HTTP_ALIASES, addr, toOverwrite);
}
java
{ "resource": "" }
q161860
ServerRegistry.load
train
/**
 * Reloads all saved server definitions from the save directory and
 * replaces the in-memory map atomically. Files that cannot be parsed are
 * logged and skipped.
 */
private synchronized void load() {
  Map<String, HadoopServer> map = new TreeMap<String, HadoopServer>();
  // File.listFiles() returns null when the directory does not exist or is
  // unreadable; treat that as "no saved servers" instead of throwing NPE.
  File[] files = saveDir.listFiles();
  if (files != null) {
    for (File file : files) {
      try {
        HadoopServer server = new HadoopServer(file);
        map.put(server.getLocationName(), server);
      } catch (Exception exn) {
        System.err.println(exn);
      }
    }
  }
  this.servers = map;
}
java
{ "resource": "" }
q161861
ServerRegistry.updateServer
train
public synchronized void updateServer(String originalName, HadoopServer server) { // Update the map if the location name has changed if (!server.getLocationName().equals(originalName)) { servers.remove(originalName); servers.put(server.getLocationName(), server); } store(); fireListeners(server, SERVER_STATE_CHANGED); }
java
{ "resource": "" }
q161862
JournalSet.selectInputStreams
train
/**
 * Collects edit-log input streams starting at {@code fromTxId} from every
 * enabled journal, then chains them and builds redundant streams into the
 * caller's collection. Disabled journals are skipped; journals that fail
 * to enumerate their streams are logged and skipped rather than aborting.
 *
 * @param streams output collection the chained streams are added to
 * @param fromTxId first transaction id the caller needs
 * @param inProgressOk whether in-progress (unfinalized) segments may be used
 * @param validateInProgressSegments whether in-progress segments should be
 *        validated before use
 * @param minRedundancy minimum number of copies required per segment
 * @return the result of chainAndMakeRedundantStreams (whether the
 *         redundancy requirement was met)
 * @throws IOException if chaining the streams fails
 */
synchronized public boolean selectInputStreams(
    Collection<EditLogInputStream> streams, long fromTxId,
    boolean inProgressOk, boolean validateInProgressSegments,
    int minRedundancy) throws IOException {
  // Priority queue orders candidate streams by start txid (comparator).
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (JournalAndStream jas : journals) {
    if (jas.isDisabled()) {
      LOG.info("Skipping jas " + jas + " since it's disabled");
      continue;
    }
    try {
      jas.getManager().selectInputStreams(allStreams, fromTxId, inProgressOk,
          validateInProgressSegments);
    } catch (IOException ioe) {
      // Best-effort: one bad journal must not prevent reading the others.
      LOG.warn("Unable to determine input streams from " + jas.getManager()
          + ". Skipping.", ioe);
    }
  }
  return chainAndMakeRedundantStreams(streams, allStreams, fromTxId,
      inProgressOk, minRedundancy);
}
java
{ "resource": "" }
q161863
JournalSet.hasUnfinalizedSegments
train
/**
 * Checks whether any journal still has an in-progress (unfinalized) edit
 * segment at or after {@code fromTxId}. Disabled journals are skipped and
 * journals that fail to enumerate streams are logged and ignored.
 *
 * @param fromTxId first transaction id to look from
 * @return true if at least one in-progress segment exists, false if all
 *         visible segments are finalized
 */
synchronized boolean hasUnfinalizedSegments(long fromTxId) {
  List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  for (JournalAndStream jas : journals) {
    if (jas.isDisabled()) {
      continue;
    }
    try {
      // get all streams, including inProgress ones.
      jas.getManager().selectInputStreams(streams, fromTxId, true, false);
      for (EditLogInputStream elis : streams) {
        if (elis.isInProgress()) {
          // we found an input stream that is in progress
          return true;
        }
      }
    } catch (IOException ioe) {
      LOG.warn("Unable to determine input streams from " + jas.getManager()
          + ". Skipping.", ioe);
    }
  }
  // all streams are finalized
  return false;
}
java
{ "resource": "" }
q161864
JournalSet.isLocalJournal
train
/**
 * Decides whether the given journal is backed by a local, preferred
 * storage directory. Only FileJournalManager instances can qualify;
 * anything else (including null) is considered non-local.
 *
 * @param jm journal manager to test, may be null
 * @return true if the journal's storage directory is preferred-local
 */
private static boolean isLocalJournal(JournalManager jm) {
  // null instanceof X is false, so this also rejects null managers.
  if (!(jm instanceof FileJournalManager)) {
    return false;
  }
  StorageDirectory sd = ((FileJournalManager) jm).getStorageDirectory();
  return NNStorage.isPreferred(StorageLocationType.LOCAL, sd);
}
java
{ "resource": "" }
q161865
JournalSet.disableAndReportErrorOnJournals
train
/**
 * Disables the given failed journals, reports their errors to the storage
 * and image layers, and then re-checks that enough journals remain. When
 * no journals failed but a previous failure forced a re-check
 * (forceJournalCheck), the check is still performed.
 *
 * @param badJournals journals whose last operation failed; may be null or
 *        empty
 * @param status human-readable name of the failed operation (for logs and
 *        error messages)
 * @throws IOException if too few journals remain after disabling
 */
private void disableAndReportErrorOnJournals(
    List<JournalAndStream> badJournals, String status) throws IOException {
  if (badJournals == null || badJournals.isEmpty()) {
    if (forceJournalCheck) {
      // check status here, because maybe some other operation
      // (e.g., rollEditLog failed and disabled journals) but this
      // was missed by logSync() exit runtime
      forceJournalCheck = false;
      checkJournals(status);
    }
    return; // nothing to do
  }
  for (JournalAndStream j : badJournals) {
    LOG.error("Disabling journal " + j);
    j.abort();
    j.setDisabled(true);
    // report errors on storage directories as well for FJMs
    if (j.journal instanceof FileJournalManager) {
      FileJournalManager fjm = (FileJournalManager) j.journal;
      // pass image to handle image managers
      storage.reportErrorsOnDirectory(fjm.getStorageDirectory(), image);
    }
    // report error on shared journal/image managers
    if (j.journal instanceof ImageManager) {
      ImageManager im = (ImageManager) j.journal;
      im.setImageDisabled(true);
    }
  }
  // update image manager metrics
  if (image != null) {
    image.updateImageMetrics();
  }
  checkJournals(status);
}
java
{ "resource": "" }
q161866
JournalSet.mapJournalsAndReportErrors
train
/**
 * Applies the given closure to every journal sequentially, collecting the
 * journals whose operation threw, then disables and reports them. A
 * failure on one journal does not stop the others from being attempted.
 *
 * @param closure operation to apply to each journal
 * @param status human-readable name of the operation (for logs/errors)
 * @throws IOException if too many journals fail the operation
 */
private void mapJournalsAndReportErrors(
    JournalClosure closure, String status) throws IOException{
  List<JournalAndStream> badJAS = null;
  for (JournalAndStream jas : journals) {
    try {
      closure.apply(jas);
    } catch (Throwable t) {
      // Lazily allocate the failure list only when something fails.
      if (badJAS == null)
        badJAS = new LinkedList<JournalAndStream>();
      LOG.error("Error: " + status + " failed for (journal " + jas + ")", t);
      badJAS.add(jas);
    }
  }
  disableAndReportErrorOnJournals(badJAS, status);
}
java
{ "resource": "" }
q161867
JournalSet.mapJournalsAndReportErrorsParallel
train
private void mapJournalsAndReportErrorsParallel(JournalClosure closure, String status) throws IOException { // set-up calls List<Future<JournalAndStream>> jasResponeses = new ArrayList<Future<JournalAndStream>>( journals.size()); for (JournalAndStream jas : journals) { jasResponeses.add(executor.submit(new JournalSetWorker(jas, closure, status))); } List<JournalAndStream> badJAS = null; // iterate through responses for (Future<JournalAndStream> future : jasResponeses) { JournalAndStream jas = null; try { jas = future.get(); } catch (ExecutionException e) { throw new IOException("This should never happen!!!", e); } catch (InterruptedException e) { throw new IOException("Interrupted whe performing journal operations", e); } if (jas == null) continue; // the worker returns the journal if the operation failed if (badJAS == null) badJAS = new LinkedList<JournalAndStream>(); badJAS.add(jas); } disableAndReportErrorOnJournals(badJAS, status); }
java
{ "resource": "" }
q161868
JournalSet.updateJournalMetrics
train
/**
 * Publishes the current count of disabled (failed) journals to the
 * metrics sink. No-op when metrics are not configured.
 */
void updateJournalMetrics() {
  if (metrics == null) {
    return;
  }
  int disabledCount = 0;
  for (JournalAndStream jas : journals) {
    if (jas.isDisabled()) {
      disabledCount++;
    }
  }
  metrics.journalsFailed.set(disabledCount);
}
java
{ "resource": "" }
q161869
JournalSet.checkJournals
train
/**
 * Verifies that enough journals are still healthy: counts available
 * journals (and separately non-local, i.e. remote or shared, journals),
 * refreshes metrics, and aborts if a required journal is disabled or if
 * either count falls below its configured minimum.
 *
 * @param status human-readable name of the operation being checked (used
 *        in the error message)
 * @return the number of available journals
 * @throws IOException if a required journal is disabled, or too few
 *         journals (overall or non-local) remain available
 */
protected int checkJournals(String status) throws IOException {
  boolean abort = false;
  int journalsAvailable = 0;
  int nonLocalJournalsAvailable = 0;
  for(JournalAndStream jas : journals) {
    if(jas.isDisabled() && jas.isRequired()) {
      // A mandatory journal went down — fail regardless of counts.
      abort = true;
    } else if (jas.isResourceAvailable()) {
      journalsAvailable++;
      if (jas.isRemote() || jas.isShared()) {
        nonLocalJournalsAvailable++;
      }
    }
  }
  // update metrics
  updateJournalMetrics();
  if (abort || journalsAvailable < minimumNumberOfJournals
      || nonLocalJournalsAvailable < minimumNumberOfNonLocalJournals) {
    // Force the next no-failure call to re-run this check as well.
    forceJournalCheck = true;
    String message = status + " failed for too many journals, minimum: "
        + minimumNumberOfJournals + " current: " + journalsAvailable
        + ", non-local: " + minimumNumberOfNonLocalJournals + " current: "
        + nonLocalJournalsAvailable;
    LOG.error(message);
    throw new IOException(message);
  }
  return journalsAvailable;
}
java
{ "resource": "" }
q161870
JournalSet.getEditLogManifest
train
/**
 * Builds a manifest of finalized edit-log segments starting at
 * {@code fromTxId}, merging the segment lists of all journals. For each
 * starting txid, the segment that extends farthest forward is chosen. If
 * a gap is found (no segment starts where the previous one ended),
 * everything accumulated so far is discarded and collection restarts
 * after the gap, so the manifest is always contiguous.
 *
 * @param fromTxId first transaction id the manifest should cover
 * @return a contiguous manifest of the best available segments
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = new ArrayList<RemoteEditLog>();
  for (JournalAndStream j : journals) {
    JournalManager jm = j.getManager();
    try {
      allLogs.addAll(jm.getEditLogManifest(fromTxId).getLogs());
    } catch (Throwable t) {
      // Best-effort: a broken journal must not hide the others' logs.
      LOG.warn("Cannot list edit logs in " + jm, t);
    }
  }
  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
      Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;
  List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = new TreeSet<Long>(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": "
              + "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }
    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
    // NOTE(review): curStartTxId == 0 presumably guards against an
    // in-progress segment whose end txid is INVALID (-1) — confirm.
    if (curStartTxId == 0)
      break;
  }
  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":" + ret);
  }
  return ret;
}
java
{ "resource": "" }
q161871
JournalSet.getSyncTimes
train
/**
 * Returns a space-separated list of the total sync times of all journals
 * that currently have an active output stream.
 *
 * @return sync times joined by single spaces (with a trailing space when
 *         non-empty)
 */
String getSyncTimes() {
  StringBuilder sb = new StringBuilder();
  for (JournalAndStream jas : journals) {
    if (!jas.isActive()) {
      continue;
    }
    sb.append(jas.getCurrentStream().getTotalSyncTime()).append(" ");
  }
  return sb.toString();
}
java
{ "resource": "" }
q161872
JournalSet.transitionNonFileJournals
train
/**
 * Applies a storage transition (e.g. format/upgrade) to every journal
 * that is NOT file-based. When {@code checkEmpty} is set, journals that
 * already contain data are warned about and skipped.
 *
 * @param nsInfo namespace/storage info to transition with
 * @param checkEmpty if true, skip journals that already hold data
 * @param transition the transition to perform
 * @param startOpt startup option passed through to the journal
 * @throws IOException if a journal fails to transition
 */
public void transitionNonFileJournals(StorageInfo nsInfo, boolean checkEmpty,
    Transition transition, StartupOption startOpt) throws IOException {
  for (JournalManager jm : getJournalManagers()) {
    if (jm instanceof FileJournalManager) {
      continue; // file-based journals are handled elsewhere
    }
    if (checkEmpty && jm.hasSomeJournalData()) {
      LOG.warn("Journal " + jm + " is not empty.");
      continue;
    }
    LOG.info(transition + ": " + jm);
    jm.transitionJournal(nsInfo, transition, startOpt);
  }
}
java
{ "resource": "" }
q161873
JournalSet.getInputStream
train
/**
 * Obtains a single edit-log input stream from the given journal manager.
 * With {@code txid == HdfsConstants.INVALID_TXID} the oldest available
 * stream is returned; otherwise the stream whose first transaction id is
 * exactly {@code txid} is required.
 *
 * @param jm journal manager to read from
 * @param txid required first transaction id, or INVALID_TXID for "oldest"
 * @return the matching input stream
 * @throws IOException if no stream is available or none starts at txid
 */
public static EditLogInputStream getInputStream(JournalManager jm, long txid)
    throws IOException {
  List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  // Include in-progress segments; skip validation of them.
  jm.selectInputStreams(streams, txid, true, false);
  if (streams.size() < 1) {
    throw new IOException("Cannot obtain stream for txid: " + txid);
  }
  Collections.sort(streams, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  // we want the "oldest" available stream
  if (txid == HdfsConstants.INVALID_TXID) {
    return streams.get(0);
  }
  // we want a specific stream
  for (EditLogInputStream elis : streams) {
    if (elis.getFirstTxId() == txid) {
      return elis;
    }
  }
  // we cannot obtain the stream
  throw new IOException("Cannot obtain stream for txid: " + txid);
}
java
{ "resource": "" }
q161874
JournalSet.getNonFileJournalManagers
train
/**
 * Returns every registered journal manager that is not file-based.
 *
 * @return a fresh list of the non-file journal managers (possibly empty)
 */
public List<JournalManager> getNonFileJournalManagers() {
  List<JournalManager> result = new ArrayList<JournalManager>();
  for (JournalManager jm : getJournalManagers()) {
    if (jm instanceof FileJournalManager) {
      continue;
    }
    result.add(jm);
  }
  return result;
}
java
{ "resource": "" }
q161875
NNStorage.attemptRestoreRemovedStorage
train
/**
 * Tries to bring previously failed (removed) storage directories back
 * into service. A directory is restored when its root exists and is
 * writable again: it is re-added to the active set, removed from the
 * removed list, and re-locked. Failures to restore are logged and the
 * directory stays removed. Guarded by restorationLock so only one thread
 * restores at a time.
 */
void attemptRestoreRemovedStorage() {
  // if directory is "alive" - copy the images there...
  if(removedStorageDirs.size() == 0)
    return; //nothing to restore
  /* We don't want more than one thread trying to restore at a time */
  synchronized (this.restorationLock) {
    LOG.info("attemptRestoreRemovedStorage: check removed(failed) "+
             "storage. removedStorages size = " + removedStorageDirs.size());
    for(Iterator<StorageDirectory> it = this.removedStorageDirs.iterator();
        it.hasNext();) {
      StorageDirectory sd = it.next();
      File root = sd.getRoot();
      LOG.info("attemptRestoreRemovedStorage: currently disabled dir "
          + root.getAbsolutePath() + "; type=" + sd.getStorageDirType()
          + ";canwrite=" + root.canWrite());
      try {
        if(root.exists() && root.canWrite()) {
          LOG.info("attemptRestoreRemovedStorage: restoring dir "
              + sd.getRoot().getAbsolutePath());
          this.addStorageDir(sd); // restore
          it.remove();
          // NOTE(review): sd.lock() runs after addStorageDir/remove; if
          // locking throws, the dir is already active — confirm intended.
          sd.lock();
        }
      } catch(IOException e) {
        LOG.warn("attemptRestoreRemovedStorage: failed to restore "
            + sd.getRoot().getAbsolutePath(), e);
      }
    }
  }
}
java
{ "resource": "" }
q161876
NNStorage.getStorageDirectory
train
/**
 * Finds the active storage directory whose root corresponds to the given
 * URI (after normalizing both through Util.fileAsURI).
 *
 * @param uri URI of the directory to look up
 * @return the matching storage directory, or null if none matches or the
 *         URI cannot be converted
 */
StorageDirectory getStorageDirectory(URI uri) {
  try {
    URI normalized = Util.fileAsURI(new File(uri));
    for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
      StorageDirectory sd = it.next();
      if (Util.fileAsURI(sd.getRoot()).equals(normalized)) {
        return sd;
      }
    }
  } catch (IOException ioe) {
    LOG.warn("Error converting file to URI", ioe);
  }
  return null;
}
java
{ "resource": "" }
q161877
NNStorage.checkSchemeConsistency
train
private static void checkSchemeConsistency(URI u) throws IOException { String scheme = u.getScheme(); // the URI should have a proper scheme if(scheme == null) { throw new IOException("Undefined scheme for " + u); } }
java
{ "resource": "" }
q161878
NNStorage.getNumStorageDirs
train
/**
 * Counts the storage directories of the given type; a null type counts
 * all storage directories.
 *
 * @param dirType directory type to count, or null for all
 * @return the number of matching storage directories
 */
int getNumStorageDirs(NameNodeDirType dirType) {
  if (dirType == null) {
    return getNumStorageDirs();
  }
  int count = 0;
  for (Iterator<StorageDirectory> it = dirIterator(dirType);
       it.hasNext(); it.next()) {
    count++;
  }
  return count;
}
java
{ "resource": "" }
q161879
NNStorage.getDirectories
train
/**
 * Returns the root directories of all storage directories of the given
 * type; a null type returns the roots of every storage directory.
 *
 * @param dirType directory type to list, or null for all
 * @return list of root File objects
 * @throws IOException declared for interface compatibility
 */
Collection<File> getDirectories(NameNodeDirType dirType)
    throws IOException {
  Iterator<StorageDirectory> it =
      (dirType == null) ? dirIterator() : dirIterator(dirType);
  ArrayList<File> roots = new ArrayList<File>();
  while (it.hasNext()) {
    roots.add(it.next().getRoot());
  }
  return roots;
}
java
{ "resource": "" }
q161880
NNStorage.readTransactionIdFile
train
/**
 * Reads the persisted "seen txid" marker from the given storage
 * directory. Returns 0 when the marker file does not exist or is not
 * readable.
 *
 * @param sd storage directory holding the SEEN_TXID file
 * @return the recorded transaction id, or 0 if absent/unreadable
 * @throws IOException if reading the file fails
 *         NOTE(review): an existing but EMPTY file makes readLine()
 *         return null and Long.valueOf throw NPE — confirm whether that
 *         can occur in practice.
 */
static long readTransactionIdFile(StorageDirectory sd) throws IOException {
  File txidFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
  long txid = 0L;
  if (txidFile.exists() && txidFile.canRead()) {
    BufferedReader br = new BufferedReader(new FileReader(txidFile));
    try {
      txid = Long.valueOf(br.readLine());
      br.close();
      br = null; // closed successfully; nothing left for cleanup
    } finally {
      IOUtils.cleanup(LOG, br);
    }
  }
  return txid;
}
java
{ "resource": "" }
q161881
NNStorage.writeTransactionIdFile
train
void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException { if (txid < -1) { // -1 is valid when formatting throw new IOException("Bad txid: " + txid); } File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID); OutputStream fos = new AtomicFileOutputStream(txIdFile); try { fos.write(String.valueOf(txid).getBytes()); fos.write('\n'); fos.close(); fos = null; } finally { IOUtils.cleanup(LOG, fos); } }
java
{ "resource": "" }
q161882
NNStorage.writeTransactionIdFileToStorage
train
public void writeTransactionIdFileToStorage(long txid, FSImage image) throws IOException { // Write txid marker in all storage directories List<StorageDirectory> badSDs = new ArrayList<StorageDirectory>(); for (StorageDirectory sd : storageDirs) { try { writeTransactionIdFile(sd, txid); } catch(IOException e) { // Close any edits stream associated with this dir and remove directory LOG.warn("writeTransactionIdToStorage failed on " + sd, e); badSDs.add(sd); } } reportErrorsOnDirectories(badSDs, image); if (image != null) { } }
java
{ "resource": "" }
q161883
NNStorage.getFsImageNameCheckpoint
train
/**
 * Returns the candidate checkpoint image file (IMAGE_NEW, i.e.
 * fsimage.ckpt) paths for the given txid, one per image-type storage
 * directory.
 *
 * @param txid transaction id the checkpoint image is named after
 * @return array of checkpoint image file paths
 */
public File[] getFsImageNameCheckpoint(long txid) {
  ArrayList<File> files = new ArrayList<File>();
  for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
       it.hasNext();) {
    StorageDirectory sd = it.next();
    files.add(getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid));
  }
  return files.toArray(new File[files.size()]);
}
java
{ "resource": "" }
q161884
NNStorage.getFsImageName
train
/**
 * Finds an existing, readable fsimage file for the given txid. Prefers a
 * storage directory of the requested location type; if none of that type
 * has the image, the last readable candidate of any type is returned.
 *
 * @param type preferred storage location type (e.g. LOCAL)
 * @param txid transaction id the image is named after
 * @return the preferred image file, a fallback candidate, or null if no
 *         readable image exists
 */
public File getFsImageName(StorageLocationType type, long txid) {
  File lastCandidate = null;
  for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
      it.hasNext();) {
    StorageDirectory sd = it.next();
    File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
    if(sd.getRoot().canRead() && fsImage.exists()) {
      if (isPreferred(type, sd)) {
        // Exact type match wins immediately.
        return fsImage;
      }
      // Remember a usable image in case no preferred one exists.
      lastCandidate = fsImage;
    }
  }
  return lastCandidate;
}
java
{ "resource": "" }
q161885
NNStorage.format
train
/**
 * Formats all storage directories: resets layout version, allocates a
 * fresh namespace id, zeroes the creation time, then formats each
 * directory in turn.
 *
 * @throws IOException if formatting any directory fails
 */
public void format() throws IOException {
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.namespaceID = newNamespaceID();
  this.cTime = 0L;
  for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
    format(it.next());
  }
}
java
{ "resource": "" }
q161886
NNStorage.newNamespaceID
train
static int newNamespaceID() { Random r = new Random(); r.setSeed(FSNamesystem.now()); int newID = 0; while(newID == 0) newID = r.nextInt(0x7FFFFFFF); // use 31 bits only return newID; }
java
{ "resource": "" }
q161887
NNStorage.getDeprecatedProperty
train
/**
 * Looks up a storage property that existed only in older layout versions.
 * Valid only while loading storage written by a past version (during
 * upgrade), which the assertion enforces.
 *
 * @param prop deprecated property name
 * @return the property value, or null if not present
 */
String getDeprecatedProperty(String prop) {
  assert getLayoutVersion() > FSConstants.LAYOUT_VERSION :
    "getDeprecatedProperty should only be done when loading " +
    "storage from past versions during upgrade.";
  return deprecatedProperties.get(prop);
}
java
{ "resource": "" }
q161888
NNStorage.setFields
train
@Override // Storage protected void setFields(Properties props, StorageDirectory sd) throws IOException { super.setFields(props, sd); boolean uState = getDistributedUpgradeState(); int uVersion = getDistributedUpgradeVersion(); if (uState && uVersion != getLayoutVersion()) { props.setProperty("distributedUpgradeState", Boolean.toString(uState)); props .setProperty("distributedUpgradeVersion", Integer.toString(uVersion)); } }
java
{ "resource": "" }
q161889
NNStorage.setDeprecatedPropertiesForUpgrade
train
/**
 * Captures properties from an older-layout VERSION file that no longer
 * exist in the current layout, so they remain queryable during upgrade.
 * Currently only the image message digest is preserved.
 *
 * @param props properties read from the old VERSION file
 */
private void setDeprecatedPropertiesForUpgrade(Properties props) {
  deprecatedProperties = new HashMap<String, String>();
  String md5 = props.getProperty(MESSAGE_DIGEST_PROPERTY);
  if (md5 == null) {
    return;
  }
  deprecatedProperties.put(MESSAGE_DIGEST_PROPERTY, md5);
}
java
{ "resource": "" }
q161890
NNStorage.findFinalizedEditsFile
train
/**
 * Locates the finalized edits segment covering the given txid range in
 * any readable edits storage directory.
 *
 * @param startTxId first transaction id of the segment
 * @param endTxId last transaction id of the segment
 * @return the segment file
 * @throws IOException if no such segment exists in any directory
 */
File findFinalizedEditsFile(long startTxId, long endTxId)
    throws IOException {
  String fileName = getFinalizedEditsFileName(startTxId, endTxId);
  File found = findFile(NameNodeDirType.EDITS, fileName);
  if (found != null) {
    return found;
  }
  throw new IOException(
      "No edits file for txid " + startTxId + "-" + endTxId + " exists!");
}
java
{ "resource": "" }
q161891
NNStorage.findInProgressEditsFile
train
/**
 * Locates the in-progress edits segment starting at the given txid in any
 * readable edits storage directory.
 *
 * @param startTxId first transaction id of the in-progress segment
 * @return the segment file
 * @throws IOException if no such segment exists in any directory
 */
File findInProgressEditsFile(long startTxId) throws IOException {
  String fileName = getInProgressEditsFileName(startTxId);
  File found = findFile(NameNodeDirType.EDITS, fileName);
  if (found != null) {
    return found;
  }
  throw new IOException(
      "No edits file for txid " + startTxId + "-in progress");
}
java
{ "resource": "" }
q161892
NNStorage.findFile
train
/**
 * Searches the "current" subdirectory of every storage directory of the
 * given type for a file with the given name.
 *
 * @param dirType directory type to search
 * @param name file name to look for
 * @return the first readable match, or null if none exists
 */
private File findFile(NameNodeDirType dirType, String name) {
  for (StorageDirectory sd : dirIterable(dirType)) {
    File currentDir = sd.getCurrentDir();
    File candidate = new File(currentDir, name);
    if (currentDir.canRead() && candidate.exists()) {
      return candidate;
    }
  }
  return null;
}
java
{ "resource": "" }
q161893
NNStorage.isPreferred
train
static boolean isPreferred(StorageLocationType type, StorageDirectory sd) { if ((sd instanceof NNStorageDirectory)) { return ((NNStorageDirectory) sd).type == type; } // by default all are preferred return true; }
java
{ "resource": "" }
q161894
NNStorage.getType
train
static StorageLocationType getType(StorageDirectory sd) { if ((sd instanceof NNStorageDirectory)) { return ((NNStorageDirectory) sd).type; } // by default all are local return StorageLocationType.LOCAL; }
java
{ "resource": "" }
q161895
NNStorage.verifyDistributedUpgradeProgress
train
/**
 * Ensures the namenode is not started normally while a distributed
 * upgrade is pending or required. ROLLBACK and IMPORT bypass the check;
 * UPGRADE is the expected way to proceed and is also exempt. Any other
 * startup option fails if an upgrade was left incomplete or is needed for
 * the current layout version.
 *
 * @param startOpt how the namenode is being started
 * @throws IOException if an upgrade is pending/required and startOpt is
 *         not -upgrade
 */
void verifyDistributedUpgradeProgress(StartupOption startOpt
                                      ) throws IOException {
  if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
    return;
  assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
  if(startOpt != StartupOption.UPGRADE) {
    if(upgradeManager.getUpgradeState())
      throw new IOException(
                  "\n   Previous distributed upgrade was not completed. "
                + "\n   Please restart NameNode with -upgrade option.");
    if(upgradeManager.getDistributedUpgrades() != null)
      throw new IOException("\n   Distributed upgrade for NameNode version "
        + upgradeManager.getUpgradeVersion() + " to current LV "
        + layoutVersion + " is required.\n   Please restart NameNode"
        + " with -upgrade option.");
  }
}
java
{ "resource": "" }
q161896
NNStorage.initializeDistributedUpgrade
train
void initializeDistributedUpgrade() throws IOException { if(! upgradeManager.initializeUpgrade()) return; // write new upgrade state into disk writeAll(); LOG.info("\n Distributed upgrade for NameNode version " + upgradeManager.getUpgradeVersion() + " to current LV " + layoutVersion + " is initialized."); }
java
{ "resource": "" }
q161897
NNStorage.reportErrorsOnDirectories
train
/**
 * Reports errors on a batch of storage directories: each is disabled and
 * moved to the removed list, image managers are re-checked (which also
 * refreshes image metrics), and finally the remaining directory counts
 * are validated.
 *
 * @param sds directories that experienced errors; may be empty
 * @param image image whose managers should be checked; may be null
 * @throws IOException if no storage (or no image storage) directories
 *         remain after removal
 */
synchronized void reportErrorsOnDirectories(List<StorageDirectory> sds,
    FSImage image) throws IOException {
  for (StorageDirectory sd : sds) {
    reportErrorsOnDirectory(sd, image);
  }
  // check image managers (this will update image metrics)
  if (image != null) {
    image.checkImageManagers();
  }
  // only check if something was wrong
  if(!sds.isEmpty()) {
    if (this.getNumStorageDirs() == 0)
      throw new IOException("No more storage directories left");
    // check image directories, edits are checked withing FSEditLog.checkJournals
    if (getNumStorageDirs(NameNodeDirType.IMAGE) == 0)
      throw new IOException("No more image storage directories left");
  }
}
java
{ "resource": "" }
q161898
NNStorage.reportErrorsOnDirectory
train
/**
 * Marks a single storage directory as failed: removes it from the active
 * set, unlocks it (best-effort), moves it to the removed list, and
 * notifies the image layer. Logs the directory list before and after.
 *
 * @param sd the failed storage directory
 * @param image image to notify of the failure; may be null
 */
synchronized void reportErrorsOnDirectory(StorageDirectory sd,
    FSImage image) {
  String lsd = listStorageDirectories();
  LOG.info("reportErrorsOnDirectory: Current list of storage dirs:" + lsd);
  LOG.error("reportErrorsOnDirectory: Error reported on storage directory "
      + sd.getRoot());
  if (this.storageDirs.remove(sd)) {
    try {
      // Release the lock so the dir can later be restored or reused.
      sd.unlock();
    } catch (Exception e) {
      LOG.warn(
          "reportErrorsOnDirectory: Unable to unlock bad storage directory: "
              + sd.getRoot().getPath(), e);
    }
    this.removedStorageDirs.add(sd);
  }
  if (image != null) {
    image.reportErrorsOnImageManager(sd);
  }
  lsd = listStorageDirectories();
  LOG.info("reportErrorsOnDirectory: Current list of storage dirs:" + lsd);
}
java
{ "resource": "" }
q161899
NNStorage.reportErrorOnFile
train
/**
 * Reports an I/O error observed on a specific file by finding which
 * storage directory contains it (by path prefix) and failing that
 * directory. Files outside every known storage directory are ignored.
 *
 * @param f the file that experienced the I/O error
 */
public void reportErrorOnFile(File f) {
  // We use getAbsolutePath here instead of getCanonicalPath since we know
  // that there is some IO problem on that drive.
  // getCanonicalPath may need to call stat() or readlink() and it's likely
  // those calls would fail due to the same underlying IO problem.
  String absPath = f.getAbsolutePath();
  for (StorageDirectory sd : storageDirs) {
    String dirPath = sd.getRoot().getAbsolutePath();
    // Ensure a trailing slash so "/a/bc" never matches prefix "/a/b".
    if (!dirPath.endsWith("/")) {
      dirPath += "/";
    }
    if (absPath.startsWith(dirPath)) {
      reportErrorsOnDirectories(sd, null);
      return;
    }
  }
}
java
{ "resource": "" }