_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160300
RaidCodec.addCodec
train
// Registers a codec via copy-on-write: rebuilds the shared codec list and id->codec
// map with the new entry and swaps in unmodifiable snapshots, so concurrent readers
// never see a partially-updated collection.
// NOTE(review): concurrent calls to addCodec itself can lose updates — assumes
// callers serialize registration externally; TODO confirm.
static void addCodec(RaidCodec codec) { List<RaidCodec> newCodecs = new ArrayList<RaidCodec>(); newCodecs.addAll(codecs); newCodecs.add(codec); codecs = Collections.unmodifiableList(newCodecs); Map<String, RaidCodec> newIdToCodec = new HashMap<String, RaidCodec>(); newIdToCodec.putAll(idToCodec); newIdToCodec.put(codec.id, codec); idToCodec = Collections.unmodifiableMap(newIdToCodec); }
java
{ "resource": "" }
q160301
RaidCodec.getSourceBlocks
train
// Extracts the source (data) blocks from a raided block array laid out as repeated
// stripes of [numParityBlocks parity, then data]. The count subtracts the parity
// blocks of each full stripe plus those of a trailing partial stripe; `pos` starts
// past the first stripe's parity and jumps over each subsequent parity run.
// NOTE(review): assumes blocks.length reflects this parity-first layout — TODO confirm.
public BlockInfo[] getSourceBlocks(BlockInfo[] blocks) { int numSourceBlocks = blocks.length - (blocks.length / numStripeBlocks) * numParityBlocks - ((blocks.length % numStripeBlocks == 0) ? 0 : numParityBlocks); BlockInfo[] sourceBlocks = new BlockInfo[numSourceBlocks]; int pos = numParityBlocks; int stripeEnd = numStripeBlocks; for (int i = 0; i < numSourceBlocks; i++) { sourceBlocks[i] = blocks[pos]; pos++; if (pos == stripeEnd) { pos += numParityBlocks; stripeEnd += numStripeBlocks; } } return sourceBlocks; }
java
{ "resource": "" }
q160302
RaidCodec.getParityBlocks
train
// Mirror of getSourceBlocks: collects the parity blocks from the stripe layout.
// Each stripe contributes numParityBlocks entries (a trailing partial stripe still
// contributes a full parity group); `pos` walks a parity run then skips the
// numDataBlocks data blocks to the next stripe's parity run.
public BlockInfo[] getParityBlocks(BlockInfo[] blocks) { int numBlocks = (blocks.length / numStripeBlocks) * numParityBlocks + ((blocks.length % numStripeBlocks == 0) ? 0 : numParityBlocks); BlockInfo[] parityBlocks = new BlockInfo[numBlocks]; int pos = 0; int parityEnd = numParityBlocks; for (int i = 0; i < numBlocks; i++) { parityBlocks[i] = blocks[pos]; pos++; if (pos == parityEnd) { pos += numDataBlocks; parityEnd += numStripeBlocks; } } return parityBlocks; }
java
{ "resource": "" }
q160303
RaidCodec.checkRaidProgress
train
// Scans the file stripe by stripe and queues an encoding task for every stripe whose
// parity blocks lack parityReplication live replicas (or for every stripe when
// forceAdd is set). Returns true only if no task was queued, i.e. raiding is complete.
// NOTE(review): indexes blocks[i + j] for the stripe's leading parity group —
// assumes parity-first stripe layout as in getSourceBlocks; TODO confirm.
public boolean checkRaidProgress(INodeFile sourceINode, LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs, boolean forceAdd) throws IOException { boolean result = true; BlockInfo[] blocks = sourceINode.getBlocks(); for (int i = 0; i < blocks.length; i += numStripeBlocks) { boolean hasParity = true; if (!forceAdd) { for (int j = 0; j < numParityBlocks; j++) { if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) { hasParity = false; break; } } } if (!hasParity || forceAdd) { raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i)); result = false; } } return result; }
java
{ "resource": "" }
q160304
ServerLogReaderPreTransactional.handleNullRead
train
// Called after a read returned no operation. Two consecutive null reads after the
// stream was marked finished mean the file is fully consumed; otherwise back off
// 100 ms before the caller retries. Always refreshes the stream position so a
// writer appending in the meantime is picked up.
private void handleNullRead() throws IOException { if (curStreamFinished && readNullAfterStreamFinished) { // If we read a null operation after the NameNode closed // the stream, then we surely reached the end of the file. curStreamConsumed = true; } else { try { // This affects how much we wait after we reached the end of the // current stream. Thread.sleep(100); } catch (InterruptedException e) { throw new IOException(e); } } if (curStreamFinished) readNullAfterStreamFinished = true; refreshStreamPosition(); }
java
{ "resource": "" }
q160305
ServerLogReaderPreTransactional.tryReloadingEditLog
train
private void tryReloadingEditLog() throws IOException { LOG.info("Trying to reload the edit log ..."); // The roll image count is 1 after edits.new was renamed to edits if (rollImageCount.get() == 1) { try { LOG.info("Trying to reload the edit log from " + editsFile.getAbsolutePath()); openInputStream(editsFile); LOG.info("Successfully reloaded the edit log from " + editsFile.getAbsolutePath() + ". Trying to refresh position."); refreshStreamPosition(); LOG.info("Successfully refreshed stream position"); return; } catch (IOException e) { LOG.warn("Failed to reload from " + editsFile.getAbsolutePath(), e); } } try { LOG.info("Trying to reload the edit log from " + editsNewFile.getAbsolutePath()); openInputStream(editsNewFile); LOG.info("Successfully reloaded the edit log from " + editsNewFile.getAbsolutePath() + ". Trying to refresh position."); refreshStreamPosition(); LOG.info("Successfully refreshed stream position"); return; } catch (IOException e) { LOG.error("Failed to reload from " + editsFile.getAbsolutePath(), e); throw e; } }
java
{ "resource": "" }
q160306
ServerLogReaderPreTransactional.trySwitchingEditLog
train
// If a log roll is pending, marks the current stream finished; once the current
// stream is fully consumed, reopens the edit log and consumes one pending roll
// (decrementing rollImageCount).
private void trySwitchingEditLog() throws IOException { if (shouldSwitchEditLog()) { curStreamFinished = true; if (LOG.isDebugEnabled()) { LOG.debug("Should switch edit log. rollImageCount=" + rollImageCount + ". curStreamConsumed=" + curStreamConsumed); } if (curStreamConsumed) { if (LOG.isDebugEnabled()) { LOG.debug("Reloading edit log ..."); } openEditLog(); rollImageCount.decrementAndGet(); } } }
java
{ "resource": "" }
q160307
ServerLogReaderPreTransactional.openInputStream
train
/**
 * Opens {@code txFile} as an edit log input stream, retrying on transient
 * failures with linear backoff. Header-corruption and plain I/O failures use
 * separate retry budgets (LOG_HEADER_CORRUPT_RETRY_MAX / IO_EXCEPTION_RETRY_MAX);
 * exhausting either budget gives up with an IOException. On success the stream
 * bookkeeping flags are reset for a fresh read.
 *
 * @param txFile edit log file to open
 * @throws IOException if the log cannot be loaded after all retries, or the
 *         backoff sleep is interrupted
 */
private void openInputStream(File txFile) throws IOException {
  int ioExceptionRetryCount = 0, logHeaderCorruptRetryCount = 0;
  LOG.info("Trying to load the edit log from " + txFile.getAbsolutePath());
  do {
    try {
      inputStream = new EditLogFileInputStream(txFile);
      editLogFilePosition = inputStream.getPosition();
      curStreamConsumed = false;
      curStreamFinished = false;
      readNullAfterStreamFinished = false;
      LOG.info("Successfully loaded the edits log from " + txFile.getAbsolutePath());
      break;
    } catch (LogHeaderCorruptException e1) {
      if (logHeaderCorruptRetryCount == LOG_HEADER_CORRUPT_RETRY_MAX) {
        LOG.error("Failed to load the edit log. No retries left.", e1);
        throw new IOException("Could not load the edit log");
      }
      logHeaderCorruptRetryCount ++;
      LOG.warn("Failed to load the edit log. Retry " + logHeaderCorruptRetryCount + " ...", e1);
      try {
        // BUGFIX: back off using the header-corrupt retry counter. The original
        // multiplied by ioExceptionRetryCount, which is 0 on this path, so the
        // retries spun with no delay at all.
        Thread.sleep(logHeaderCorruptRetryCount * LOG_HEADER_CORRUPT_BASE_SLEEP);
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
    } catch (IOException e2) {
      if (ioExceptionRetryCount == IO_EXCEPTION_RETRY_MAX) {
        LOG.error("Failed to load the edit log. No retries left.", e2);
        throw new IOException("Could not load the edit log");
      }
      ioExceptionRetryCount ++;
      LOG.warn("Failed to load the edit log. Retry " + ioExceptionRetryCount + " ...", e2);
      try {
        Thread.sleep(ioExceptionRetryCount * IO_EXCEPTION_BASE_SLEEP);
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
    }
  } while (true);
}
java
{ "resource": "" }
q160308
DistributedCache.getLocalCache
train
// Convenience overload: delegates to the full getLocalCache, supplying the file
// length from the already-fetched FileStatus so the callee avoids a second lookup.
public static Path getLocalCache(URI cache, Configuration conf, Path subdir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService, LocalDirAllocator lDirAllocator) throws IOException { return getLocalCache(cache, conf, subdir, fileStatus, isArchive, confFileStamp, fileStatus.getLen(), currentWorkDir, honorSymLinkConf, asyncDiskService, lDirAllocator); }
java
{ "resource": "" }
q160309
DistributedCache.releaseCache
train
// Drops one reference on a localized cache entry (keyed by URI + timestamp) under
// the global cachedArchives lock; a missing entry is logged and ignored.
// NOTE(review): refcount is decremented without a floor check — unbalanced calls
// would drive it negative; verify callers pair get/release correctly.
public static void releaseCache(URI cache, Configuration conf, long timeStamp) throws IOException { String cacheId = getKey(cache, conf, timeStamp); synchronized (cachedArchives) { CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus == null) { LOG.warn("Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!"); return; } lcacheStatus.refcount--; } }
java
{ "resource": "" }
q160310
DistributedCache.deleteCache
train
// Collects all cache entries with refcount 0 while holding the global lock,
// removes them from the registry, then deletes their on-disk files on a separate
// thread so the lock is not held during (potentially slow) filesystem deletes.
private static void deleteCache(Configuration conf, MRAsyncDiskService asyncDiskService) throws IOException { List<CacheStatus> deleteSet = new LinkedList<CacheStatus>(); // try deleting cache Status with refcount of zero synchronized (cachedArchives) { for (Iterator<String> it = cachedArchives.keySet().iterator(); it.hasNext();) { String cacheId = (String) it.next(); CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus.refcount == 0) { // delete this cache entry from the global list // and mark the localized file for deletion deleteSet.add(lcacheStatus); it.remove(); } } } // do the deletion asynchronously, after releasing the global lock Thread cacheFileCleaner = new Thread( new CacheFileCleanTask(asyncDiskService, FileSystem.getLocal(conf), deleteSet)); cacheFileCleaner.start(); }
java
{ "resource": "" }
q160311
DistributedCache.deleteLocalPath
train
// Deletes a localized path, preferring the async disk service (move-and-delete);
// falls back to a synchronous recursive fs.delete when the service is absent or
// does not own the path's volume.
private static void deleteLocalPath(MRAsyncDiskService asyncDiskService, LocalFileSystem fs, Path path) throws IOException { boolean deleted = false; if (asyncDiskService != null) { // Try to delete using asyncDiskService String localPathToDelete = path.toUri().getPath(); deleted = asyncDiskService.moveAndDeleteAbsolutePath(localPathToDelete); if (!deleted) { LOG.warn("Cannot find DistributedCache path " + localPathToDelete + " on any of the asyncDiskService volumes!"); } } if (!deleted) { // If no asyncDiskService, we will delete the files synchronously fs.delete(path, true); } LOG.info("Deleted path " + path); }
java
{ "resource": "" }
q160312
DistributedCache.localizeCache
train
// Copies the cache file from its source filesystem to the local load path and,
// for archives (.jar/.zip/tar variants), unpacks it into the parent directory;
// unknown archive extensions are left as plain copies. Afterwards records the
// on-disk size in the global accounting, chmods the localized tree ugo+rx, and
// stamps cacheStatus.mtime with the source modification time. Statement order
// matters (copy -> unpack -> size -> chmod -> mtime), so the code is unchanged.
private static Path localizeCache(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, boolean isArchive) throws IOException { FileSystem fs = getFileSystem(cache, conf); FileSystem localFs = FileSystem.getLocal(conf); Path parchive = null; if (isArchive) { parchive = new Path(cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName())); } else { parchive = cacheStatus.localizedLoadPath; } if (!localFs.mkdirs(parchive.getParent())) { throw new IOException("Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString()); } String cacheId = cache.getPath(); fs.copyToLocalFile(new Path(cacheId), parchive); if (isArchive) { String tmpArchive = parchive.toString().toLowerCase(); File srcFile = new File(parchive.toString()); File destDir = new File(parchive.getParent().toString()); if (tmpArchive.endsWith(".jar")) { RunJar.unJar(srcFile, destDir); } else if (tmpArchive.endsWith(".zip")) { FileUtil.unZip(srcFile, destDir); } else if (isTarFile(tmpArchive)) { FileUtil.unTar(srcFile, destDir); } // else will not do anyhting // and copy the file into the dir as it is } long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString())); cacheStatus.size = cacheSize; addCacheInfoUpdate(cacheStatus); // do chmod here try { //Setting recursive permission to grant everyone read and execute Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir); LOG.info("Doing chmod on localdir :" + localDir); FileUtil.chmod(localDir.toString(), "ugo+rx", true); } catch(InterruptedException e) { LOG.warn("Exception in chmod" + e.toString()); } // update cacheStatus to reflect the newly cached file cacheStatus.mtime = getTimestamp(conf, cache); return cacheStatus.localizedLoadPath; }
java
{ "resource": "" }
q160313
DistributedCache.ifExistsAndFresh
train
// Returns true iff the localized copy is still fresh. A source timestamp that
// differs from the one recorded in the job conf is a hard error (the file changed
// mid-job); a timestamp differing from the cached mtime just means re-localize.
private static boolean ifExistsAndFresh(Configuration conf, FileSystem fs, URI cache, long confFileStamp, CacheStatus lcacheStatus, FileStatus fileStatus) throws IOException { // check for existence of the cache long dfsFileStamp; if (fileStatus != null) { dfsFileStamp = fileStatus.getModificationTime(); } else { dfsFileStamp = getTimestamp(conf, cache); } // ensure that the file on hdfs hasn't been modified since the job started if (dfsFileStamp != confFileStamp) { LOG.fatal("File: " + cache + " has changed on HDFS since job started"); throw new IOException("File: " + cache + " has changed on HDFS since job started"); } if (dfsFileStamp != lcacheStatus.mtime) { // needs refreshing return false; } return true; }
java
{ "resource": "" }
q160314
DistributedCache.getTimestamp
train
/**
 * Returns the modification time of the cache file on its source file system.
 *
 * @param conf  configuration used to resolve the file system
 * @param cache URI of the cached file
 * @return modification time in milliseconds
 * @throws IOException if the file system or file status cannot be obtained
 */
public static long getTimestamp(Configuration conf, URI cache) throws IOException { FileSystem fs = FileSystem.get(cache, conf); Path cachePath = new Path(cache.getPath()); return fs.getFileStatus(cachePath).getModificationTime(); }
java
{ "resource": "" }
q160315
DistributedCache.createAllSymlink
train
/**
 * Creates a symlink in {@code workDir} for every entry of {@code jobCacheDir},
 * provided symlink creation is enabled in the configuration. No-op when either
 * directory is null or not a directory.
 *
 * @param conf        job configuration (consulted via getSymlink)
 * @param jobCacheDir directory whose entries are linked
 * @param workDir     directory the links are created in
 * @throws IOException if creating a symlink fails
 */
public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir) throws IOException {
  if ((jobCacheDir == null || !jobCacheDir.isDirectory())
      || workDir == null || (!workDir.isDirectory())) {
    return;
  }
  if (!getSymlink(conf)) {
    return;
  }
  File[] entries = jobCacheDir.listFiles();
  // BUGFIX: listFiles() returns null on an I/O error or if the directory
  // disappeared after the isDirectory() check; the original dereferenced the
  // result unconditionally and could throw NullPointerException.
  if (entries == null) {
    return;
  }
  for (int i = 0; i < entries.length; i++) {
    FileUtil.symLink(entries[i].getAbsolutePath(),
        new File(workDir, entries[i].getName()).toString());
  }
}
java
{ "resource": "" }
q160316
DistributedCache.setCacheArchives
train
/**
 * Stores the given archive URIs in the configuration as a serialized,
 * comma-joined string under the "mapred.cache.archives" key.
 */
public static void setCacheArchives(URI[] archives, Configuration conf) { conf.set("mapred.cache.archives", StringUtils.uriToString(archives)); }
java
{ "resource": "" }
q160317
DistributedCache.setCacheFiles
train
/**
 * Stores the given file URIs in the configuration as a serialized,
 * comma-joined string under the "mapred.cache.files" key.
 */
public static void setCacheFiles(URI[] files, Configuration conf) { conf.set("mapred.cache.files", StringUtils.uriToString(files)); }
java
{ "resource": "" }
q160318
DistributedCache.addCacheArchive
train
/**
 * Appends one archive URI to the comma-separated "mapred.cache.archives"
 * property, creating the property if it is not yet set.
 */
public static void addCacheArchive(URI uri, Configuration conf) {
  String existing = conf.get("mapred.cache.archives");
  String updated = (existing == null) ? uri.toString() : existing + "," + uri.toString();
  conf.set("mapred.cache.archives", updated);
}
java
{ "resource": "" }
q160319
DistributedCache.addCacheFile
train
/**
 * Appends one file URI to the comma-separated "mapred.cache.files" property,
 * creating the property if it is not yet set.
 */
public static void addCacheFile(URI uri, Configuration conf) {
  String existing = conf.get("mapred.cache.files");
  String updated = (existing == null) ? uri.toString() : existing + "," + uri.toString();
  conf.set("mapred.cache.files", updated);
}
java
{ "resource": "" }
q160320
DistributedCache.addFileToClassPath
train
/**
 * Adds a file to the job's classpath: appends it (path-separator delimited) to
 * the "mapred.job.classpath.files" property and registers its fully-qualified
 * URI as a cache file so it gets localized.
 *
 * @throws IOException if the file's file system cannot be resolved
 */
public static void addFileToClassPath(Path file, Configuration conf) throws IOException {
  String separator = System.getProperty("path.separator");
  String existing = conf.get("mapred.job.classpath.files");
  String updated = (existing == null)
      ? file.toString()
      : existing + separator + file.toString();
  conf.set("mapred.job.classpath.files", updated);
  URI qualified = file.makeQualified(file.getFileSystem(conf)).toUri();
  addCacheFile(qualified, conf);
}
java
{ "resource": "" }
q160321
DistributedCache.getArchiveClassPaths
train
/**
 * Returns the archive classpath entries recorded under
 * "mapred.job.classpath.archives", split on the platform path separator,
 * or null when the property is unset.
 *
 * StringTokenizer is retained (rather than String.split) so that empty
 * entries between consecutive separators are skipped, as before.
 *
 * @param conf job configuration
 * @return the archive classpath as Paths, or null if none configured
 */
public static Path[] getArchiveClassPaths(Configuration conf) {
  String classpath = conf.get("mapred.job.classpath.archives");
  if (classpath == null) {
    return null;
  }
  // Rewritten without raw types: the original collected StringTokenizer
  // elements through a raw ArrayList and relied on unchecked casts.
  StringTokenizer tokens =
      new StringTokenizer(classpath, System.getProperty("path.separator"));
  Path[] paths = new Path[tokens.countTokens()];
  for (int i = 0; i < paths.length; i++) {
    paths[i] = new Path(tokens.nextToken());
  }
  return paths;
}
java
{ "resource": "" }
q160322
DistributedCache.getSymlink
train
/**
 * Reports whether symlink creation is enabled: true only when the
 * "mapred.create.symlink" property is exactly "yes" (case-sensitive).
 */
public static boolean getSymlink(Configuration conf) { return "yes".equals(conf.get("mapred.create.symlink")); }
java
{ "resource": "" }
q160323
DistributedCache.checkURIs
train
/**
 * Validates the URI fragments used as symlink names: every file and archive URI
 * must carry a fragment, and no two fragments (across both arrays) may collide
 * case-insensitively. Null arrays are treated as empty.
 *
 * BUGFIX: the original skipped archive-fragment validation entirely whenever
 * uriFiles was null or empty, letting null or duplicate archive fragments
 * through; it also re-checked every archive pair once per file (O(n^3)).
 * This version checks all fragments uniformly in one pass.
 *
 * @return true when all fragments are present and mutually distinct
 */
public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) {
  int numFiles = (uriFiles == null) ? 0 : uriFiles.length;
  int numArchives = (uriArchives == null) ? 0 : uriArchives.length;
  String[] fragments = new String[numFiles + numArchives];
  for (int i = 0; i < fragments.length; i++) {
    URI uri = (i < numFiles) ? uriFiles[i] : uriArchives[i - numFiles];
    String fragment = uri.getFragment();
    if (fragment == null) {
      return false; // no symlink name available
    }
    fragments[i] = fragment;
  }
  // Pairwise case-insensitive collision check across files AND archives.
  for (int i = 0; i < fragments.length; i++) {
    for (int j = i + 1; j < fragments.length; j++) {
      if (fragments[i].equalsIgnoreCase(fragments[j])) {
        return false;
      }
    }
  }
  return true;
}
java
{ "resource": "" }
q160324
DistributedCache.purgeCache
train
// Deletes every localized cache file and empties the registry, holding the global
// lock for the whole sweep; individual delete failures are logged at debug level
// and do not abort the purge (best-effort cleanup).
public static void purgeCache(Configuration conf, MRAsyncDiskService service) throws IOException { synchronized (cachedArchives) { LocalFileSystem localFs = FileSystem.getLocal(conf); for (Map.Entry<String,CacheStatus> f: cachedArchives.entrySet()) { try { deleteLocalPath(service, localFs, f.getValue().localizedLoadPath); } catch (IOException ie) { LOG.debug("Error cleaning up cache", ie); } } cachedArchives.clear(); } }
java
{ "resource": "" }
q160325
DistributedCache.deleteCacheInfoUpdate
train
// Reverses the accounting done by addCacheInfoUpdate for a removed entry:
// subtracts its size from the base-dir byte total and decrements the base-dir
// subdirectory count, each under its own lock. No-op for uninitialized entries.
private static void deleteCacheInfoUpdate(CacheStatus cacheStatus) { if (!cacheStatus.isInited()) { // if it is not created yet, do nothing. return; } synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.getBaseDir()); if ( dirSize != null ) { dirSize -= cacheStatus.size; baseDirSize.put(cacheStatus.getBaseDir(), dirSize); } } synchronized (baseDirNumberSubDir) { Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir()); if ( dirSubDir != null ) { dirSubDir--; baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir); } } }
java
{ "resource": "" }
q160326
DistributedCache.addCacheInfoUpdate
train
// Records a newly localized entry in the per-base-dir accounting: adds its size
// to the byte total and bumps the subdirectory count, initializing either map
// entry on first use. Each map is guarded by its own lock.
private static void addCacheInfoUpdate(CacheStatus cacheStatus) { long cacheSize = cacheStatus.size; synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.getBaseDir()); if( dirSize == null ) { dirSize = Long.valueOf(cacheSize); } else { dirSize += cacheSize; } baseDirSize.put(cacheStatus.getBaseDir(), dirSize); } synchronized (baseDirNumberSubDir) { Integer dirSubDir = baseDirNumberSubDir.get(cacheStatus.getBaseDir()); if( dirSubDir == null ) { dirSubDir = 1; } else { dirSubDir += 1; } baseDirNumberSubDir.put(cacheStatus.getBaseDir(), dirSubDir); } }
java
{ "resource": "" }
q160327
FileSystem.getDefaultUri
train
// Default filesystem URI from the configuration, falling back to the local
// filesystem ("file:///"); fixName normalizes legacy name formats first.
public static URI getDefaultUri(Configuration conf) { return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, "file:///"))); }
java
{ "resource": "" }
q160328
FileSystem.getLocal
train
// Returns the (possibly cached) local filesystem instance for this configuration.
public static LocalFileSystem getLocal(Configuration conf) throws IOException { return (LocalFileSystem)get(LocalFileSystem.NAME, conf); }
java
{ "resource": "" }
q160329
FileSystem.newInstanceLocal
train
// Like getLocal, but always constructs a fresh LocalFileSystem (bypasses the cache).
public static LocalFileSystem newInstanceLocal(Configuration conf) throws IOException { return (LocalFileSystem)newInstance(LocalFileSystem.NAME, conf); }
java
{ "resource": "" }
q160330
FileSystem.create
train
// Creates the file then applies the requested permission. Note this is two
// operations, not atomic: the file briefly exists with default permissions.
public static FSDataOutputStream create(FileSystem fs, Path file, FsPermission permission) throws IOException { // create the file with default permission FSDataOutputStream out = fs.create(file); // set its permission to the supplied one fs.setPermission(file, permission); return out; }
java
{ "resource": "" }
q160331
FileSystem.mkdirs
train
// Creates the directory then applies the requested permission. As with create(),
// this is non-atomic: the directory briefly carries default permissions.
public static boolean mkdirs(FileSystem fs, Path dir, FsPermission permission) throws IOException { // create the directory using the default permission boolean result = fs.mkdirs(dir); // set its permission to be the supplied one fs.setPermission(dir, permission); return result; }
java
{ "resource": "" }
q160332
FileSystem.checkPath
train
// Verifies that a path belongs to this filesystem. Relative paths (no scheme)
// always pass. Otherwise scheme must match case-insensitively, and authority must
// match — with a fallback: a path lacking an authority is accepted when this
// filesystem's scheme+authority equal either the configured default URI or the
// default filesystem's actual URI. Anything else throws IllegalArgumentException.
protected void checkPath(Path path) { URI uri = path.toUri(); if (uri.getScheme() == null) // fs is relative return; String thisScheme = this.getUri().getScheme(); String thatScheme = uri.getScheme(); String thisAuthority = this.getUri().getAuthority(); String thatAuthority = uri.getAuthority(); //authority and scheme are not case sensitive if (thisScheme.equalsIgnoreCase(thatScheme)) {// schemes match if (thisAuthority == thatAuthority || // & authorities match (thisAuthority != null && thisAuthority.equalsIgnoreCase(thatAuthority))) return; if (thatAuthority == null && // path's authority is null thisAuthority != null) { // fs has an authority URI defaultUri = getDefaultUri(getConf()); // & is the conf default if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) && thisAuthority.equalsIgnoreCase(defaultUri.getAuthority())) return; try { // or the default fs's uri defaultUri = get(getConf()).getUri(); } catch (IOException e) { throw new RuntimeException(e); } if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) && thisAuthority.equalsIgnoreCase(defaultUri.getAuthority())) return; } } throw new IllegalArgumentException("Wrong FS: "+path+ ", expected: "+this.getUri()); }
java
{ "resource": "" }
q160333
FileSystem.getFileBlockLocations
train
// Base-class stub: reports the whole file as a single block hosted on
// "localhost" — filesystems with real block placement override this.
// Returns null for a null status, empty array when start is past EOF,
// and rejects negative start/len.
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException { if (file == null) { return null; } if ( (start<0) || (len < 0) ) { throw new IllegalArgumentException("Invalid start or len parameter"); } if (file.getLen() < start) { return new BlockLocation[0]; } String[] name = { "localhost:50010" }; String[] host = { "localhost" }; return new BlockLocation[] { new BlockLocation(name, host, 0, file.getLen()) }; }
java
{ "resource": "" }
q160334
FileSystem.create
train
// Convenience overload: create with only a progress callback, all other
// options defaulted via the CreateOptions-based create().
public FSDataOutputStream create(Path f, Progressable progress) throws IOException { return create(f, CreateOptions.progress(progress)); }
java
{ "resource": "" }
q160335
FileSystem.create
train
// Legacy positional-argument create: translates each parameter into the
// corresponding CreateOptions entry and delegates to the options-based create().
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, int bytesPerChecksum, Progressable progress) throws IOException { return create(f, CreateOptions.perms(permission), CreateOptions.writeOptions(overwrite, null), CreateOptions.bufferSize(bufferSize), CreateOptions.replicationFactor(replication), CreateOptions.blockSize(blockSize), CreateOptions.bytesPerChecksum(bytesPerChecksum), CreateOptions.progress(progress)); }
java
{ "resource": "" }
q160336
FileSystem.createNewFile
train
// Creates an empty file iff it does not already exist; returns false otherwise.
// NOTE(review): exists/create is a check-then-act race — two concurrent callers
// can both pass the exists() check; confirm callers tolerate that.
public boolean createNewFile(Path f) throws IOException { if (exists(f)) { return false; } else { create(f, false, getDefaultBufferSize()).close(); return true; } }
java
{ "resource": "" }
q160337
FileSystem.deleteOnExit
train
// Schedules an existing path for deletion when the filesystem shuts down
// (see processDeleteOnExit); returns false if the path does not exist.
public boolean deleteOnExit(Path f) throws IOException { if (!exists(f)) { return false; } synchronized (deleteOnExit) { deleteOnExit.add(f); } return true; }
java
{ "resource": "" }
q160338
FileSystem.processDeleteOnExit
train
// Recursively deletes every scheduled path, removing each from the set as it is
// processed (iterator.remove — safe during iteration); delete failures are
// logged and skipped so one bad path does not block the rest.
protected void processDeleteOnExit() { synchronized (deleteOnExit) { for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) { Path path = iter.next(); try { delete(path, true); } catch (IOException e) { LOG.info("Ignoring failure to deleteOnExit for path " + path); } iter.remove(); } } }
java
{ "resource": "" }
q160339
FileSystem.isFile
train
public boolean isFile(Path f) throws IOException { try { return !getFileStatus(f).isDir(); } catch (FileNotFoundException e) { return false; // f does not exist } }
java
{ "resource": "" }
q160340
FileSystem.listLocatedStatus
train
// Deprecated adapter: lists the directory eagerly via listStatus (throwing
// FileNotFoundException from the instance initializer if absent) and exposes the
// results as a RemoteIterator, attaching block locations lazily per file entry.
@Deprecated public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f, final PathFilter filter) throws FileNotFoundException, IOException { return new RemoteIterator<LocatedFileStatus>() { private final FileStatus[] stats; private int i = 0; { // initializer stats = listStatus(f, filter); if (stats == null) { throw new FileNotFoundException( "File " + f + " does not exist."); } } @Override public boolean hasNext() { return i<stats.length; } @Override public LocatedFileStatus next() throws IOException { if (!hasNext()) { throw new NoSuchElementException("No more entry in " + f); } FileStatus result = stats[i++]; BlockLocation[] locs = result.isDir() ? null : getFileBlockLocations(result, 0, result.getLen()); return new LocatedFileStatus(result, locs); } }; }
java
{ "resource": "" }
q160341
FileSystem.listLocatedBlockStatus
train
// Like listLocatedStatus, but yields LocatedBlockFileStatus entries. This base
// implementation fabricates a single "localhost" block per file (matching the
// getFileBlockLocations stub); directories get null locations.
public RemoteIterator<LocatedBlockFileStatus> listLocatedBlockStatus( final Path f, final PathFilter filter) throws FileNotFoundException, IOException { return new RemoteIterator<LocatedBlockFileStatus>() { private final FileStatus[] stats; private int i = 0; { // initializer stats = listStatus(f, filter); if (stats == null) { throw new FileNotFoundException( "File " + f + " does not exist."); } } @Override public boolean hasNext() { return i<stats.length; } @Override public LocatedBlockFileStatus next() throws IOException { if (!hasNext()) { throw new NoSuchElementException("No more entry in " + f); } FileStatus result = stats[i++]; BlockAndLocation[] locs = null; if (!result.isDir()) { String[] name = { "localhost:50010" }; String[] host = { "localhost" }; // create a dummy blockandlocation locs = new BlockAndLocation[] { new BlockAndLocation(0L, 0L, name, host, new String[0], 0, result.getLen(), false) }; } return new LocatedBlockFileStatus(result, locs, false); } }; }
java
{ "resource": "" }
q160342
FileSystem.globStatus
train
// Glob matching with curly-brace expansion: GlobExpander first expands {a,b}
// alternatives into concrete patterns; a single pattern goes straight to
// globStatusInternal, otherwise results of each expanded pattern are concatenated.
public FileStatus[] globStatus(Path pathPattern, PathFilter filter) throws IOException { String filename = pathPattern.toUri().getPath(); List<String> filePatterns = GlobExpander.expand(filename); if (filePatterns.size() == 1) { return globStatusInternal(pathPattern, filter); } else { List<FileStatus> results = new ArrayList<FileStatus>(); for (String filePattern : filePatterns) { FileStatus[] files = globStatusInternal(new Path(filePattern), filter); for (FileStatus file : files) { results.add(file); } } return results.toArray(new FileStatus[results.size()]); } }
java
{ "resource": "" }
q160343
FileSystem.getHomeDirectory
train
/**
 * Returns the home directory ("/user/&lt;name&gt;") for the given user,
 * qualified against this filesystem. A null name falls back to the
 * JVM's "user.name" system property.
 */
public Path getHomeDirectory(String userName) {
  String user = (userName != null) ? userName : System.getProperty("user.name");
  return new Path("/user/" + user).makeQualified(this);
}
java
{ "resource": "" }
q160344
FileSystem.copyFromLocalFile
train
// Deprecated shorthand: copy (don't move) from local, overwriting, no validation.
@Deprecated public void copyFromLocalFile(Path src, Path dst) throws IOException { copyFromLocalFile(false, true, false, src, dst); }
java
{ "resource": "" }
q160345
FileSystem.moveFromLocalFile
train
// Moves multiple local files to dst: delegates with delSrc=true, overwrite=true.
public void moveFromLocalFile(Path[] srcs, Path dst) throws IOException { copyFromLocalFile(true, true, false, srcs, dst); }
java
{ "resource": "" }
q160346
FileSystem.moveFromLocalFile
train
// Moves a single local file to dst: delegates with delSrc=true, overwrite=true.
public void moveFromLocalFile(Path src, Path dst) throws IOException { copyFromLocalFile(true, true, false, src, dst); }
java
{ "resource": "" }
q160347
FileSystem.copyFromLocalFile
train
// Core local->this copy: delegates to FileUtil.copy with the local filesystem as
// source; delSrc deletes the original, validate verifies the copy.
public void copyFromLocalFile(boolean delSrc, boolean overwrite, boolean validate, Path src, Path dst) throws IOException { Configuration conf = getConf(); FileUtil.copy(getLocal(conf), src, this, dst, delSrc, overwrite, validate, conf); }
java
{ "resource": "" }
q160348
FileSystem.copyToLocalFile
train
// Deprecated shorthand: copy (don't move) to local, no validation.
@Deprecated public void copyToLocalFile(Path src, Path dst) throws IOException { copyToLocalFile(false, false, src, dst); }
java
{ "resource": "" }
q160349
FileSystem.moveToLocalFile
train
// Moves a file to the local filesystem: delegates with delSrc=true, no validation.
public void moveToLocalFile(Path src, Path dst) throws IOException { copyToLocalFile(true, false, src, dst); }
java
{ "resource": "" }
q160350
FileSystem.copyToLocalFile
train
// Core this->local copy: delegates to FileUtil.copy with the local filesystem as
// destination; delSrc deletes the source, validate verifies the copy.
public void copyToLocalFile(boolean delSrc, boolean validate, Path src, Path dst) throws IOException { FileUtil.copy(this, src, getLocal(getConf()), dst, delSrc, validate, getConf()); }
java
{ "resource": "" }
q160351
FileSystem.getUsed
train
/**
 * Returns the total number of bytes used, computed as the sum of the
 * lengths of all entries directly under the root directory.
 *
 * @throws IOException if the root listing fails
 */
public long getUsed() throws IOException {
  long total = 0;
  FileStatus[] rootEntries = listStatus(new Path("/"));
  for (FileStatus entry : rootEntries) {
    total += entry.getLen();
  }
  return total;
}
java
{ "resource": "" }
q160352
FileSystem.getFileStatus
train
private FileStatus[] getFileStatus(Path[] paths) throws IOException { if (paths == null) { return null; } ArrayList<FileStatus> results = new ArrayList<FileStatus>(paths.length); for (int i = 0; i < paths.length; i++) { try { results.add(getFileStatus(paths[i])); } catch (FileNotFoundException e) { // do nothing } } return results.toArray(new FileStatus[results.size()]); }
java
{ "resource": "" }
q160353
FileSystem.getStatistics
train
// Snapshot of all per-scheme statistics, re-keyed by scheme. Note: if two
// filesystem classes share a scheme, the later entry overwrites the earlier one.
public static synchronized Map<String, Statistics> getStatistics() { Map<String, Statistics> result = new HashMap<String, Statistics>(); for(Statistics stat: statisticsTable.values()) { result.put(stat.getScheme(), stat); } return result; }
java
{ "resource": "" }
q160354
FileSystem.getStatistics
train
// Lazily creates and returns the Statistics object for a filesystem class;
// the table is keyed by class, with the scheme only used when first creating.
public static synchronized Statistics getStatistics(String scheme, Class<? extends FileSystem> cls) { Statistics result = statisticsTable.get(cls); if (result == null) { result = new Statistics(scheme); statisticsTable.put(cls, result); } return result; }
java
{ "resource": "" }
q160355
ServerLogReaderUtil.createNotification
train
// Maps an edit-log operation to its namespace notification (add/close/delete/
// mkdir). Returns null for every other opcode, which callers must tolerate.
static NamespaceNotification createNotification(FSEditLogOp op) { switch (op.opCode) { case OP_ADD: return new NamespaceNotification(((AddOp)op).path, EventType.FILE_ADDED.getByteValue(), op.getTransactionId()); case OP_CLOSE: return new NamespaceNotification(((CloseOp)op).path, EventType.FILE_CLOSED.getByteValue(), op.getTransactionId()); case OP_DELETE: return new NamespaceNotification(((DeleteOp)op).path, EventType.NODE_DELETED.getByteValue(), op.getTransactionId()); case OP_MKDIR: return new NamespaceNotification(((MkdirOp)op).path, EventType.DIR_ADDED.getByteValue(), op.getTransactionId()); default: return null; } }
java
{ "resource": "" }
q160356
ServerLogReaderUtil.shouldSkipOp
train
/**
 * Decides whether an already-seen operation should be skipped. Nothing is
 * skipped before the first transaction id is known (-1); otherwise any op
 * whose id does not exceed the current id has been processed already.
 */
static boolean shouldSkipOp(long currentTransactionId, FSEditLogOp op) {
  return currentTransactionId != -1
      && op.getTransactionId() <= currentTransactionId;
}
java
{ "resource": "" }
q160357
ServerLogReaderUtil.checkTransactionId
train
// Enforces strictly sequential transaction ids: unless this is the first op
// (currentTransactionId == -1), the op's id must be exactly current+1, else an
// IOException is thrown. Returns the op's id as the new current id.
static long checkTransactionId(long currentTransactionId, FSEditLogOp op) throws IOException { if (currentTransactionId != -1) { if (op.getTransactionId() != currentTransactionId + 1) { LOG.error("Read invalid txId=" + op.getTransactionId() + " expectedTxId=" + (currentTransactionId + 1) + ":" + op); throw new IOException("checkTransactionId failed"); } } currentTransactionId = op.getTransactionId(); return currentTransactionId; }
java
{ "resource": "" }
q160358
Utils.writeString
train
// Writes a string as a vint byte-length followed by its UTF-8 bytes (via Text);
// null is encoded as length -1 so readString can round-trip it.
public static void writeString(DataOutput out, String s) throws IOException { if (s != null) { Text text = new Text(s); byte[] buffer = text.getBytes(); int len = text.getLength(); writeVInt(out, len); out.write(buffer, 0, len); } else { writeVInt(out, -1); } }
java
{ "resource": "" }
q160359
Utils.readString
train
// Inverse of writeString: reads a vint length (-1 means null), then decodes
// that many bytes as UTF-8 via Text.decode.
public static String readString(DataInput in) throws IOException { int length = readVInt(in); if (length == -1) return null; byte[] buffer = new byte[length]; in.readFully(buffer); return Text.decode(buffer); }
java
{ "resource": "" }
q160360
Parser.parse
train
// Shift-reduce parse of a join expression: tokens are pushed onto a stack, and
// each ')' triggers reduce() to collapse the parenthesized group into a node.
// A valid parse ends with exactly one composite (CIF) token on the stack; the
// optional key comparator from the job conf is attached to the resulting node.
static Node parse(String expr, JobConf job) throws IOException { if (null == expr) { throw new IOException("Expression is null"); } Class<? extends WritableComparator> cmpcl = job.getClass("mapred.join.keycomparator", null, WritableComparator.class); Lexer lex = new Lexer(expr); Stack<Token> st = new Stack<Token>(); Token tok; while ((tok = lex.next()) != null) { if (TType.RPAREN.equals(tok.getType())) { st.push(reduce(st, job)); } else { st.push(tok); } } if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) { Node ret = st.pop().getNode(); if (cmpcl != null) { ret.setKeyComparator(cmpcl); } return ret; } throw new IOException("Missing ')'"); }
java
{ "resource": "" }
q160361
BookKeeperJournalOutputStream.addBookKeeperEntry
train
// Appends one entry to the BookKeeper ledger, translating both failure modes to
// IOException. On interruption the thread's interrupt flag is restored before
// rethrowing. synchronized serializes writers on this stream.
private synchronized void addBookKeeperEntry(byte[] buf, int off, int len) throws IOException { try { ledger.addEntry(buf, off, len); if (LOG.isDebugEnabled()) { LOG.debug("Last add pushed to ledger " + ledger.getId() + " is " + ledger.getLastAddPushed()); LOG.debug("Last add confirmed to ledger " + ledger.getId() + " is " + ledger.getLastAddConfirmed()); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted writing to BookKeeper", e); } catch (BKException e) { throw new IOException("Failed to write to BookKeeper", e); } }
java
{ "resource": "" }
q160362
BookKeeperJournalOutputStream.write
train
// OutputStream contract implemented in terms of ledger entries: each write call
// becomes exactly one BookKeeper entry.
@Override public void write(byte[] buf, int off, int len) throws IOException { addBookKeeperEntry(buf, off, len); }
java
{ "resource": "" }
q160363
DistributedAvatarFileSystem.initStandbyFS
train
// (Re)initializes the standby filesystem, closing any previous instance first.
// All failures are swallowed: standbyFS is reset to null and the avatar client
// continues on the primary only. lastStandbyFSInit throttles re-attempts.
private void initStandbyFS() { lastStandbyFSInit = System.currentTimeMillis(); try { if (standbyFS != null) { standbyFS.close(); } LOG.info("DAFS initializing standbyFS"); LOG.info("DAFS primary=" + primaryURI.toString() + " standby=" + standbyURI.toString()); standbyFS = new StandbyFS(); standbyFS.initialize(standbyURI, conf); } catch (Exception e) { LOG.info("DAFS cannot initialize standbyFS: " + StringUtils.stringifyException(e)); standbyFS = null; } }
java
{ "resource": "" }
q160364
DataChecksum.newDataChecksum
train
/**
 * Factory for a DataChecksum of the given type and chunk size.
 *
 * @param type             one of the CHECKSUM_* constants
 * @param bytesPerChecksum number of data bytes covered by each checksum
 * @param sum              the Checksum implementation used for CHECKSUM_CRC32
 * @return a new DataChecksum, or null for a non-positive chunk size or an
 *         unrecognized type
 */
public static DataChecksum newDataChecksum(int type, int bytesPerChecksum,
    Checksum sum) {
  // A non-positive chunk size cannot describe a valid checksum layout.
  if (bytesPerChecksum <= 0) {
    return null;
  }
  final int checksumSize = getChecksumSizeByType(type);
  if (type == CHECKSUM_NULL) {
    return new DataChecksum(CHECKSUM_NULL, new ChecksumNull(), checksumSize,
        bytesPerChecksum);
  }
  if (type == CHECKSUM_CRC32) {
    return new DataChecksum(CHECKSUM_CRC32, sum, checksumSize,
        bytesPerChecksum);
  }
  // Unknown checksum type.
  return null;
}
java
{ "resource": "" }
q160365
DataChecksum.verifyChunkedSums
train
/**
 * Verifies per-chunk checksums over the remaining bytes of {@code data}
 * against the stored checksums in {@code checksums}.
 * Both buffers' positions are restored before returning on the slow path.
 *
 * @param fileName name used in error messages only
 * @param basePos  file offset of data's current position, for error reporting
 * @throws ChecksumException on the first mismatching chunk
 */
public void verifyChunkedSums(ByteBuffer data, ByteBuffer checksums,
    String fileName, long basePos) throws ChecksumException {
  if (size == 0) return;
  // Fast path 1: native verification for direct buffers.
  if (data.isDirect() && checksums.isDirect() && NativeCrc32.isAvailable()) {
    NativeCrc32.verifyChunkedSums(bytesPerChecksum, type, checksums, data,
        fileName, basePos);
    return;
  }
  // Fast path 2: both array-backed — delegate to the byte[] variant.
  if (data.hasArray() && checksums.hasArray()) {
    verifyChunkedSums(
        data.array(), data.arrayOffset() + data.position(), data.remaining(),
        checksums.array(), checksums.arrayOffset() + checksums.position(),
        fileName, basePos);
    return;
  }
  // Slow path: copy chunk-by-chunk through temporary arrays.
  int startDataPos = data.position();
  data.mark();
  checksums.mark();
  try {
    byte[] buf = new byte[bytesPerChecksum];
    byte[] sum = new byte[size];
    while (data.remaining() > 0) {
      // The final chunk may be shorter than bytesPerChecksum.
      int n = Math.min(data.remaining(), bytesPerChecksum);
      checksums.get(sum);
      data.get(buf, 0, n);
      summer.reset();
      summer.update(buf, 0, n);
      int calculated = (int)summer.getValue();
      // Stored checksum is big-endian: sum[0] is the most significant byte.
      int stored = (sum[0] << 24 & 0xff000000) |
        (sum[1] << 16 & 0xff0000) |
        (sum[2] << 8 & 0xff00) |
        sum[3] & 0xff;
      if (calculated != stored) {
        // File offset of the first byte of the failing chunk.
        long errPos = basePos + data.position() - startDataPos - n;
        throw new ChecksumException(
            "Checksum error: "+ fileName + " at "+ errPos +
            " exp: " + stored + " got: " + calculated, errPos);
      }
    }
  } finally {
    // Restore both buffers so the caller sees unchanged positions.
    data.reset();
    checksums.reset();
  }
}
java
{ "resource": "" }
q160366
DataChecksum.verifyChunkedSums
train
/**
 * Verifies big-endian 4-byte checksums for each bytesPerChecksum-sized
 * chunk of data[dataOff, dataOff + dataLen).
 *
 * @param basePos file offset of data[dataOff], used for error reporting
 * @throws ChecksumException on the first mismatching chunk
 */
private void verifyChunkedSums(
    byte[] data, int dataOff, int dataLen,
    byte[] checksums, int checksumsOff,
    String fileName, long basePos) throws ChecksumException {
  int remaining = dataLen;
  int dataPos = 0;
  while (remaining > 0) {
    // The final chunk may be shorter than bytesPerChecksum.
    int n = Math.min(remaining, bytesPerChecksum);
    summer.reset();
    summer.update(data, dataOff + dataPos, n);
    dataPos += n;
    remaining -= n;
    int calculated = (int)summer.getValue();
    // Stored checksum is big-endian: checksums[checksumsOff] is the MSB.
    int stored = (checksums[checksumsOff] << 24 & 0xff000000) |
      (checksums[checksumsOff + 1] << 16 & 0xff0000) |
      (checksums[checksumsOff + 2] << 8 & 0xff00) |
      checksums[checksumsOff + 3] & 0xff;
    checksumsOff += 4;
    if (calculated != stored) {
      // dataPos has already advanced past the failing chunk; back it out.
      long errPos = basePos + dataPos - n;
      throw new ChecksumException(
          "Checksum error: "+ fileName + " at "+ errPos +
          " exp: " + stored + " got: " + calculated, errPos);
    }
  }
}
java
{ "resource": "" }
q160367
DataChecksum.calculateChunkedSums
train
/**
 * Computes chunked checksums over the remaining bytes of {@code data},
 * writing one 4-byte checksum per chunk into {@code checksums}.
 * Both buffers' positions are restored before returning on the slow path.
 */
public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) {
  if (size == 0) return;
  // Fast path: both array-backed — delegate to the byte[] variant.
  if (data.hasArray() && checksums.hasArray()) {
    calculateChunkedSums(data.array(), data.arrayOffset() + data.position(),
        data.remaining(), checksums.array(),
        checksums.arrayOffset() + checksums.position());
    return;
  }
  data.mark();
  checksums.mark();
  try {
    byte[] buf = new byte[bytesPerChecksum];
    while (data.remaining() > 0) {
      // The final chunk may be shorter than bytesPerChecksum.
      int n = Math.min(data.remaining(), bytesPerChecksum);
      data.get(buf, 0, n);
      summer.reset();
      summer.update(buf, 0, n);
      // putInt honors the buffer's byte order (big-endian unless the
      // caller changed it), matching the verify side's layout.
      checksums.putInt((int)summer.getValue());
    }
  } finally {
    // Restore both buffers so the caller sees unchanged positions.
    data.reset();
    checksums.reset();
  }
}
java
{ "resource": "" }
q160368
DataChecksum.calculateChunkedSums
train
/**
 * Computes a checksum for each bytesPerChecksum-sized chunk of
 * data[dataOffset, dataOffset + dataLength), appending each result to
 * {@code sums} as 4 big-endian bytes.
 */
private void calculateChunkedSums(
    byte[] data, int dataOffset, int dataLength,
    byte[] sums, int sumsOffset) {
  int end = dataOffset + dataLength;
  int pos = dataOffset;
  int out = sumsOffset;
  while (pos < end) {
    // The final chunk may be shorter than bytesPerChecksum.
    int chunk = Math.min(end - pos, bytesPerChecksum);
    summer.reset();
    summer.update(data, pos, chunk);
    pos += chunk;
    long crc = summer.getValue();
    // Emit the checksum most-significant byte first.
    sums[out++] = (byte) (crc >>> 24);
    sums[out++] = (byte) (crc >>> 16);
    sums[out++] = (byte) (crc >>> 8);
    sums[out++] = (byte) crc;
  }
}
java
{ "resource": "" }
q160369
AsyncDiskService.shutdown
train
/**
 * Initiates an orderly shutdown of every per-volume executor: queued
 * tasks are still executed, but no new tasks are accepted.
 * Does not wait for termination.
 */
public synchronized void shutdown() {
  LOG.info("Shutting down all AsyncDiskService threads...");
  for (ThreadPoolExecutor executor : executors.values()) {
    executor.shutdown();
  }
}
java
{ "resource": "" }
q160370
AsyncDiskService.shutdownNow
train
/**
 * Immediately shuts down every per-volume executor, attempting to stop
 * running tasks and discarding queued ones.
 *
 * @return all tasks that were queued but never started, across executors
 */
public synchronized List<Runnable> shutdownNow() {
  LOG.info("Shutting down all AsyncDiskService threads immediately...");
  List<Runnable> pending = new ArrayList<Runnable>();
  for (ThreadPoolExecutor executor : executors.values()) {
    pending.addAll(executor.shutdownNow());
  }
  return pending;
}
java
{ "resource": "" }
q160371
UploadImageServlet.clearObsoleteImageUploads
train
/**
 * Removes every upload session belonging to the given journal whose
 * transaction id is older than {@code minTxIdToKeep}.
 * Sessions for other journals are untouched.
 */
public synchronized static void clearObsoleteImageUploads(long minTxIdToKeep,
    String journalId) {
  Iterator<Map.Entry<Long, SessionDescriptor>> it =
      sessions.entrySet().iterator();
  while (it.hasNext()) {
    SessionDescriptor sd = it.next().getValue();
    if (sd.journalId.equals(journalId) && sd.txid < minTxIdToKeep) {
      // Iterator.remove is the only safe removal during iteration.
      it.remove();
    }
  }
}
java
{ "resource": "" }
q160372
UploadImageServlet.getStorage
train
/**
 * Resolves the journal named in the upload parameters and validates the
 * request against it: the journal must exist, its namespace metadata must
 * match, and the uploader must hold the active writer epoch.
 *
 * @return the validated Journal
 * @throws IOException if any of the checks fail
 */
private static Journal getStorage(ServletContext context,
    UploadImageParam params) throws IOException {
  final Journal journal = JournalNodeHttpServer
      .getJournalFromContextIfExists(context, params.journalId);
  if (journal == null) {
    throwIOException("Journal: " + params.journalId + " does not exist");
  }
  final JNStorage storage = journal.getImageStorage();
  // Check namespace metadata matches the uploader's view.
  storage.checkConsistentNamespace(params.getStorageInfo());
  // Reject uploads from a stale (non-active) writer.
  if (!journal.checkWriterEpoch(params.epoch)) {
    throwIOException("This is not the active writer");
  }
  return journal;
}
java
{ "resource": "" }
q160373
Reducer.reduce
train
/**
 * Identity reduce: emits one output record per input value, reusing the
 * input key unchanged. Subclasses override this to aggregate values.
 *
 * @param key     the input key shared by all values in this group
 * @param values  the values grouped under key
 * @param context sink for output records
 */
@SuppressWarnings("unchecked")
protected void reduce(KEYIN key, Iterable<VALUEIN> values, Context context
                      ) throws IOException, InterruptedException {
  for(VALUEIN value: values) {
    // Unchecked casts: the identity reducer requires KEYIN/KEYOUT and
    // VALUEIN/VALUEOUT to be the same runtime types.
    context.write((KEYOUT) key, (VALUEOUT) value);
  }
}
java
{ "resource": "" }
q160374
BufferedByteOutputStream.wrapOutputStream
train
/**
 * Wraps the given stream in a BufferedByteOutputStream (background write
 * thread) topped with a BufferedOutputStream.
 *
 * @param os              the underlying stream to write to
 * @param bufferSize      size of the internal circular buffer
 * @param writeBufferSize size of the chunks the writer thread drains
 * @return a DataOutputStream over the layered buffers
 */
public static DataOutputStream wrapOutputStream(OutputStream os,
    int bufferSize, int writeBufferSize) {
  // wrapping BufferedByteOutputStream in BufferedOutputStream decreases
  // pressure on BBOS internal locks, and we read from the BBOS in
  // bigger chunks
  return new DataOutputStream(new BufferedOutputStream(
      new BufferedByteOutputStream(os, bufferSize, writeBufferSize)));
}
java
{ "resource": "" }
q160375
BufferedByteOutputStream.close
train
/**
 * Closes the stream: closing the internal buffer signals the writer
 * thread to drain remaining data and close the underlying stream, then
 * we wait for that thread to exit. A second call only re-checks for a
 * deferred writer error.
 *
 * @throws IOException if the writer thread reported an error or the
 *         join is interrupted
 */
public void close() throws IOException {
  if (closed) {
    // Already closed: surface any error the writer thread hit since.
    checkWriteThread();
    return;
  }
  try {
    buffer.close();
    // writeThread should exit after this
    // and close underlying stream
    try {
      writeThread.join();
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  } finally {
    // Propagate any error the writer thread recorded while draining.
    checkWriteThread();
  }
}
java
{ "resource": "" }
q160376
BufferedByteOutputStream.flush
train
/**
 * Flushes all buffered bytes through the writer thread and then flushes
 * the underlying stream. Blocks (busy-waiting) until the writer thread
 * has transferred everything that was in the buffer at call time.
 *
 * @throws IOException if the stream is closed or the writer thread
 *         reported an error
 */
public void flush() throws IOException {
  // check if the stream has been closed
  checkError();
  // how many bytes were written to the buffer
  long totalBytesWritten = buffer.totalWritten();
  // unblock reads from the buffer
  buffer.unblockReads();
  // wait until the write thread transfers everything from the
  // buffer to the stream
  while (writeThread.totalBytesTransferred < totalBytesWritten) {
    BufferedByteInputOutput.sleep(1);
  }
  InjectionHandler.processEvent(
      InjectionEventCore.BUFFEREDBYTEOUTPUTSTREAM_FLUSH,
      writeThread.totalBytesTransferred);
  // re-check: the writer thread may have failed during the drain
  checkError();
  // block reads again so the writer goes back to chunked draining
  buffer.blockReads();
  // flush the underlying buffer
  underlyingOutputStream.flush();
}
java
{ "resource": "" }
q160377
ImageSet.validate
train
private void validate(File root, Collection<URI> dirs) throws IOException { if (dirs == null) return; for (URI dir : dirs) { if (new File(dir.getPath()).getAbsolutePath().equals( root.getAbsolutePath())) { // we found the corresponding entry return; } } throwIOException("Error. Storage directory: " + root + " is not in the configured list of storage directories: " + dirs); }
java
{ "resource": "" }
q160378
ImageSet.validate
train
/**
 * Verifies that {@code location} is one of the configured storage URIs.
 * A null directory list is treated as "nothing to validate".
 *
 * @throws IOException if location is not in the configured list
 */
private void validate(URI location, Collection<URI> dirs) throws IOException {
  if (dirs == null) {
    return;
  }
  if (!dirs.contains(location)) {
    throwIOException("Error. Location: " + location
        + " is not in the configured list of storage directories: " + dirs);
  }
}
java
{ "resource": "" }
q160379
ImageSet.getCheckpointImageOutputStreams
train
/**
 * Opens a checkpoint output stream for the given image transaction id on
 * every image manager.
 *
 * @return one stream per image manager, in manager order
 * @throws IOException if any manager fails to open its stream
 */
public synchronized List<OutputStream> getCheckpointImageOutputStreams(
    long imageTxId) throws IOException {
  List<OutputStream> streams = new ArrayList<OutputStream>();
  for (ImageManager manager : imageManagers) {
    streams.add(manager.getCheckpointOutputStream(imageTxId));
  }
  return streams;
}
java
{ "resource": "" }
q160380
ImageSet.saveDigestAndRenameCheckpointImage
train
public synchronized void saveDigestAndRenameCheckpointImage(long txid, MD5Hash digest) throws IOException { for (ImageManager im : imageManagers) { if (im.saveDigestAndRenameCheckpointImage(txid, digest)) { // restore enabled state im.setImageDisabled(false); } else { // failed image im.setImageDisabled(true); } } checkImageManagers(); }
java
{ "resource": "" }
q160381
ImageSet.checkImageManagers
train
/**
 * Refreshes image-failure metrics and verifies that at least one image
 * location is still enabled.
 *
 * @throws IOException if every image location is disabled
 */
void checkImageManagers() throws IOException {
  updateImageMetrics();
  boolean anyAvailable = false;
  for (ImageManager im : imageManagers) {
    if (!im.isImageDisabled()) {
      anyAvailable = true;
      break;
    }
  }
  if (!anyAvailable) {
    throwIOException("No image locations are available");
  }
}
java
{ "resource": "" }
q160382
ImageSet.updateImageMetrics
train
void updateImageMetrics() { if (metrics == null) { return; } int failedImageDirs = 0; for (ImageManager im : imageManagers) { if(im.isImageDisabled()) { failedImageDirs++; } } // update only images, journals are handled in JournalSet metrics.imagesFailed.set(failedImageDirs); }
java
{ "resource": "" }
q160383
ImageSet.transitionNonFileImages
train
/**
 * Applies the given storage transition to every non-file image manager.
 * File-based managers are skipped entirely (handled elsewhere).
 *
 * @param nsInfo     namespace info to stamp on the transitioned storage
 * @param checkEmpty when true, managers that already hold image data are
 *                   skipped with a warning instead of being transitioned
 * @param transition the transition to apply
 * @param startOpt   the startup option driving the transition
 */
public void transitionNonFileImages(StorageInfo nsInfo, boolean checkEmpty,
    Transition transition, StartupOption startOpt) throws IOException {
  for (ImageManager im : imageManagers) {
    if (!(im instanceof FileImageManager)) {
      // Protect existing data unless the caller opted out of the check.
      if (checkEmpty && im.hasSomeImageData()) {
        LOG.warn("Image " + im + " is not empty.");
        continue;
      }
      LOG.info(transition + " : " + im);
      im.transitionImage(nsInfo, transition, startOpt);
    }
  }
}
java
{ "resource": "" }
q160384
ImageSet.getNonFileImageManagers
train
/**
 * Returns the subset of image managers that are not file-based, in the
 * original manager order.
 */
List<ImageManager> getNonFileImageManagers() {
  List<ImageManager> result = new ArrayList<ImageManager>();
  for (ImageManager manager : imageManagers) {
    if (!(manager instanceof FileImageManager)) {
      result.add(manager);
    }
  }
  return result;
}
java
{ "resource": "" }
q160385
ImageSet.convertFilesToStreams
train
/**
 * Opens an output stream for each of the given local files, tolerating
 * individual failures: a file that cannot be opened is logged and (when
 * possible) reported to the storage's error reporter, and skipped.
 *
 * @param localPaths files to open; may be null
 * @param dstStorage storage to report per-file errors to (may be null)
 * @param str        description of the download source, for log messages
 * @return the successfully opened streams; empty when localPaths is null
 * @throws IOException if localPaths is non-null but no file could be opened
 */
public static List<OutputStream> convertFilesToStreams(File[] localPaths,
    Storage dstStorage, String str) throws IOException {
  List<OutputStream> outputStreams = new ArrayList<OutputStream>();
  if (localPaths != null) {
    for (File f : localPaths) {
      try {
        if (f.exists()) {
          LOG.warn("Overwriting existing file " + f
              + " with file downloaded form " + str);
        }
        outputStreams.add(new FileOutputStream(f));
      } catch (IOException ioe) {
        // A single bad directory is tolerated; report and keep going.
        LOG.warn("Unable to download file " + f, ioe);
        if (dstStorage != null && (dstStorage instanceof StorageErrorReporter)) {
          ((StorageErrorReporter) dstStorage).reportErrorOnFile(f);
        }
      }
    }
    if (outputStreams.isEmpty()) {
      throw new IOException("Unable to download to any storage directory");
    }
  }
  // NOTE(review): a null localPaths yields an empty list rather than an
  // exception — callers appear to rely on that; confirm before changing.
  return outputStreams;
}
java
{ "resource": "" }
q160386
TClientProxyProtocolServer.getServerConfig
train
/**
 * Builds the Thrift server configuration for the proxy, taking the port
 * from the Hadoop configuration (falling back to the default).
 */
private static ThriftServerConfig getServerConfig(Configuration conf) {
  int port = conf.getInt(StorageServiceConfigKeys.PROXY_THRIFT_PORT_KEY,
      StorageServiceConfigKeys.PROXY_THRIFT_PORT_DEFAULT);
  ThriftServerConfig config = new ThriftServerConfig();
  config.setPort(port);
  return config;
}
java
{ "resource": "" }
q160387
SensorsParser.query
train
/**
 * Collects lm-sensors readings (fans, voltages, temperatures, cores)
 * into an EventRecord for this host.
 *
 * NOTE(review): the real "sensors -A" invocation is commented out and the
 * method reads a canned "sensors.out" file instead — this looks like a
 * debugging leftover (the record still claims "sensors -A" as its
 * command); confirm before shipping.
 *
 * @param s unused
 */
public EventRecord query(String s) throws Exception {
  StringBuffer sb;
  //sb = Environment.runCommand("sensors -A");
  sb = Environment.runCommand("cat sensors.out");
  EventRecord retval = new EventRecord(InetAddress.getLocalHost()
      .getCanonicalHostName(), InetAddress.getAllByName(InetAddress.getLocalHost()
      .getHostName()), Calendar.getInstance(), "lm-sensors", "Unknown",
      "sensors -A", "-");
  // Extract each sensor group present in the output.
  readGroup(retval, sb, "fan");
  readGroup(retval, sb, "in");
  readGroup(retval, sb, "temp");
  readGroup(retval, sb, "Core");
  return retval;
}
java
{ "resource": "" }
q160388
SensorsParser.readGroup
train
/**
 * Scans the sensors output for readings of one group (e.g. "temp",
 * "fan") and records each "name: value" pair into the event record.
 *
 * @return the same record, for chaining
 */
private EventRecord readGroup(EventRecord er, StringBuffer sb, String prefix) {
  // Matches lines like "temp1: +42", capturing the sensor name and value.
  Pattern groupPattern = Pattern.compile(".*(" + prefix
      + "\\s*\\d*)\\s*:\\s*(\\+?\\d+)", Pattern.MULTILINE);
  Matcher m = groupPattern.matcher(sb);
  while (m.find()) {
    er.set(m.group(1), m.group(2));
  }
  return er;
}
java
{ "resource": "" }
q160389
ChecksumFileSystem.isChecksumFile
train
/**
 * Returns true if {@code file} names a CRC sidecar file, i.e. a hidden
 * file (leading '.') with a ".crc" suffix.
 */
public static boolean isChecksumFile(Path file) {
  final String name = file.getName();
  boolean hidden = name.startsWith(".");
  boolean crcSuffix = name.endsWith(".crc");
  return hidden && crcSuffix;
}
java
{ "resource": "" }
q160390
ChecksumFileSystem.copyToLocalFile
train
/**
 * Copies src (file or directory, recursively) to the local filesystem,
 * optionally bringing each file's CRC sidecar file along. Any stale
 * local checksum file at the destination is removed first.
 *
 * @param copyCrc whether to also copy the ".crc" checksum files
 */
public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
    throws IOException {
  if (!fs.isDirectory(src)) { // source is a file
    fs.copyToLocalFile(src, dst);
    FileSystem localFs = getLocal(getConf()).getRawFileSystem();
    // If dst is a directory, the copy landed inside it under src's name.
    if (localFs.isDirectory(dst)) {
      dst = new Path(dst, src.getName());
    }
    dst = getChecksumFile(dst);
    if (localFs.exists(dst)) {
      //remove old local checksum file
      localFs.delete(dst, true);
    }
    Path checksumFile = getChecksumFile(src);
    if (copyCrc && fs.exists(checksumFile)) {
      //copy checksum file
      fs.copyToLocalFile(checksumFile, dst);
    }
  } else {
    // Directory: recurse into each child.
    FileStatus[] srcs = listStatus(src);
    for (FileStatus srcFile : srcs) {
      copyToLocalFile(srcFile.getPath(),
                      new Path(dst, srcFile.getPath().getName()), copyCrc);
    }
  }
}
java
{ "resource": "" }
q160391
ChecksumFileSystem.reportChecksumFailure
train
/**
 * Hook invoked when a checksum mismatch is detected while reading f.
 * This default implementation does nothing and reports the failure as
 * unhandled; subclasses may override to e.g. quarantine the bad file.
 *
 * @return true if the failure was handled; always false here
 */
public boolean reportChecksumFailure(Path f, FSDataInputStream in,
    long inPos, FSDataInputStream sums, long sumsPos) {
  return false;
}
java
{ "resource": "" }
q160392
Encoder.recoverParityBlockToStream
train
/**
 * Recovers one parity block by re-encoding its source stripe and
 * streaming only the recovered block to {@code out}; the sibling parity
 * blocks of the stripe are encoded into null sinks.
 *
 * @param corruptOffset any offset within the corrupt parity block; it is
 *        rounded down to the block boundary
 * @return the CRC of the recovered block when a checksum store is
 *         configured, otherwise null
 */
public CRC32 recoverParityBlockToStream(
    FileSystem fs, FileStatus srcStat, long blockSize,
    Path parityFile, long corruptOffset,
    OutputStream out, Progressable progress) throws IOException {
  LOG.info("Recovering parity block" + parityFile + ":" + corruptOffset);
  Path srcFile = srcStat.getPath();
  // Round down to the start offset of the corrupt block.
  corruptOffset = (corruptOffset / blockSize) * blockSize;
  // Output streams to each block in the parity file stripe.
  OutputStream[] outs = new OutputStream[codec.parityLength];
  long indexOfCorruptBlockInParityStripe =
      (corruptOffset / blockSize) % codec.parityLength;
  LOG.info("Index of corrupt block in parity stripe: " +
      indexOfCorruptBlockInParityStripe);
  CRC32[] crcOuts = null;
  if (checksumStore != null) {
    crcOuts = new CRC32[codec.parityLength];
  }
  // Create a real output stream (and CRC tracker) only for the block we
  // want to recover; the rest of the stripe is discarded via null sinks.
  for (int i = 0; i < codec.parityLength; i++) {
    if (indexOfCorruptBlockInParityStripe == i) {
      outs[i] = out;
      if (checksumStore != null) {
        crcOuts[i] = new CRC32();
      }
    } else {
      outs[i] = new NullOutputStream();
    }
  }
  // Locate the source stripe that produced this parity stripe.
  long stripeIdx = corruptOffset / (codec.parityLength * blockSize);
  StripeReader sReader = StripeReader.getStripeReader(codec, conf,
      blockSize, fs, stripeIdx, srcStat);
  // Get input streams to each block in the source file stripe.
  assert sReader.hasNext() == true;
  InputStream[] blocks = sReader.getNextStripeInputs().getInputs();
  LOG.info("Starting recovery by using source stripe " +
      srcFile + ": stripe " + stripeIdx);
  try {
    // Re-encode the stripe; only the recovered block reaches `out`.
    encodeStripe(blocks, blockSize, outs, crcOuts, progress, false, null);
    if (checksumStore != null) {
      return crcOuts[(int)indexOfCorruptBlockInParityStripe];
    } else {
      return null;
    }
  } finally {
    // Always release the source-block streams.
    RaidUtils.closeStreams(blocks);
  }
}
java
{ "resource": "" }
q160393
Encoder.encodeStripe
train
/**
 * Encodes one stripe: reads the source blocks in parallel, runs the
 * erasure code over each buffer-full, and writes the resulting parity
 * bytes to outs[0..parityLength-1], optionally accumulating per-output
 * CRCs and source-block checksums.
 *
 * @param errorLocations if non-null, filled with the failing block
 *        indices when a read error aborts the encode
 * @throws IOException on any read error (no errors are tolerated)
 */
void encodeStripe(
    InputStream[] blocks,
    long blockSize,
    OutputStream[] outs,
    CRC32[] crcOuts,
    Progressable reporter,
    boolean computeSrcChecksum,
    List<Integer> errorLocations) throws IOException {
  configureBuffers(blockSize);
  int boundedBufferCapacity = 1;
  ParallelStreamReader parallelReader = new ParallelStreamReader(
      reporter, blocks, bufSize,
      parallelism, boundedBufferCapacity, blockSize,
      computeSrcChecksum, outs);
  parallelReader.start();
  try {
    for (long encoded = 0; encoded < blockSize; encoded += bufSize) {
      ParallelStreamReader.ReadResult readResult = null;
      try {
        readResult = parallelReader.getReadResult();
      } catch (InterruptedException e) {
        throw new IOException("Interrupted while waiting for read result");
      }
      // Cannot tolerate any IO errors.
      IOException readEx = readResult.getException();
      if (readEx != null) {
        // Report which blocks failed before aborting.
        if (errorLocations != null) {
          errorLocations.clear();
          for (int idx : readResult.getErrorIdx()) {
            errorLocations.add(idx);
          }
        }
        throw readEx;
      }
      code.encodeBulk(readResult.readBufs, writeBufs);
      reporter.progress();
      // Assume each byte is independently encoded
      int toWrite = (int)Math.min(blockSize - encoded, bufSize);
      // Now that we have some data to write, send it to the temp files.
      for (int i = 0; i < codec.parityLength; i++) {
        outs[i].write(writeBufs[i], 0, toWrite);
        if (crcOuts != null && crcOuts[i] != null) {
          crcOuts[i].update(writeBufs[i], 0, toWrite);
        }
        reporter.progress();
      }
    }
    if (computeSrcChecksum) {
      parallelReader.collectSrcBlocksChecksum(checksumStore);
    }
  } finally {
    // Stop the reader threads whether or not the encode completed.
    parallelReader.shutdown();
  }
}
java
{ "resource": "" }
q160394
Text.writeStringOpt
train
/**
 * Writes a String in Text's serialized form, with a fast path for
 * pure-ASCII strings (byte length equals char length, so the UTF-8
 * encoding pass is skipped). A null string is encoded as the sentinel
 * length NULL_STRING_LENGTH; non-ASCII strings fall back to the general
 * writeString() path.
 */
public static void writeStringOpt(DataOutput out, String str)
    throws IOException {
  if (str == null) {
    WritableUtils.writeVInt(out, NULL_STRING_LENGTH);
    return;
  }
  final int len = str.length();
  // Reusable scratch buffers sized for this string.
  TempArrays ta = UTF8.getArrays(len);
  byte[] rawBytes = ta.byteArray;
  char[] charArray = ta.charArray;
  str.getChars(0, len, charArray, 0);
  // Copy chars to bytes while checking that all are ASCII; bail out of
  // the copy as soon as a non-ASCII char is seen.
  boolean ascii = true;
  for (int i = 0; i < len; i++) {
    if (charArray[i] > UTF8.MAX_ASCII_CODE) {
      ascii = false;
      break;
    }
    rawBytes[i] = (byte) charArray[i];
  }
  if(ascii) {
    // ASCII: one byte per char, write the copied bytes directly.
    WritableUtils.writeVInt(out, len);
    out.write(rawBytes, 0, len);
  } else {
    // Non-ASCII: defer to the full UTF-8 encoding path.
    writeString(out, str);
  }
}
java
{ "resource": "" }
q160395
NICParser.query
train
/**
 * Runs {@code /sbin/ifconfig <device>} and parses its output into an
 * EventRecord: hardware/IP addresses, up/down status, and RX/TX counters.
 *
 * Fix: the original scanned the output for the IP address twice with the
 * same pattern and set the "ipAddress" field redundantly; it is now
 * extracted once and reused for both the address and the status fields.
 *
 * @param device the interface name, e.g. "eth0"
 */
public EventRecord query(String device) throws UnknownHostException {
  StringBuffer sb = Environment.runCommand("/sbin/ifconfig " + device);
  String output = sb.toString();
  EventRecord retval = new EventRecord(InetAddress.getLocalHost()
      .getCanonicalHostName(), InetAddress.getAllByName(InetAddress.getLocalHost()
      .getHostName()), Calendar.getInstance(), "NIC", "Unknown", device, "-");
  // NOTE(review): "[\\S{2}:]{17}" is a character class, not "two non-space
  // chars then a colon" — it happens to match MAC addresses but looks
  // unintended; confirm before tightening.
  retval.set("hwAddress", findPattern("HWaddr\\s*([\\S{2}:]{17})", output, 1));
  // Extract the IP address once; absence of an inet addr means DOWN.
  String ipAddress = findPattern("inet\\s+addr:\\s*([\\w.?]*)", output, 1);
  retval.set("ipAddress", ipAddress);
  retval.set("status", (ipAddress == null) ? "DOWN" : "UP");
  retval.set("rxPackets", findPattern("RX\\s*packets\\s*:\\s*(\\d+)", output, 1));
  retval.set("rxErrors", findPattern("RX.+errors\\s*:\\s*(\\d+)", output, 1));
  retval.set("rxDropped", findPattern("RX.+dropped\\s*:\\s*(\\d+)", output, 1));
  retval.set("rxOverruns", findPattern("RX.+overruns\\s*:\\s*(\\d+)", output, 1));
  retval.set("rxFrame", findPattern("RX.+frame\\s*:\\s*(\\d+)", output, 1));
  retval.set("txPackets", findPattern("TX\\s*packets\\s*:\\s*(\\d+)", output, 1));
  retval.set("txErrors", findPattern("TX.+errors\\s*:\\s*(\\d+)", output, 1));
  retval.set("txDropped", findPattern("TX.+dropped\\s*:\\s*(\\d+)", output, 1));
  retval.set("txOverruns", findPattern("TX.+overruns\\s*:\\s*(\\d+)", output, 1));
  retval.set("txCarrier", findPattern("TX.+carrier\\s*:\\s*(\\d+)", output, 1));
  retval.set("collisions", findPattern("\\s+collisions\\s*:\\s*(\\d+)", output, 1));
  retval.set("rxBytes", findPattern("RX\\s*bytes\\s*:\\s*(\\d+)", output, 1));
  retval.set("txBytes", findPattern("TX\\s*bytes\\s*:\\s*(\\d+)", output, 1));
  return retval;
}
java
{ "resource": "" }
q160396
JoinRecordReader.next
train
/**
 * Emits the next joined tuple. First drains any tuples still buffered in
 * the join collector; when that is exhausted, pulls the next key from
 * the reader queue, refills the collector, and tries again until a tuple
 * is produced or all readers are empty.
 *
 * @return true if a tuple was written into value, false at end of input
 */
public boolean next(K key, TupleWritable value) throws IOException {
  if (jc.flush(value)) {
    WritableUtils.cloneInto(key, jc.key());
    return true;
  }
  jc.clear();
  K iterkey = createKey();
  final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue();
  while (!q.isEmpty()) {
    // Gather values for the next key, then re-seed the collector with it.
    fillJoinCollector(iterkey);
    jc.reset(iterkey);
    if (jc.flush(value)) {
      WritableUtils.cloneInto(key, jc.key());
      return true;
    }
    // This key produced no joined tuple; discard and advance.
    jc.clear();
  }
  return false;
}
java
{ "resource": "" }
q160397
CompletedJobStatusStore.store
train
public void store(JobInProgress job) { if (active && retainTime > 0) { JobID jobId = job.getStatus().getJobID(); Path jobStatusFile = getInfoFilePath(jobId); try { FSDataOutputStream dataOut = fs.create(jobStatusFile); job.getStatus().write(dataOut); job.getProfile().write(dataOut); job.getCounters().write(dataOut); TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, Integer.MAX_VALUE); dataOut.writeInt(events.length); for (TaskCompletionEvent event : events) { event.write(dataOut); } dataOut.close(); } catch (IOException ex) { LOG.warn("Could not store [" + jobId + "] job info : " + ex.getMessage(), ex); try { fs.delete(jobStatusFile, true); } catch (IOException ex1) { //ignore } } } }
java
{ "resource": "" }
q160398
CompletedJobStatusStore.readJobStatus
train
/**
 * Reads the persisted JobStatus for the given job id.
 *
 * Fix: the input stream is now closed in a finally block, so it no
 * longer leaks when deserialization throws.
 *
 * @return the stored status, or null if the id is null, the store is
 *         inactive, no file exists, or reading fails (failure is logged)
 */
public JobStatus readJobStatus(JobID jobId) {
  JobStatus jobStatus = null;
  if (null == jobId) {
    LOG.warn("Could not read job status for null jobId");
    return null;
  }
  if (active) {
    try {
      FSDataInputStream dataIn = getJobInfoFile(jobId);
      if (dataIn != null) {
        try {
          jobStatus = readJobStatus(dataIn);
        } finally {
          dataIn.close();
        }
      }
    } catch (IOException ex) {
      LOG.warn("Could not read [" + jobId + "] job status : " + ex, ex);
    }
  }
  return jobStatus;
}
java
{ "resource": "" }
q160399
CompletedJobStatusStore.readJobProfile
train
/**
 * Reads the persisted JobProfile for the given job id. The status record
 * precedes the profile in the file, so it is read and discarded first.
 *
 * Fix: the input stream is now closed in a finally block, so it no
 * longer leaks when deserialization throws.
 *
 * @return the stored profile, or null if the store is inactive, no file
 *         exists, or reading fails (failure is logged)
 */
public JobProfile readJobProfile(JobID jobId) {
  JobProfile jobProfile = null;
  if (active) {
    try {
      FSDataInputStream dataIn = getJobInfoFile(jobId);
      if (dataIn != null) {
        try {
          // Skip past the leading JobStatus record.
          readJobStatus(dataIn);
          jobProfile = readJobProfile(dataIn);
        } finally {
          dataIn.close();
        }
      }
    } catch (IOException ex) {
      LOG.warn("Could not read [" + jobId + "] job profile : " + ex, ex);
    }
  }
  return jobProfile;
}
java
{ "resource": "" }