_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q162000 | CoronaStateUpdate.getTaskStatus | train | public TaskStatus getTaskStatus() {
// Returns the task status carried by this update, or null when the
// update does not contain a TaskStatusUpdate payload.
TaskStatusUpdate update = get(TaskStatusUpdate.class);
if (update == null) {
return null;
}
return update.getStatus();
} | java | {
"resource": ""
} |
q162001 | UniqValueCount.setMaxItems | train | public long setMaxItems(long n) {
// Adjust the cap on tracked unique items. The cap is raised to n when
// n is at least the current item count; otherwise, if the existing cap
// is at or above the current count, it is clamped down to that count
// (the cap never drops below items already recorded). Returns the
// effective cap after adjustment.
if (n >= numItems) {
this.maxNumItems = n;
} else if (this.maxNumItems >= this.numItems) {
this.maxNumItems = this.numItems;
}
return this.maxNumItems;
} | java | {
"resource": ""
} |
q162002 | PendingReplicationBlocks.add | train | void add(BlockInfo block, int numReplicas) {
// Record numReplicas pending replications for this block. Creates a
// new entry on first sight; otherwise bumps the pending count and
// refreshes the entry's timestamp (used for timeout detection).
synchronized (pendingReplications) {
PendingBlockInfo found = pendingReplications.get(block);
if (found == null) {
pendingReplications.put(block, new PendingBlockInfo(numReplicas));
} else {
found.incrementReplicas(numReplicas);
found.setTimeStamp();
}
}
} | java | {
"resource": ""
} |
q162003 | PendingReplicationBlocks.remove | train | void remove(Block block) {
// One replication of this block has completed: decrement its pending
// count and drop the whole entry once nothing remains pending.
// No-op for blocks with no pending entry.
synchronized (pendingReplications) {
PendingBlockInfo found = pendingReplications.get(block);
if (found != null) {
if (FSNamesystem.LOG.isDebugEnabled()) {
// Fixed log message: it previously read "...for blockblk_..."
// because the space before the block id was missing.
FSNamesystem.LOG.debug(
"Removing pending replication for block " + block);
}
found.decrementReplicas();
if (found.getNumReplicas() <= 0) {
pendingReplications.remove(block);
}
}
}
} | java | {
"resource": ""
} |
q162004 | PendingReplicationBlocks.getNumReplicas | train | int getNumReplicas(Block block) {
// Number of replicas of this block still pending replication, or 0
// when the block has no pending entry.
synchronized (pendingReplications) {
PendingBlockInfo info = pendingReplications.get(block);
return (info == null) ? 0 : info.getNumReplicas();
}
} | java | {
"resource": ""
} |
q162005 | PendingReplicationBlocks.getTimedOutBlocks | train | BlockInfo[] getTimedOutBlocks() {
// Drain and return the blocks whose pending replication timed out,
// or null when there are none. The list is cleared so each timed-out
// block is reported exactly once.
synchronized (timedOutItems) {
if (timedOutItems.size() <= 0) {
return null;
}
BlockInfo[] blockList = timedOutItems.toArray(
new BlockInfo[timedOutItems.size()]);
timedOutItems.clear();
return blockList;
}
} | java | {
"resource": ""
} |
q162006 | ReadaheadPool.getInstance | train | public static ReadaheadPool getInstance() {
// Lazily create the process-wide singleton under the class lock.
// Returns null when NativeIO support is unavailable, since the pool
// depends on native I/O calls.
synchronized (ReadaheadPool.class) {
if (instance == null && NativeIO.isAvailable()) {
instance = new ReadaheadPool();
}
return instance;
}
} | java | {
"resource": ""
} |
q162007 | ReadaheadPool.readaheadStream | train | public ReadaheadRequest readaheadStream(
String identifier,
FileDescriptor fd,
long curPos,
long readaheadLength,
long maxOffsetToRead,
ReadaheadRequest lastReadahead) {
// Possibly submit a new readahead for this stream. Returns the value
// the caller should remember as "lastReadahead" for its next call:
// a newly submitted request, the still-outstanding previous one, or
// null when readahead is disabled (length <= 0) or the end of the
// readable range has been reached.
Preconditions.checkArgument(curPos <= maxOffsetToRead,
"Readahead position %s higher than maxOffsetToRead %s",
curPos, maxOffsetToRead);
if (readaheadLength <= 0) {
return null;
}
// Long.MIN_VALUE forces a submit on the first call for a stream.
long lastOffset = Long.MIN_VALUE;
if (lastReadahead != null) {
lastOffset = lastReadahead.getOffset();
}
// trigger each readahead when we have reached the halfway mark
// in the previous readahead. This gives the system time
// to satisfy the readahead before we start reading the data.
long nextOffset = lastOffset + readaheadLength / 2;
if (curPos >= nextOffset) {
// cancel any currently pending readahead, to avoid
// piling things up in the queue. Each reader should have at most
// one outstanding request in the queue.
if (lastReadahead != null) {
lastReadahead.cancel();
lastReadahead = null;
}
long length = Math.min(readaheadLength,
maxOffsetToRead - curPos);
if (length <= 0) {
// we've reached the end of the stream
return null;
}
return submitReadahead(identifier, fd, curPos, length);
} else {
return lastReadahead;
}
} | java | {
"resource": ""
} |
q162008 | ReadaheadPool.submitReadahead | train | public ReadaheadRequest submitReadahead(
String identifier, FileDescriptor fd, long off, long len) {
// Unconditionally queue a readahead of [off, off+len) on the pool's
// executor and return the (cancelable) request handle.
ReadaheadRequestImpl req = new ReadaheadRequestImpl(
identifier, fd, off, len);
pool.execute(req);
if (LOG.isTraceEnabled()) {
LOG.trace("submit readahead: " + req);
}
return req;
} | java | {
"resource": ""
} |
q162009 | StatisticsCollector.getSaving | train | public long getSaving() {
// Total saving summed across all codecs from the last collected raid
// statistics. Returns -1 when statistics have not been collected yet
// or when any codec reports -1 (unknown).
if (lastRaidStatistics == null) {
return -1;
}
long saving = 0;
for (Codec codec : Codec.getCodecs()) {
String code = codec.id;
long s = lastRaidStatistics.get(code).getSaving(conf);
if (s == -1) {
return -1;
}
saving += s;
}
return saving;
} | java | {
"resource": ""
} |
q162010 | StatisticsCollector.populateSaving | train | public void populateSaving(RaidNodeMetrics metrics) {
// Publish per-codec and total savings into the metrics object.
// Only positive values are published; no-op when statistics have not
// been collected yet.
if (lastRaidStatistics == null) {
return;
}
long saving = 0;
for (Codec codec : Codec.getCodecs()) {
String code = codec.id;
long s = lastRaidStatistics.get(code).getSaving(conf);
if (s > 0) {
metrics.savingForCode.get(code).set(s);
saving += s;
}
}
if (saving > 0) {
metrics.saving.set(saving);
}
} | java | {
"resource": ""
} |
q162011 | StatisticsCollector.sortPathByDepth | train | private void sortPathByDepth(Path[] paths) {
// Sort paths in place from shallowest to deepest. Uses
// Integer.compare instead of boxing each depth into an Integer,
// avoiding needless allocation during the sort.
Arrays.sort(paths, new Comparator<Path> (){
@Override
public int compare(Path o1, Path o2) {
return Integer.compare(o1.depth(), o2.depth());
}});
} | java | {
"resource": ""
} |
q162012 | StatisticsCollector.mergeRoots | train | private List<Path> mergeRoots(Path[] dupRoots) {
// Collapse possibly-overlapping roots into a minimal list: a
// candidate is dropped when an already-accepted root is its
// ancestor. Sorting by depth first guarantees ancestors are
// considered before their descendants.
sortPathByDepth(dupRoots);
List<Path> roots = new ArrayList<Path>();
for (Path candidate : dupRoots) {
boolean shouldAdd = true;
for (Path root : roots) {
if (isAncestorPath(root.toUri().getPath(),
candidate.toUri().getPath())) {
shouldAdd = false;
break;
}
}
if (shouldAdd) {
roots.add(candidate);
}
}
return roots;
} | java | {
"resource": ""
} |
q162013 | StatisticsCollector.submitRaidJobsWhenPossible | train | private List<FileStatus> submitRaidJobsWhenPossible(PolicyInfo info,
List<FileStatus> filesToRaid, boolean submitAll) {
// Submit raid jobs in batches of up to maxFilesPerJob while the
// policy stays under its running-job limit. Returns the files NOT
// submitted, so the caller can retry them later. Unless submitAll is
// set, a final partial batch is held back for a future pass.
if (!submitRaidJobs || !info.getShouldRaid()) {
return filesToRaid;
}
try {
int maxFilesPerJob = configManager.getMaxFilesPerJob();
int maxJobs = configManager.getMaxJobsPerPolicy();
while (!filesToRaid.isEmpty() &&
(submitAll || filesToRaid.size() >= maxFilesPerJob) &&
raidNode.getRunningJobsForPolicy(info.getName()) < maxJobs) {
int numFiles = Math.min(maxFilesPerJob, filesToRaid.size());
LOG.info("Invoking raidFiles with " + numFiles + " files");
raidNode.raidFiles(info, filesToRaid.subList(0, numFiles));
filesToRaid =
filesToRaid.subList(numFiles, filesToRaid.size());
}
} catch (IOException e) {
// Best effort: on failure the remaining files are simply returned
// and will be retried later.
LOG.warn("Failed to raid files for policy:" + info.getName(), e);
}
return filesToRaid;
} | java | {
"resource": ""
} |
q162014 | ServiceAuthorizationManager.authorize | train | public static void authorize(Subject user, Class<?> protocol)
throws AuthorizationException {
// Authorize the user against the given protocol, caching one
// ConnectionPermission per protocol class.
// NOTE(review): unsynchronized check-then-put on
// protocolToPermissionMap; if the backing map is a plain HashMap
// this is racy under concurrent callers -- confirm the map type.
Permission permission = protocolToPermissionMap.get(protocol);
if (permission == null) {
permission = new ConnectionPermission(protocol);
protocolToPermissionMap.put(protocol, permission);
}
checkPermission(user, permission);
} | java | {
"resource": ""
} |
q162015 | FSDataset.getFinalizedBlockLength | train | public long getFinalizedBlockLength(int namespaceId, Block b) throws IOException {
// Length of the finalized on-disk data for this block.
// @throws IOException when the block is unknown to the volume map.
DatanodeBlockInfo info = volumeMap.get(namespaceId, b);
if (info == null) {
throw new IOException("Can't find block " + b + " in volumeMap");
}
return info.getFinalizedSize();
} | java | {
"resource": ""
} |
q162016 | FSDataset.getBlockFile | train | public File getBlockFile(int namespaceId, Block b) throws IOException {
// Resolve the data file backing a valid block.
// @throws IOException when the block is not valid in this namespace.
File f = validateBlockFile(namespaceId, b);
if (f == null) {
if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
InterDatanodeProtocol.LOG
.debug("b=" + b + ", volumeMap=" + volumeMap);
}
throw new IOException("Block " + b + ", namespace= " + namespaceId
+ " is not valid.");
}
return f;
} | java | {
"resource": ""
} |
q162017 | FSDataset.detachBlock | train | public boolean detachBlock(int namespaceId, Block block, int numLinks) throws IOException {
// Detach the block if it has more than numLinks hard links.
// Fix: a block missing from the volume map previously caused a
// NullPointerException on the final call; report it as an
// IOException instead, matching getFinalizedBlockLength().
DatanodeBlockInfo info = null;
lock.readLock().lock();
try {
info = volumeMap.get(namespaceId, block);
} finally {
lock.readLock().unlock();
}
if (info == null) {
throw new IOException("Can't find block " + block + " in volumeMap");
}
return info.detachBlock(namespaceId, block, numLinks);
} | java | {
"resource": ""
} |
q162018 | FSDataset.interruptAndJoinThreads | train | private boolean interruptAndJoinThreads(List<Thread> threads) {
// Interrupt every thread, then wait for each to die. Returns false
// when this thread is itself interrupted while waiting.
// interrupt and wait for all ongoing create threads
for(Thread t : threads) {
t.interrupt();
}
for(Thread t : threads) {
try {
t.join();
} catch (InterruptedException e) {
DataNode.LOG.warn("interruptOngoingCreates: t=" + t, e);
// Re-assert the interrupt status instead of swallowing it, so
// callers up the stack can observe the interruption.
Thread.currentThread().interrupt();
return false;
}
}
return true;
} | java | {
"resource": ""
} |
q162019 | FSDataset.getActiveThreads | train | private ArrayList<Thread> getActiveThreads(int namespaceId, Block block) {
// Snapshot the live writer threads of a block being created, pruning
// dead threads from the active file's list as a side effect. Returns
// null when there is no ongoing create or no live thread remains.
// The write lock is taken because the thread list is mutated.
lock.writeLock().lock();
try {
//check ongoing create threads
final ActiveFile activefile = volumeMap.getOngoingCreates(namespaceId, block);
if (activefile != null && !activefile.threads.isEmpty()) {
//remove dead threads
for(Iterator<Thread> i = activefile.threads.iterator(); i.hasNext(); ) {
final Thread t = i.next();
if (!t.isAlive()) {
i.remove();
}
}
//return living threads
if (!activefile.threads.isEmpty()) {
return new ArrayList<Thread>(activefile.threads);
}
}
} finally {
lock.writeLock().unlock();
}
return null;
} | java | {
"resource": ""
} |
q162020 | FSDataset.isBlockFinalizedWithLock | train | private boolean isBlockFinalizedWithLock(int namespaceId, Block b) {
// Read-locked wrapper around isBlockFinalizedInternal.
lock.readLock().lock();
try {
return isBlockFinalizedInternal(namespaceId, b, true);
} finally {
lock.readLock().unlock();
}
} | java | {
"resource": ""
} |
q162021 | FSDataset.delBlockFromDisk | train | private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) {
// Delete a block's data file and, if present, its meta file.
// A null blockFile is treated as success (nothing on disk for this
// block). Returns false when any delete fails; failures are logged.
if (blockFile == null) {
DataNode.LOG.warn("No file exists for block: " + b);
return true;
}
if (!blockFile.delete()) {
DataNode.LOG.warn("Not able to delete the block file: " + blockFile);
return false;
} else { // remove the meta file
if (metaFile != null && !metaFile.delete()) {
DataNode.LOG.warn(
"Not able to delete the meta block file: " + metaFile);
return false;
}
}
return true;
} | java | {
"resource": ""
} |
q162022 | FSDataset.getBlocksBeingWrittenReport | train | public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
// Report all blocks currently being written in this namespace as an
// array snapshot of the volumes' blocks-being-written info.
LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
Block blockTable[] = new Block[blockSet.size()];
int i = 0;
for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
blockTable[i] = it.next();
}
return blockTable;
} | java | {
"resource": ""
} |
q162023 | FSDataset.getBlockReport | train | public Block[] getBlockReport(int namespaceId) throws IOException {
// Build a full block report for the namespace by draining every
// bucket of its namespace map. Returns an empty array for an unknown
// namespace.
ArrayList<Block> ret = new ArrayList<Block>();
org.apache.hadoop.hdfs.server.datanode.NamespaceMap nm = volumeMap
.getNamespaceMap(namespaceId);
if (nm == null) {
return new Block[0];
}
int n = nm.getNumBucket();
for (int i = 0; i < n; i++) {
BlockBucket bb = nm.getBucket(i);
bb.getBlockReport(ret);
}
return ret.toArray(new Block[ret.size()]);
} | java | {
"resource": ""
} |
q162024 | FSDataset.isValidBlock | train | public boolean isValidBlock(int namespaceId, Block b, boolean checkSize)
throws IOException {
// A block is valid when its data file can be located (and optionally
// size-checked) and the block is finalized. Validation failures are
// logged and treated as "not valid" rather than propagated.
// (Removed a stray empty statement that followed the declaration.)
File f = null;
try {
f = getValidateBlockFile(namespaceId, b, checkSize);
} catch (IOException e) {
DataNode.LOG.warn("Block " + b + " is not valid:", e);
}
return (f != null) && isBlockFinalizedWithLock(namespaceId, b);
} | java | {
"resource": ""
} |
q162025 | FSDataset.getFile | train | public File getFile(int namespaceId, Block b) {
// Look up the data file to read for this block, or null when the
// block is absent from the volume map. The read lock guards the
// map lookup.
lock.readLock().lock();
try {
DatanodeBlockInfo blockInfo = volumeMap.get(namespaceId, b);
return (blockInfo == null) ? null : blockInfo.getDataFileToRead();
} finally {
lock.readLock().unlock();
}
} | java | {
"resource": ""
} |
q162026 | FSDataset.checkDataDir | train | public void checkDataDir() throws DiskErrorException {
// Check all data directories; on volume failure, purge the failed
// volumes' blocks from the volume map and report via
// DiskErrorException. Returns normally only when no volume failed.
// NOTE(review): total_blocks and removed_blocks are never updated,
// so the warn line below always reports "0 out of 0" -- confirm
// whether the counts were meant to come from removeUnhealthyVolumes.
long total_blocks=0, removed_blocks=0;
List<FSVolume> failed_vols = null;
failed_vols = volumes.checkDirs();
//if there no failed volumes return
if(failed_vols == null)
return;
// else
// remove related blocks
long mlsec = System.currentTimeMillis();
lock.writeLock().lock();
try {
volumeMap.removeUnhealthyVolumes(failed_vols);
} finally {
lock.writeLock().unlock();
}
mlsec = System.currentTimeMillis() - mlsec;
DataNode.LOG.warn(">>>>>>>>>>>>Removed " + removed_blocks + " out of " + total_blocks +
"(took " + mlsec + " millisecs)");
// report the error
StringBuilder sb = new StringBuilder();
for(FSVolume fv : failed_vols) {
sb.append(fv.toString() + ";");
}
throw new DiskErrorException("DataNode failed volumes:" + sb);
} | java | {
"resource": ""
} |
q162027 | FSDataset.removeVolumes | train | public void removeVolumes(Configuration conf, List<File> directories)
throws Exception {
// Remove the given volume directories and purge their blocks from
// the volume map.
// NOTE(review): this method always throws DiskErrorException at the
// end (even after successful removal) to report the removed volumes
// -- confirm callers depend on that behavior.
if (directories == null || directories.isEmpty()) {
DataNode.LOG.warn("There were no directories to remove. Exiting ");
return;
}
List<FSVolume> volArray = null;
lock.readLock().lock();
try {
volArray = volumes.removeBVolumes(directories);
} finally {
lock.readLock().unlock();
}
// remove related blocks
long mlsec = System.currentTimeMillis();
lock.writeLock().lock();
try {
volumeMap.removeUnhealthyVolumes(volArray);
} finally {
lock.writeLock().unlock();
}
mlsec = System.currentTimeMillis() - mlsec;
DataNode.LOG.warn(">>>>>>>>>Removing these blocks took " + mlsec +
" millisecs in refresh<<<<<<<<<<<<<<< ");
StringBuilder sb = new StringBuilder();
for(FSVolume fv : volArray) {
sb.append(fv.toString() + ";");
}
throw new DiskErrorException("These volumes were removed: " + sb);
} | java | {
"resource": ""
} |
q162028 | FSDataset.copyFile | train | public void copyFile(File src, File dst, boolean hardlink) throws IOException {
// Copy (or hard link) src to dst. When hard linking is requested and
// enabled, an existing dst is deleted first so the link can succeed;
// a hard-link failure falls through to a regular channel copy.
if (src == null || dst == null) {
throw new IOException("src/dst file is null");
}
try {
if (hardlink && shouldHardLinkBlockCopy) {
// Remove destination before hard linking, since this file might already
// exist and a hardlink would fail as a result.
if (dst.exists()) {
if(!dst.delete()) {
throw new IOException("Deletion of file : " + dst + " failed");
}
}
NativeIO.link(src, dst);
DataNode.LOG.info("Hard Link Created from : " + src + " to " + dst);
return;
}
} catch (IOException e) {
DataNode.LOG.warn("Hard link failed from : " + src + " to " + dst
+ " continuing with regular file copy");
}
FileChannel input = null;
FileChannel output = null;
try {
// This improves copying performance a lot, it uses native buffers
// for copying.
input = new FileInputStream(src).getChannel();
output = new FileOutputStream(dst).getChannel();
// (Removed a dead null check here: the stream constructors throw on
// failure and getChannel() never returns null.)
long bytesLeft = input.size();
long position = 0;
while (bytesLeft > 0) {
long bytesWritten = output.transferFrom(input, position, bytesLeft);
bytesLeft -= bytesWritten;
position += bytesWritten;
}
if (datanode.syncOnClose) {
output.force(true);
}
} finally {
if (input != null) {
input.close();
}
if (output != null) {
output.close();
}
}
} | java | {
"resource": ""
} |
q162029 | FSDataset.findVolumeForHardLink | train | private FSVolume findVolumeForHardLink(String srcFileSystem,
int srcNamespaceId, Block srcBlock, File srcBlockFile)
throws IOException {
// Find a destination volume on the same file system as the source
// block file so the copy can be done as a hard link. Prefers the
// volume recorded for the source block; otherwise scans all volumes
// for a matching file system. Returns null when none is suitable.
FSVolume dstVol = null;
if (srcBlockFile == null || !srcBlockFile.exists()) {
throw new IOException("File " + srcBlockFile
+ " is not valid or does not have"
+ " a valid block file");
}
// The source file might not necessarily be a part of the FSVolumeSet of
// this datanode, it could be part of a FSVolumeSet of another datanode on
// the same host.
DatanodeBlockInfo blockInfo = volumeMap.get(srcNamespaceId, srcBlock);
if (blockInfo != null) {
dstVol = blockInfo.getBlockDataFile().getVolume();
} else {
for(FSVolume volume : volumes.getVolumes()) {
String volFileSystem = volume.getFileSystem();
if (volFileSystem.equals(srcFileSystem)) {
dstVol = volume;
break;
}
}
}
return dstVol;
} | java | {
"resource": ""
} |
q162030 | FSDataset.copyBlockLocalAdd | train | private boolean copyBlockLocalAdd(String srcFileSystem, File srcBlockFile,
int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock)
throws IOException {
// First phase of a local block copy: under the write lock, validate
// source and destination, pick a destination volume (preferring one
// that permits a hard link), create the tmp file, and register the
// block as an ongoing create. Returns true when the copy can proceed
// as a hard link, false when a regular file copy is required.
boolean hardlink = true;
File dstBlockFile = null;
lock.writeLock().lock();
try {
if (isValidBlock(dstNamespaceId, dstBlock, false) ||
volumeMap.getOngoingCreates(dstNamespaceId, dstBlock) != null) {
throw new BlockAlreadyExistsException("Block " + dstBlock
+ " already exists");
}
if (srcBlockFile == null || !srcBlockFile.exists()) {
throw new IOException("Block " + srcBlock.getBlockName()
+ " is not valid or does not have a valid block file");
}
boolean inlineChecksum = Block.isInlineChecksumBlockFilename(srcBlockFile
.getName());
FSVolume dstVol = null;
if (shouldHardLinkBlockCopy) {
dstVol = findVolumeForHardLink(
srcFileSystem, srcNamespaceId, srcBlock, srcBlockFile);
}
// Could not find a volume for a hard link, fall back to regular file
// copy.
if (dstVol == null) {
dstVol = volumes.getNextVolume(srcBlock.getNumBytes());
hardlink = false;
}
int checksumType = DataChecksum.CHECKSUM_UNKNOWN;
int bytesPerChecksum = -1;
if (inlineChecksum) {
// Inline-checksum files encode checksum parameters in the name.
GenStampAndChecksum sac = BlockInlineChecksumReader
.getGenStampAndChecksumFromInlineChecksumFile(srcBlockFile
.getName());
checksumType = sac.checksumType;
bytesPerChecksum = sac.bytesPerChecksum;
}
List<Thread> threads = null;
// We do not want to create a BBW, hence treat this as a replication
// request.
dstBlockFile = createTmpFile(dstNamespaceId, dstVol, dstBlock, true,
inlineChecksum, checksumType, bytesPerChecksum);
DatanodeBlockInfo binfo = new DatanodeBlockInfo(dstVol, dstBlockFile,
DatanodeBlockInfo.UNFINALIZED, true, inlineChecksum, checksumType,
bytesPerChecksum, false, 0);
volumeMap.add(dstNamespaceId, dstBlock, binfo);
volumeMap.addOngoingCreates(dstNamespaceId, dstBlock, new ActiveFile(
binfo, threads, ActiveFile.UNKNOWN_SIZE, false));
} finally {
lock.writeLock().unlock();
}
if (dstBlockFile == null) {
throw new IOException("Could not allocate block file for : " +
dstBlock.getBlockName());
}
return hardlink;
} | java | {
"resource": ""
} |
q162031 | FSDataset.copyBlockLocalFinalize | train | private void copyBlockLocalFinalize(int dstNamespaceId,
Block dstBlock, File dstBlockFile)
throws IOException {
// Second phase of a local block copy: compute the logical block size
// (accounting for inline checksums), move the file to its finalized
// location on the destination volume, update the volume map, and
// drop the ongoing-create entry. All under the write lock.
boolean inlineChecksum = Block.isInlineChecksumBlockFilename(dstBlockFile
.getName());
long blkSize = 0;
long fileSize = dstBlockFile.length();
lock.writeLock().lock();
try {
DatanodeBlockInfo info = volumeMap.get(dstNamespaceId, dstBlock);
if (info == null) {
throw new IOException("Could not find information for " + dstBlock);
}
if (inlineChecksum) {
// File length includes interleaved checksums; derive data length.
blkSize = BlockInlineChecksumReader.getBlockSizeFromFileLength(fileSize,
info.getChecksumType(), info.getBytesPerChecksum());
} else {
blkSize = fileSize;
}
FSVolume dstVol = info.getBlockDataFile().getVolume();
// Finalize block on disk.
File dest = dstVol.addBlock(dstNamespaceId, dstBlock, dstBlockFile,
info.isInlineChecksum(), info.getChecksumType(),
info.getBytesPerChecksum());
volumeMap.add(dstNamespaceId, dstBlock,
new DatanodeBlockInfo(dstVol, dest, blkSize, true, inlineChecksum,
info.getChecksumType(), info.getBytesPerChecksum(), false, 0));
volumeMap.removeOngoingCreates(dstNamespaceId, dstBlock);
} finally {
lock.writeLock().unlock();
}
} | java | {
"resource": ""
} |
q162032 | DFSUtil.byteArray2String | train | public static String byteArray2String(byte[][] pathComponents) {
// Join path components into a path string separated by
// Path.SEPARATOR_CHAR. An empty array yields ""; a single empty
// component yields the root separator. Returns null when any
// component fails to decode.
if (pathComponents.length == 0) {
return "";
}
if (pathComponents.length == 1 && pathComponents[0].length == 0) {
return Path.SEPARATOR;
}
StringBuilder joined = new StringBuilder();
for (int idx = 0; idx < pathComponents.length; idx++) {
String piece = bytes2String(pathComponents[idx]);
if (piece == null) {
return null;
}
if (idx > 0) {
joined.append(Path.SEPARATOR_CHAR);
}
joined.append(piece);
}
return joined.toString();
} | java | {
"resource": ""
} |
q162033 | DFSUtil.bytes2String | train | public static String bytes2String(byte[] bytes) {
// Decode a UTF-8 byte array into a String with an ASCII fast path:
// when every byte is ASCII, bytes map 1:1 to chars and the charset
// decoder is skipped. Falls back to the full UTF-8 decode on the
// first non-ASCII byte.
try {
final int len = bytes.length;
char[] charArray = UTF8.getCharArray(len);
for (int i = 0; i < bytes.length; i++) {
if (bytes[i] < UTF8.MIN_ASCII_CODE) {
// non-ASCII codepoints' higher bytes
// are of the form (10xxxxxx), hence the bytes
// represent a non-ASCII string
// do expensive conversion
return new String(bytes, utf8charsetName);
}
// copy to temporary array
charArray[i] = (char) bytes[i];
}
// only ASCII bytes, do fast conversion
// using bytes as actual characters
return new String(charArray, 0, len);
} catch (UnsupportedEncodingException e) {
// UTF-8 support is mandatory on the JVM; unreachable in practice.
assert false : "UTF8 encoding is not supported ";
}
return null;
} | java | {
"resource": ""
} |
q162034 | DFSUtil.string2Bytes | train | public static byte[] string2Bytes(String str) {
// Encode a String to UTF-8 bytes with an ASCII fast path: when every
// char is ASCII, chars map 1:1 to bytes and the charset encoder is
// skipped. Falls back to the full UTF-8 encode on the first
// non-ASCII char.
try {
final int len = str.length();
// if we can, we will use it to return the bytes
byte[] rawBytes = new byte[len];
// get all chars of the given string
char[] charArray = UTF8.getCharArray(len);
str.getChars(0, len, charArray, 0);
for (int i = 0; i < len; i++) {
if (charArray[i] > UTF8.MAX_ASCII_CODE) {
// non-ASCII chars present
// do expensive conversion
return str.getBytes(utf8charsetName);
}
// copy to output array
rawBytes[i] = (byte) charArray[i];
}
// only ASCII present - return raw bytes
return rawBytes;
} catch (UnsupportedEncodingException e) {
// UTF-8 support is mandatory on the JVM; unreachable in practice.
assert false : "UTF8 encoding is not supported ";
}
return null;
} | java | {
"resource": ""
} |
q162035 | DFSUtil.bytes2byteArray | train | public static byte[][] bytes2byteArray(byte[] bytes, int len, byte separator) {
// Split the first len bytes on the separator byte into components.
// Special cases: len == 0 yields { null }; an input consisting only
// of separators yields { empty }. Trailing separators do not
// produce trailing components.
assert len <= bytes.length;
int splits = 0;
if (len == 0) {
return new byte[][] { null };
}
// Count the splits. Omit multiple separators and the last one
for (int i = 0; i < len; i++) {
if (bytes[i] == separator) {
splits++;
}
}
int last = len - 1;
// Walk back over trailing separators so they are not counted.
while (last > -1 && bytes[last--] == separator) {
splits--;
}
if (splits == 0 && bytes[0] == separator) {
return new byte[][] { new byte[0] };
}
splits++;
byte[][] result = new byte[splits][];
int startIndex = 0;
int nextIndex = 0;
int index = 0;
// Build the splits
while (index < splits) {
while (nextIndex < len && bytes[nextIndex] != separator) {
nextIndex++;
}
result[index] = new byte[nextIndex - startIndex];
System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
- startIndex);
index++;
startIndex = nextIndex + 1;
nextIndex = startIndex;
}
return result;
} | java | {
"resource": ""
} |
q162036 | DFSUtil.extractBytes | train | private static byte[] extractBytes(
String str,
int startIndex,
int endIndex,
char[] charArray,
boolean canFastConvert) throws UnsupportedEncodingException {
// Extract UTF-8 bytes for str[startIndex, endIndex). When
// canFastConvert is true the chars are known to be ASCII and are
// copied byte-for-byte from charArray; otherwise the substring is
// encoded through the UTF-8 charset.
if (canFastConvert) {
// fast conversion, just copy the raw bytes
final int len = endIndex - startIndex;
byte[] strBytes = new byte[len];
for (int i = 0; i < len; i++) {
strBytes[i] = (byte) charArray[startIndex + i];
}
return strBytes;
}
// otherwise, do expensive conversion
return str.substring(startIndex, endIndex).getBytes(utf8charsetName);
} | java | {
"resource": ""
} |
q162037 | DFSUtil.getConfValue | train | private static String getConfValue(String defaultValue, String keySuffix,
Configuration conf, String... keys) {
String value = null;
for (String key : keys) {
if (keySuffix != null) {
key += "." + keySuffix;
}
value = conf.get(key);
if (value != null) {
break;
}
}
if (value == null) {
value = defaultValue;
}
return value;
} | java | {
"resource": ""
} |
q162038 | DFSUtil.getAddresses | train | public static List<InetSocketAddress> getAddresses(Configuration conf,
String defaultAddress, String... keys) {
// Convenience overload: resolve addresses using the name service ids
// found in the configuration.
return getAddresses(conf, getNameServiceIds(conf), defaultAddress, keys);
} | java | {
"resource": ""
} |
q162039 | DFSUtil.getRPCAddresses | train | public static List<InetSocketAddress> getRPCAddresses(String suffix,
Configuration conf, Collection<String> serviceIds, String... keys)
throws IOException {
// Resolve namenode RPC addresses for the given keys with the suffix
// appended. Fixes: the suffix used to be appended to the caller's
// varargs array in place, silently mutating the caller's array; a
// suffixed copy is built instead. The error message now uses a
// StringBuilder rather than string concatenation in a loop.
// Use default address as fall back
String defaultAddress = null;
try {
defaultAddress = conf.get(FileSystem.FS_DEFAULT_NAME_KEY + suffix);
if (defaultAddress != null) {
defaultAddress = NameNode.getDefaultAddress(conf);
}
} catch (IllegalArgumentException e) {
defaultAddress = null;
}
String[] suffixedKeys = new String[keys.length];
for (int i = 0; i < keys.length; i++) {
suffixedKeys[i] = keys[i] + suffix;
}
List<InetSocketAddress> addressList = DFSUtil.getAddresses(conf,
serviceIds, defaultAddress,
suffixedKeys);
if (addressList == null) {
StringBuilder keyStr = new StringBuilder();
for (String key: suffixedKeys) {
keyStr.append(key).append(" ");
}
throw new IOException("Incorrect configuration: namenode address "
+ keyStr
+ " is not configured.");
}
return addressList;
} | java | {
"resource": ""
} |
q162040 | DFSUtil.setGenericConf | train | public static String[] setGenericConf(String[] argv, Configuration conf) {
// Extract the "-service <id>" option from argv; when present,
// validate the id, copy the nameservice-specific keys over the
// generic keys, and set up the default URI. Returns argv with the
// service option removed.
String[] serviceId = new String[1];
serviceId[0] = "";
String[] filteredArgv = getServiceName(argv, serviceId);
if (!serviceId[0].equals("")) {
if (!NameNode.validateServiceName(conf, serviceId[0])) {
throw new IllegalArgumentException("Service Id doesn't match the config");
}
setGenericConf(conf, serviceId[0], NameNode.NAMESERVICE_SPECIFIC_KEYS);
NameNode.setupDefaultURI(conf);
}
return filteredArgv;
} | java | {
"resource": ""
} |
q162041 | DFSUtil.getServiceName | train | public static String[] getServiceName(String[] argv, String[] serviceId)
throws IllegalArgumentException {
// Scan argv for "-service <id>", store the id into serviceId[0]
// (an out-parameter), and return argv with the option pair removed.
// @throws IllegalArgumentException when "-service" has no argument.
ArrayList<String> newArgvList = new ArrayList<String>();
for (int i = 0; i < argv.length; i++) {
if ("-service".equals(argv[i])) {
if (i+1 == argv.length ) {
throw new IllegalArgumentException("Doesn't have service id");
}
serviceId[0] = argv[++i];
} else {
newArgvList.add(argv[i]);
}
}
String[] newArgvs = new String[newArgvList.size()];
newArgvList.toArray(newArgvs);
return newArgvs;
} | java | {
"resource": ""
} |
q162042 | DFSUtil.getAddresses | train | public static List<InetSocketAddress> getAddresses(Configuration conf,
Collection<String> serviceIds, String defaultAddress, String... keys) {
// Resolve one socket address per configured nameservice (or a single
// address when no nameservices are configured). Returns null as soon
// as any required address is missing.
// NOTE(review): the serviceIds parameter is never read; the method
// recomputes the ids from conf via getNameServiceIds. Confirm
// whether callers expect their serviceIds argument to be honored.
Collection<String> nameserviceIds = getNameServiceIds(conf);
List<InetSocketAddress> isas = new ArrayList<InetSocketAddress>();
// Configuration with a single namenode
if (nameserviceIds == null || nameserviceIds.isEmpty()) {
String address = getConfValue(defaultAddress, null, conf, keys);
if (address == null) {
return null;
}
isas.add(NetUtils.createSocketAddr(address));
} else {
// Get the namenodes for all the configured nameServiceIds
for (String nameserviceId : nameserviceIds) {
String address = getConfValue(null, nameserviceId, conf, keys);
if (address == null) {
return null;
}
isas.add(NetUtils.createSocketAddr(address));
}
}
return isas;
} | java | {
"resource": ""
} |
q162043 | DFSUtil.getClientRpcAddresses | train | public static List<InetSocketAddress> getClientRpcAddresses(
Configuration conf, Collection<String> suffixes) throws IOException {
// Resolve client-facing namenode RPC addresses. With suffixes,
// collect the suffixed RPC address for each one; otherwise fall
// back to the default namenode address.
// @throws IOException when no address can be resolved.
List<InetSocketAddress> addressList;
if(suffixes != null && !suffixes.isEmpty()){
addressList = new ArrayList<InetSocketAddress>();
for (String s : suffixes) {
addressList.addAll(getRPCAddresses(s, conf, getNameServiceIds(conf),
FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY));
}
} else {
// Use default address as fall back
String defaultAddress;
try {
defaultAddress = NameNode.getDefaultAddress(conf);
} catch (IllegalArgumentException e) {
defaultAddress = null;
}
addressList = getAddresses(conf, defaultAddress,
FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
}
if (addressList == null || addressList.isEmpty()) {
throw new IOException("Incorrect configuration: namenode address "
+ FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
+ " is not configured.");
}
return addressList;
} | java | {
"resource": ""
} |
q162044 | DFSUtil.getNNServiceRpcAddresses | train | public static List<InetSocketAddress> getNNServiceRpcAddresses(
Configuration conf) throws IOException {
// Resolve the namenode addresses datanodes should use, preferring
// the datanode-protocol address key over the client RPC key, with
// the default namenode address as a final fallback.
// Use default address as fall back
String defaultAddress;
try {
defaultAddress = NameNode.getDefaultAddress(conf);
} catch (IllegalArgumentException e) {
defaultAddress = null;
}
List<InetSocketAddress> addressList = getAddresses(conf, defaultAddress,
NameNode.DATANODE_PROTOCOL_ADDRESS, FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
if (addressList == null) {
throw new IOException("Incorrect configuration: namenode address "
+ NameNode.DATANODE_PROTOCOL_ADDRESS + " or "
+ FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
+ " is not configured.");
}
return addressList;
} | java | {
"resource": ""
} |
q162045 | DFSUtil.getInfoServer | train | public static String getInfoServer(
InetSocketAddress namenode, Configuration conf, boolean isAvatar) {
// Determine the HTTP info-server address for a namenode. Resolution
// order: federated lookup via the namenode's nameservice id (using
// "0"/"1"-suffixed keys for avatar setups), then non-federated
// avatar keys, then the plain http-address key with the legacy
// bind-address/port pair as the default.
String httpAddressDefault =
NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
"dfs.info.port", "dfs.http.address");
String httpAddress = null;
if(namenode != null) {
if (!isAvatar) {
// if non-default namenode, try reverse look up
// the nameServiceID if it is available
String nameServiceId = DFSUtil.getNameServiceIdFromAddress(
conf, namenode,
FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
if (nameServiceId != null) {
httpAddress = conf.get(DFSUtil.getNameServiceIdKey(
FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameServiceId));
}
} else {
// federated, avatar addresses
String suffix = "0";
String nameServiceId = DFSUtil.getNameServiceIdFromAddress(
conf, namenode,
FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY + "0");
if (nameServiceId == null) {
nameServiceId = DFSUtil.getNameServiceIdFromAddress(
conf, namenode,
FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY + "1");
suffix = "1";
}
if (nameServiceId != null) {
httpAddress = conf.get(DFSUtil.getNameServiceIdKey(
FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY + suffix, nameServiceId));
}
// federated, avatar addresses - ok
if (httpAddress != null) {
return httpAddress;
}
// non-federated, avatar adresses
httpAddress = getNonFederatedAvatarInfoServer(namenode, "0", conf);
if (httpAddress != null) {
return httpAddress;
}
httpAddress = getNonFederatedAvatarInfoServer(namenode, "1", conf);
}
}
// else - Use non-federation, non-avatar configuration
if (httpAddress == null) {
httpAddress = conf.get(FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, httpAddressDefault);
}
return httpAddress;
} | java | {
"resource": ""
} |
q162046 | DFSUtil.isDefaultNamenodeAddress | train | public static boolean isDefaultNamenodeAddress(Configuration conf,
InetSocketAddress address, String... keys) {
// True when any of the given configuration keys resolves to the
// supplied socket address.
for (String key : keys) {
String configured = conf.get(key);
if (configured == null) {
continue;
}
if (address.equals(NetUtils.createSocketAddr(configured))) {
return true;
}
}
return false;
} | java | {
"resource": ""
} |
q162047 | DFSUtil.setGenericConf | train | public static void setGenericConf(Configuration conf,
String nameserviceId, String... keys) {
// For each key, copy the nameservice-specific value
// ("<key>.<nameserviceId>") over the generic key, when present.
for (String key : keys) {
String value = conf.get(getNameServiceIdKey(key, nameserviceId));
if (value != null) {
conf.set(key, value);
}
}
} | java | {
"resource": ""
} |
q162048 | NativeCrc32.verifyChunkedSums | train | public static void verifyChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
throws ChecksumException {
// Delegate chunked checksum verification to the native
// implementation, passing the buffers' current positions so only
// the remaining data is verified. fileName and basePos are used for
// error reporting on mismatch.
nativeVerifyChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
fileName, basePos);
} | java | {
"resource": ""
} |
q162049 | ShortWritable.compareTo | train | @Override
public int compareTo(ShortWritable o) {
// Natural ordering of the wrapped short values. Short.compare
// expresses the three-way comparison directly instead of the
// hand-rolled nested ternary.
return Short.compare(this.value, o.value);
} | java | {
"resource": ""
} |
q162050 | AbstractMapWritable.addToMap | train | private synchronized void addToMap(Class clazz, byte id) {
// Register a class <-> id mapping, rejecting conflicts in either
// direction: a class already bound to a different id, or an id
// already bound to a different class.
if (classToIdMap.containsKey(clazz)) {
byte b = classToIdMap.get(clazz);
if (b != id) {
throw new IllegalArgumentException ("Class " + clazz.getName() +
" already registered but maps to " + b + " and not " + id);
}
}
if (idToClassMap.containsKey(id)) {
Class c = idToClassMap.get(id);
if (!c.equals(clazz)) {
throw new IllegalArgumentException("Id " + id + " exists but maps to " +
c.getName() + " and not " + clazz.getName());
}
}
classToIdMap.put(clazz, id);
idToClassMap.put(id, clazz);
} | java | {
"resource": ""
} |
q162051 | AbstractMapWritable.addToMap | train | protected synchronized void addToMap(Class clazz) {
// Register a class under the next free id; no-op when it is already
// known. Ids are bytes, so at most Byte.MAX_VALUE new classes can be
// registered.
if (classToIdMap.containsKey(clazz)) {
return;
}
if (newClasses + 1 > Byte.MAX_VALUE) {
throw new IndexOutOfBoundsException("adding an additional class would" +
" exceed the maximum number allowed");
}
byte id = ++newClasses;
addToMap(clazz, id);
} | java | {
"resource": ""
} |
q162052 | AbstractMapWritable.copy | train | protected synchronized void copy(Writable other) {
// Deep-copy another Writable into this instance by serializing it to
// a buffer and deserializing into ourselves.
// @throws IllegalArgumentException when other is null or cannot be
// round-tripped; the IOException is preserved as the cause.
if (other != null) {
try {
DataOutputBuffer out = new DataOutputBuffer();
other.write(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
readFields(in);
} catch (IOException e) {
// Keep the original exception as the cause instead of flattening
// it to just its message.
throw new IllegalArgumentException("map cannot be copied: " +
e.getMessage(), e);
}
} else {
throw new IllegalArgumentException("source map cannot be null");
}
} | java | {
"resource": ""
} |
q162053 | UpgradeObjectDatanode.preUpgradeAction | train | boolean preUpgradeAction(NamespaceInfo nsInfo) throws IOException {
// Called before a distributed upgrade. Returns false when the
// name-node will drive the upgrade. When this data-node missed the
// upgrade, report the problem to the name-node (best effort) and
// throw, which shuts the data-node down.
int nsUpgradeVersion = nsInfo.getDistributedUpgradeVersion();
if(nsUpgradeVersion >= getVersion())
return false; // name-node will perform the upgrade
// Missed the upgrade. Report problem to the name-node and throw exception
String errorMsg =
"\n Data-node missed a distributed upgrade and will shutdown."
+ "\n " + getDescription() + "."
+ " Name-node version = " + nsInfo.getLayoutVersion() + ".";
DataNode.LOG.fatal( errorMsg );
try {
dataNode.getNSNamenode(nsInfo.getNamespaceID()).errorReport(
dataNode.getDNRegistrationForNS(nsInfo.getNamespaceID()),
DatanodeProtocol.NOTIFY, errorMsg);
} catch(SocketTimeoutException e) { // namenode is busy
DataNode.LOG.info("Problem connecting to server: "
+ dataNode.getNSNamenode(nsInfo.getNamespaceID()).toString());
}
throw new IOException(errorMsg);
} | java | {
"resource": ""
} |
q162054 | BlocksMap.removeBlockFromMap | train | private BlockInfo removeBlockFromMap(Block b) {
if (b == null) {
return null;
}
ns.decrementSafeBlockCountForBlockRemoval(b);
return blocks.remove(b);
} | java | {
"resource": ""
} |
q162055 | BlocksMap.addINode | train | BlockInfo addINode(Block b, INodeFile iNode, short replication) {
BlockInfo info = checkBlockInfo(b, replication);
info.inode = iNode;
return info;
} | java | {
"resource": ""
} |
q162056 | BlocksMap.addINodeForLoading | train | BlockInfo addINodeForLoading(Block b, INodeFile iNode) {
// allocate new block when loading the image
// for hardlinked files, we need to check if the blocks are already there
BlockInfo info = checkBlockInfo(b, iNode.getReplication(),
iNode.isHardlinkFile());
info.inode = iNode;
return info;
} | java | {
"resource": ""
} |
q162057 | BlocksMap.updateINode | train | public BlockInfo updateINode(BlockInfo oldBlock, Block newBlock, INodeFile iNode,
short replication, boolean forceUpdate) throws IOException {
// If the old block is not same as the new block, probably the GS was
// bumped up, hence update the block with new GS/size.
// If forceUpdate is true, we will always remove the old block and
// update with new block, it's used by raid
List<DatanodeDescriptor> locations = null;
if (oldBlock != null && (!oldBlock.equals(newBlock) || forceUpdate)) {
if (oldBlock.getBlockId() != newBlock.getBlockId()) {
throw new IOException("block ids don't match : " + oldBlock + ", "
+ newBlock);
}
if (forceUpdate) {
// save locations of the old block
locations = new ArrayList<DatanodeDescriptor>();
for (int i=0; i<oldBlock.numNodes(); i++) {
locations.add(oldBlock.getDatanode(i));
}
} else {
if (!iNode.isUnderConstruction()) {
throw new IOException(
"Try to update generation of a finalized block old block: "
+ oldBlock + ", new block: " + newBlock);
}
}
removeBlock(oldBlock);
}
BlockInfo info = checkBlockInfo(newBlock, replication);
info.set(newBlock.getBlockId(), newBlock.getNumBytes(), newBlock.getGenerationStamp());
info.inode = iNode;
if (locations != null) {
// add back the locations if needed
if (locations != null) {
for (DatanodeDescriptor d : locations) {
d.addBlock(info);
}
}
}
return info;
} | java | {
"resource": ""
} |
q162058 | BlocksMap.removeINode | train | void removeINode(Block b) {
BlockInfo info = blocks.get(b);
if (info != null) {
info.inode = null;
if (info.getDatanode(0) == null) { // no datanodes left
removeBlockFromMap(b); // remove block from the map
}
}
} | java | {
"resource": ""
} |
q162059 | BlocksMap.removeBlock | train | void removeBlock(Block block) {
BlockInfo blockInfo = removeBlockFromMap(block);
if (blockInfo == null)
return;
blockInfo.inode = null;
for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx);
dn.removeBlock(blockInfo); // remove from the list and wipe the location
}
} | java | {
"resource": ""
} |
q162060 | BlocksMap.numNodes | train | int numNodes(Block b) {
BlockInfo info = blocks.get(b);
return info == null ? 0 : info.numNodes();
} | java | {
"resource": ""
} |
q162061 | BlocksMap.addNode | train | boolean addNode(Block b, DatanodeDescriptor node, int replication) {
// insert into the map if not there yet
BlockInfo info = checkBlockInfo(b, replication);
// add block to the data-node list and the node to the block info
return node.addBlock(info);
} | java | {
"resource": ""
} |
q162062 | BlocksMap.removeNode | train | boolean removeNode(Block b, DatanodeDescriptor node) {
BlockInfo info = blocks.get(b);
if (info == null)
return false;
// remove block from the data-node list and the node from the block info
boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left
&& info.inode == null) { // does not belong to a file
removeBlockFromMap(b); // remove block from the map
}
return removed;
} | java | {
"resource": ""
} |
q162063 | BlocksMap.getBlocksIterarors | train | List<Iterator<BlockInfo>> getBlocksIterarors(int numShards) {
List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
if (numShards <= 0) {
throw new IllegalArgumentException("Number of shards must be greater than 0");
}
for (int i = 0; i < numShards; i++) {
Iterator<BlockInfo> iterator = blocks.shardIterator(i, numShards);
if (iterator != null) {
iterators.add(iterator);
}
}
return iterators;
} | java | {
"resource": ""
} |
q162064 | BlocksMap.contains | train | boolean contains(Block block, DatanodeDescriptor datanode) {
BlockInfo info = blocks.get(block);
if (info == null)
return false;
if (-1 == info.findDatanode(datanode))
return false;
return true;
} | java | {
"resource": ""
} |
q162065 | FSImagePreTransactionalStorageInspector.readCheckpointTime | train | static long readCheckpointTime(StorageDirectory sd) throws IOException {
File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
long timeStamp = 0L;
if (timeFile.exists() && timeFile.canRead()) {
DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
try {
timeStamp = in.readLong();
} finally {
IOUtils.cleanup(LOG, in);
}
}
return timeStamp;
} | java | {
"resource": ""
} |
q162066 | LimitTasksPerJobTaskScheduler.getMaxMapAndReduceLoad | train | protected synchronized int[] getMaxMapAndReduceLoad(int localMaxMapLoad,
int localMaxReduceLoad) {
// Approximate because of concurrency
final int numTaskTrackers =
taskTrackerManager.getClusterStatus().getTaskTrackers();
/* Hold the result */
int maxMapLoad = 0;
int maxReduceLoad = 0;
int neededMaps = 0;
int neededReduces = 0;
Collection<JobInProgress> jobQueue =
jobQueueJobInProgressListener.getJobQueue();
synchronized (jobQueue) {
for (JobInProgress job : jobQueue) {
if (job.getStatus().getRunState() == JobStatus.RUNNING) {
neededMaps += job.desiredMaps() - job.finishedMaps();
neededReduces += job.desiredReduces() - job.finishedReduces();
}
}
}
if (numTaskTrackers > 0) {
maxMapLoad = Math.min(localMaxMapLoad, (int) Math
.ceil((double) neededMaps / numTaskTrackers));
maxReduceLoad = Math.min(localMaxReduceLoad, (int) Math
.ceil((double) neededReduces / numTaskTrackers));
}
return new int[] { maxMapLoad, maxReduceLoad };
} | java | {
"resource": ""
} |
q162067 | MetricsRegistry.add | train | public void add(final String metricsName, final MetricsBase theMetricsObj) {
if (metricsList.putIfAbsent(metricsName, theMetricsObj) != null) {
throw new IllegalArgumentException("Duplicate metricsName:" + metricsName);
}
} | java | {
"resource": ""
} |
q162068 | PiEstimator.main | train | public static void main(String[] argv) throws Exception {
System.exit(ToolRunner.run(null, new PiEstimator(), argv));
} | java | {
"resource": ""
} |
q162069 | FileStatus.compareTo | train | public int compareTo(Object o) {
FileStatus other = (FileStatus)o;
return this.getPath().compareTo(other.getPath());
} | java | {
"resource": ""
} |
q162070 | JobID.compareTo | train | @Override
public int compareTo(ID o) {
JobID that = (JobID)o;
int jtComp = this.jtIdentifier.compareTo(that.jtIdentifier);
if(jtComp == 0) {
return this.id - that.id;
}
else return jtComp;
} | java | {
"resource": ""
} |
q162071 | JobID.appendTo | train | public StringBuilder appendTo(StringBuilder builder) {
builder.append(SEPARATOR);
builder.append(jtIdentifier);
builder.append(SEPARATOR);
builder.append(idFormat.format(id));
return builder;
} | java | {
"resource": ""
} |
q162072 | JspHelper.bestNode | train | public DatanodeInfo[] bestNode(LocatedBlocks blks) throws IOException {
// insert all known replica locations into a tree map where the
// key is the DatanodeInfo
TreeMap<DatanodeInfo, NodeRecord> map =
new TreeMap<DatanodeInfo, NodeRecord>();
for (int i = 0; i < blks.getLocatedBlocks().size(); i++) {
DatanodeInfo [] nodes = blks.get(i).getLocations();
for (int j = 0; j < nodes.length; j++) {
NodeRecord obj = map.get(nodes[j]);
if (obj != null) {
obj.frequency++;
} else {
map.put(nodes[j], new NodeRecord(nodes[j], 1));
}
}
}
// sort all locations by their frequency of occurance
Collection<NodeRecord> values = map.values();
NodeRecord[] nodes = (NodeRecord[])
values.toArray(new NodeRecord[values.size()]);
Arrays.sort(nodes, new NodeRecordComparator());
try {
List<NodeRecord> candidates = bestNode(nodes, false);
return candidates.toArray(new DatanodeInfo[candidates.size()]);
} catch (IOException e) {
return new DatanodeInfo[] {randomNode()};
}
} | java | {
"resource": ""
} |
q162073 | JspHelper.bestNode | train | public static DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
DatanodeInfo [] nodes = blk.getLocations();
return bestNode(nodes, true).get(0);
} | java | {
"resource": ""
} |
q162074 | JspHelper.bestNode | train | public static <T extends DatanodeID> List<T> bestNode(T[] nodes, boolean doRandom)
throws IOException {
TreeSet<T> deadNodes = new TreeSet<T>();
T chosenNode = null;
int failures = 0;
Socket s = null;
int index = -1;
if (nodes == null || nodes.length == 0) {
throw new IOException("No nodes contain this block");
}
while (s == null) {
if (chosenNode == null) {
do {
if (doRandom) {
index = rand.nextInt(nodes.length);
} else {
index++;
}
chosenNode = nodes[index];
} while (deadNodes.contains(chosenNode));
}
chosenNode = nodes[index];
//just ping to check whether the node is alive
InetSocketAddress targetAddr = NetUtils.createSocketAddr(
chosenNode.getHost() + ":" + chosenNode.getInfoPort());
try {
s = new Socket();
s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
} catch (IOException e) {
HttpServer.LOG.warn("Failed to connect to "+chosenNode.name, e);
deadNodes.add(chosenNode);
s.close();
s = null;
failures++;
} finally {
if (s!=null) {
s.close();
}
}
if (failures == nodes.length)
throw new IOException("Could not reach the block containing the data. Please try again");
}
List<T> candidates;
if (doRandom) {
candidates = new ArrayList<T>(1);
candidates.add(chosenNode);
} else {
candidates = new ArrayList<T>(nodes.length - index);
for (int i=index; i<nodes.length - index; i++) {
candidates.add(nodes[i]);
}
}
return candidates;
} | java | {
"resource": ""
} |
q162075 | JspHelper.getUrlParam | train | public static String getUrlParam(String name, String val) {
return val == null ? "" : "&" + name + "=" + val;
} | java | {
"resource": ""
} |
q162076 | JspHelper.getDFSClient | train | public static DFSClient getDFSClient(final HttpServletRequest request,
final Configuration conf) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return new DFSClient(DFSUtil.getSocketAddress(nnAddr), conf);
} | java | {
"resource": ""
} |
q162077 | IdentityReducer.reduce | train | public void reduce(K key, Iterator<V> values,
OutputCollector<K, V> output, Reporter reporter)
throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
} | java | {
"resource": ""
} |
q162078 | CoronaAdmin.restartTaskTracker | train | private int restartTaskTracker(boolean forceFlag, int batchSize) throws IOException {
// Get the current configuration
CoronaConf conf = new CoronaConf(getConf());
InetSocketAddress address = NetUtils.createSocketAddr(conf
.getClusterManagerAddress());
TFramedTransport transport = new TFramedTransport(
new TSocket(address.getHostName(), address.getPort()));
ClusterManagerService.Client client = new ClusterManagerService.Client(
new TBinaryProtocol(transport));
int restartBatch = (batchSize > 0) ? batchSize :
conf.getCoronaNodeRestartBatch();
try {
transport.open();
RestartNodesArgs restartNodeArgs = new RestartNodesArgs(
forceFlag, restartBatch);
client.restartNodes(restartNodeArgs);
} catch (SafeModeException e) {
System.err.println("ClusterManager is in Safe Mode");
} catch (TException e) {
throw new IOException(e);
}
return 0;
} | java | {
"resource": ""
} |
q162079 | CoronaAdmin.setSafeMode | train | private int setSafeMode(boolean safeMode) throws IOException {
// Get the current configuration
CoronaConf conf = new CoronaConf(getConf());
InetSocketAddress address = NetUtils.createSocketAddr(conf
.getClusterManagerAddress());
TFramedTransport transport = new TFramedTransport(
new TSocket(address.getHostName(), address.getPort()));
ClusterManagerService.Client client = new ClusterManagerService.Client(
new TBinaryProtocol(transport));
try {
transport.open();
if (client.setSafeMode(safeMode)) {
System.out.println("The safeMode is: " +
(safeMode ? "ON" : "OFF"));
} else {
System.err.println("Could not set the safeMode flag");
}
} catch (TException e) {
throw new IOException(e);
}
return 0;
} | java | {
"resource": ""
} |
q162080 | CoronaAdmin.forceSetSafeModeOnPJT | train | private int forceSetSafeModeOnPJT(boolean safeMode) throws IOException {
CoronaConf conf = new CoronaConf(getConf());
try {
ClusterManagerAvailabilityChecker.getPJTClient(conf).
setClusterManagerSafeModeFlag(safeMode);
} catch (IOException e) {
System.err.println("Could not set the Safe Mode flag on the PJT: " + e);
} catch (TException e) {
System.err.println("Could not set the Safe Mode flag on the PJT: " + e);
}
return 0;
} | java | {
"resource": ""
} |
q162081 | CoronaAdmin.main | train | public static void main(String[] args) throws Exception {
int result = ToolRunner.run(new CoronaAdmin(), args);
System.exit(result);
} | java | {
"resource": ""
} |
q162082 | ProxyJobTracker.urlInJobHistory | train | public static String urlInJobHistory(
Path jobHistoryFileLocation, String jobId)
throws IOException {
try {
FileSystem fs = jobHistoryFileLocation.getFileSystem(conf);
fs.getFileStatus(jobHistoryFileLocation);
} catch (FileNotFoundException e) {
return null;
}
return "http://" + LOCALMACHINE + ":" + LOCALPORT +
"/coronajobdetailshistory.jsp?jobid=" + jobId +
"&logFile=" + URLEncoder.encode(jobHistoryFileLocation.toString());
} | java | {
"resource": ""
} |
q162083 | ValueHistogram.addNextValue | train | public void addNextValue(Object val) {
String valCountStr = val.toString();
int pos = valCountStr.lastIndexOf("\t");
String valStr = valCountStr;
String countStr = "1";
if (pos >= 0) {
valStr = valCountStr.substring(0, pos);
countStr = valCountStr.substring(pos + 1);
}
Long count = (Long) this.items.get(valStr);
long inc = Long.parseLong(countStr);
if (count == null) {
count = inc;
} else {
count = count.longValue() + inc;
}
items.put(valStr, count);
} | java | {
"resource": ""
} |
q162084 | DataGenerator.run | train | public int run(String[] args) throws Exception {
int exitCode = 0;
exitCode = init(args);
if (exitCode != 0) {
return exitCode;
}
// genDirStructure();
genFiles();
return exitCode;
} | java | {
"resource": ""
} |
q162085 | DataGenerator.genDirStructure | train | @SuppressWarnings("unused")
private void genDirStructure() throws IOException {
BufferedReader in = new BufferedReader(new FileReader(new File(inDir,
StructureGenerator.DIR_STRUCTURE_FILE_NAME)));
String line;
while ((line = in.readLine()) != null) {
fs.mkdirs(new Path(root + line));
}
} | java | {
"resource": ""
} |
q162086 | DataGenerator.genFiles | train | private void genFiles() throws IOException {
//
// BufferedReader in = new BufferedReader(new FileReader(new File(inDir,
// StructureGenerator.FILE_STRUCTURE_FILE_NAME)));
// String line;
// while ((line = in.readLine()) != null) {
// String[] tokens = line.split(" ");
// if (tokens.length != 2) {
// throw new IOException("Expect at most 2 tokens per line: "
// + line);
// }
// String fileName = root + tokens[0];
// long fileSize = (long) (BLOCK_SIZE * Double.parseDouble(tokens[1]));
// genFile(new Path(fileName), fileSize);
// }
config = new Configuration(getConf());
config.setInt("dfs.replication", 3);
config.set("dfs.rootdir", root.toString());
JobConf job = new JobConf(config, DataGenerator.class);
job.setJobName("data-genarator");
FileOutputFormat.setOutputPath(job, new Path("data-generator-result"));
// create the input for the map-reduce job
Path inputPath = new Path(ROOT + "load_input");
fs.mkdirs(inputPath);
fs.copyFromLocalFile(new Path(inDir + "/"
+ StructureGenerator.FILE_STRUCTURE_FILE_NAME), inputPath);
FileInputFormat.setInputPaths(job, new Path(ROOT + "load_input"));
job.setInputFormat(TextInputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMapperClass(CreateFiles.class);
job.setNumMapTasks(nFiles/10);
job.setNumReduceTasks(0);
JobClient.runJob(job);
} | java | {
"resource": ""
} |
q162087 | SimulatorEngine.startTaskTrackers | train | void startTaskTrackers(ClusterStory clusterStory, long now) {
/** port assigned to TTs, incremented by 1 for each TT */
int port = 10000;
long ms = now + 100;
for (MachineNode node : clusterStory.getMachines()) {
String hostname = node.getName();
RackNode rackNode = node.getRackNode();
StaticMapping.addNodeToRack(hostname, rackNode.getName());
String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:"
+ port;
port++;
SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
hostname, node.getMapSlots(), node.getReduceSlots());
queue.addAll(tt.init(ms++));
}
} | java | {
"resource": ""
} |
q162088 | SimulatorEngine.init | train | @SuppressWarnings("deprecation")
void init() throws InterruptedException, IOException {
long now = System.currentTimeMillis();
JobConf jobConf = new JobConf(getConf());
jobConf.setClass("topology.node.switch.mapping.impl",
StaticMapping.class, DNSToSwitchMapping.class);
jobConf.set("fs.default.name", "file:///");
jobConf.set("mapred.job.tracker", "localhost:8012");
jobConf.setInt("mapred.jobtracker.job.history.block.size", 512);
jobConf.setInt("mapred.jobtracker.job.history.buffer.size", 512);
jobConf.setLong("mapred.tasktracker.expiry.interval", 5000);
jobConf.setInt("mapred.reduce.copy.backoff", 4);
jobConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);
jobConf.setUser("mumak");
jobConf.set("mapred.system.dir",
jobConf.get("hadoop.log.dir", "/tmp/hadoop-"+jobConf.getUser()) + "/mapred/system");
jobConf.set("mapred.jobtracker.taskScheduler", JobQueueTaskScheduler.class.getName());
FileSystem lfs = FileSystem.getLocal(getConf());
Path logPath =
new Path(System.getProperty("hadoop.log.dir")).makeQualified(lfs);
jobConf.set("mapred.system.dir", logPath.toString());
jobConf.set("hadoop.job.history.location", (new Path(logPath, "history")
.toString()));
jt = SimulatorJobTracker.startTracker(jobConf, now, this);
jt.offerService();
// max Map/Reduce tasks per node
int maxMaps = getConf().getInt("mapred.tasktracker.map.tasks.maximum",
DEFAULT_MAP_SLOTS_PER_NODE);
int maxReduces = getConf().getInt(
"mapred.tasktracker.reduce.tasks.maximum",
DEFAULT_REDUCE_SLOTS_PER_NODE);
MachineNode defaultNode = new MachineNode.Builder("default", 2)
.setMapSlots(maxMaps).setReduceSlots(maxReduces).build();
ZombieCluster cluster = new ZombieCluster(new Path(topologyFile),
defaultNode, jobConf);
long firstJobStartTime = now + 60000;
JobStoryProducer jobStoryProducer = new SimulatorJobStoryProducer(
new Path(traceFile), cluster, firstJobStartTime, jobConf);
jc = new SimulatorJobClient(jt, jobStoryProducer);
queue.addAll(jc.init(firstJobStartTime));
// create TTs based on topology.json
startTaskTrackers(cluster, now);
terminateTime = getConf().getLong("mumak.terminate.time", Long.MAX_VALUE);
if (terminateTime <= 0) {
throw new IllegalArgumentException("Terminate time must be positive: "
+ terminateTime);
}
} | java | {
"resource": ""
} |
q162089 | JenkinsHash.main | train | public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.err.println("Usage: JenkinsHash filename");
System.exit(-1);
}
FileInputStream in = new FileInputStream(args[0]);
byte[] bytes = new byte[512];
int value = 0;
JenkinsHash hash = new JenkinsHash();
for (int length = in.read(bytes); length > 0 ; length = in.read(bytes)) {
value = hash.hash(bytes, length, value);
}
System.out.println(Math.abs(value));
} | java | {
"resource": ""
} |
q162090 | TaskStatus.setStartTime | train | void setStartTime(long startTime) {
//Making the assumption of passed startTime to be a positive
//long value explicit.
if (startTime > 0) {
this.startTime = startTime;
} else {
//Using String utils to get the stack trace.
LOG.error("Trying to set illegal startTime for task : " + taskid +
".Stack trace is : " +
StringUtils.stringifyException(new Exception()));
}
} | java | {
"resource": ""
} |
q162091 | TaskStatus.setPhase | train | void setPhase(Phase phase){
TaskStatus.Phase oldPhase = getPhase();
if (oldPhase != phase){
// sort phase started
if (phase == TaskStatus.Phase.SORT){
setShuffleFinishTime(JobTracker.getClock().getTime());
} else if (phase == TaskStatus.Phase.REDUCE){
setSortFinishTime(JobTracker.getClock().getTime());
}
}
this.phase = phase;
} | java | {
"resource": ""
} |
q162092 | TaskStatus.statusUpdate | train | synchronized void statusUpdate(State runState,
float progress,
String state,
Phase phase,
long finishTime) {
setRunState(runState);
setProgress(progress);
setStateString(state);
setPhase(phase);
if (finishTime > 0) {
setFinishTime(finishTime);
}
} | java | {
"resource": ""
} |
q162093 | MD5FileUtils.verifySavedMD5 | train | public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
throws IOException {
MD5Hash storedHash = readStoredMd5ForFile(dataFile);
// Check the hash itself
if (!expectedMD5.equals(storedHash)) {
throw new IOException(
"File " + dataFile + " did not match stored MD5 checksum " +
" (stored: " + storedHash + ", computed: " + expectedMD5);
}
} | java | {
"resource": ""
} |
q162094 | MD5FileUtils.readStoredMd5ForFile | train | public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
File md5File = getDigestFileForFile(dataFile);
String md5Line;
if (!md5File.exists()) {
return null;
}
BufferedReader reader =
new BufferedReader(new FileReader(md5File));
try {
md5Line = reader.readLine();
if (md5Line == null) { md5Line = ""; }
md5Line = md5Line.trim();
} catch (IOException ioe) {
throw new IOException("Error reading md5 file at " + md5File, ioe);
} finally {
IOUtils.cleanup(LOG, reader);
}
Matcher matcher = LINE_REGEX.matcher(md5Line);
if (!matcher.matches()) {
throw new IOException("Invalid MD5 file at " + md5File
+ " (does not match expected pattern)");
}
String storedHash = matcher.group(1);
File referencedFile = new File(matcher.group(2));
// Sanity check: Make sure that the file referenced in the .md5 file at
// least has the same name as the file we expect
if (!referencedFile.getName().equals(dataFile.getName())) {
throw new IOException(
"MD5 file at " + md5File + " references file named " +
referencedFile.getName() + " but we expected it to reference " +
dataFile);
}
return new MD5Hash(storedHash);
} | java | {
"resource": ""
} |
q162095 | MD5FileUtils.computeMd5ForFile | train | public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
InputStream in = new FileInputStream(dataFile);
try {
MessageDigest digester = MD5Hash.getDigester();
DigestInputStream dis = new DigestInputStream(in, digester);
IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024, false);
return new MD5Hash(digester.digest());
} finally {
IOUtils.closeStream(in);
}
} | java | {
"resource": ""
} |
q162096 | MD5FileUtils.saveMD5File | train | public static void saveMD5File(File dataFile, MD5Hash digest)
throws IOException {
File md5File = getDigestFileForFile(dataFile);
String digestString = StringUtils.byteToHexString(
digest.getDigest());
String md5Line = digestString + " *" + dataFile.getName() + "\n";
AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
afos.write(md5Line.getBytes());
afos.close();
LOG.info("Saved MD5 " + digest + " to " + md5File);
} | java | {
"resource": ""
} |
q162097 | VIntWritable.compareTo | train | public int compareTo(Object o) {
int thisValue = this.value;
int thatValue = ((VIntWritable)o).value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
} | java | {
"resource": ""
} |
q162098 | StatisticsEditsVisitor.incrementOpCodeCount | train | private void incrementOpCodeCount(Byte opCode) {
if(!opCodeCount.containsKey(opCode)) {
opCodeCount.put(opCode, 0L);
}
Long newValue = opCodeCount.get(opCode) + 1;
opCodeCount.put(opCode, newValue);
} | java | {
"resource": ""
} |
q162099 | EditLogInputStream.readOp | train | public FSEditLogOp readOp() throws IOException {
FSEditLogOp ret;
if (cachedOp != null) {
ret = cachedOp;
cachedOp = null;
return ret;
}
return nextOp();
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.