_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q161100 | NameNode.abandonBlock | train | public void abandonBlock(Block b, String src, String holder
) throws IOException {
// Thin RPC entry point: all validation and the actual removal of the
// abandoned block are delegated to abandonBlockInternal.
abandonBlockInternal(b, src, holder);
} | java | {
"resource": ""
} |
q161101 | NameNode.sendHeartbeat | train | public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
long capacity,
long dfsUsed,
long remaining,
long namespaceUsed,
int xmitsInProgress,
int xceiverCount) throws IOException {
// Reject heartbeats from datanodes whose registration is stale or
// inconsistent with this namenode.
verifyRequest(nodeReg);
myMetrics.numHeartbeat.inc();
// NOTE(review): the last two arguments are passed in the opposite order
// (xceiverCount, xmitsInProgress) to this method's parameter order --
// confirm this matches handleHeartbeat's declared signature.
return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, namespaceUsed,
xceiverCount, xmitsInProgress);
} | java | {
"resource": ""
} |
q161102 | NameNode.blocksBeingWrittenReport | train | public void blocksBeingWrittenReport(DatanodeRegistration nodeReg,
    BlockReport blocks) throws IOException {
  // Validate the datanode's registration before accepting its report.
  verifyRequest(nodeReg);
  // Convert the wire-format report into the namesystem's compact form.
  BlockListAsLongs blockList =
      new BlockListAsLongs(blocks.getBlockReportInLongs());
  boolean accepted =
      namesystem.processBlocksBeingWrittenReport(nodeReg, blockList);
  // Build the audit line; note when the namesystem discarded the report.
  StringBuilder msg =
      new StringBuilder("*BLOCK* NameNode.blocksBeingWrittenReport: ");
  msg.append("from ").append(nodeReg.getName()).append(" ");
  msg.append(blockList.getNumberOfBlocks()).append(" blocks");
  if (!accepted) {
    msg.append(" was discarded.");
  }
  stateChangeLog.info(msg.toString());
} | java | {
"resource": ""
} |
q161103 | NameNode.verifyRequest | train | public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
// Verifies a datanode RPC: the layout version, namespace id and ctime
// must all be consistent with this namenode, else the node is rejected.
verifyVersion(nodeReg.getVersion(), LAYOUT_VERSION, "layout");
// The ctime of the namenode has to be greater than or equal to the ctime of
// the datanode. The ctime of the namenode is updated when it is upgraded
// and only then the ctime of the datanode is updated. Hence the datanode's
// ctime should never be greater than the namenode's ctime.
if (getNamespaceID() != nodeReg.storageInfo.namespaceID
|| getCTime() < nodeReg.storageInfo.cTime) {
LOG.warn("Invalid Request : NN namespaceId, cTime : " + getNamespaceID()
+ ", " + getCTime() + " DN namespaceId, cTime : "
+ nodeReg.storageInfo.namespaceID + ", " + nodeReg.storageInfo.cTime);
throw new UnregisteredDatanodeException(nodeReg);
}
// NOTE(review): this increments numVersionRequest rather than a
// dedicated verify-request counter -- confirm that is intentional.
myMetrics.numVersionRequest.inc();
} | java | {
"resource": ""
} |
q161104 | NameNode.verifyVersion | train | public static void verifyVersion(int reportedVersion,
    int expectedVersion, String annotation) throws IOException {
  // The two versions must have the same sign: layout versions are
  // negative while data-transfer versions are positive, so differing
  // signs indicate the caller mixed up version kinds.
  boolean reportedNegative = reportedVersion < 0;
  boolean expectedNegative = expectedVersion < 0;
  if (reportedNegative != expectedNegative) {
    throw new IOException("reportedVersion and expectedVersion have" +
        " different signs : " + reportedVersion + ", " + expectedVersion);
  }
  // layout_version is negative and data_transfer is positive, so compare
  // magnitudes; a smaller magnitude means an out-of-date reporter.
  if (Math.abs(reportedVersion) < Math.abs(expectedVersion)) {
    throw new IncorrectVersionException(
        reportedVersion, "data node " + annotation, expectedVersion);
  }
} | java | {
"resource": ""
} |
q161105 | NameNode.getRandomFilesSample | train | public List<FileStatusExtended> getRandomFilesSample(double percentage) {
  // Validate via the negated in-range test so that NaN is also rejected.
  boolean inRange = percentage > 0 && percentage <= 1.0;
  if (!inRange) {
    throw new IllegalArgumentException("Invalid percentage : " + percentage +
        " value should be between (0 - 1.0]");
  }
  LOG.info("Sampling : " + (percentage * 100) + " percent of files");
  // Delegate the actual sampling to the namesystem.
  return namesystem.getRandomFiles(percentage);
} | java | {
"resource": ""
} |
q161106 | NameNode.format | train | static boolean format(Configuration conf,
boolean force,
boolean isConfirmationNeeded
) throws IOException {
// Formats the namenode's storage directories.
// NOTE(review): the return value is inverted relative to intuition --
// true means the format was ABORTED, false means it succeeded. Callers
// appear to treat the result as an "aborted" flag; confirm before use.
boolean allowFormat = conf.getBoolean("dfs.namenode.support.allowformat",
true);
if (!allowFormat) {
throw new IOException("The option dfs.namenode.support.allowformat is "
+ "set to false for this filesystem, so it "
+ "cannot be formatted. You will need to set "
+ "dfs.namenode.support.allowformat parameter "
+ "to true in order to format this filesystem");
}
Collection<URI> dirsToFormat = NNStorageConfiguration.getNamespaceDirs(conf);
Collection<URI> editDirsToFormat =
NNStorageConfiguration.getNamespaceEditsDirs(conf);
// Build a transient namesystem purely to drive the format; it is always
// closed in the finally block below.
FSNamesystem nsys = new FSNamesystem(new FSImage(conf, dirsToFormat,
editDirsToFormat, null), conf);
try {
if (!nsys.dir.fsImage.confirmFormat(force, isConfirmationNeeded)) {
return true; // aborted
}
nsys.dir.fsImage.format();
return false;
} finally {
nsys.close();
}
} | java | {
"resource": ""
} |
q161107 | NameNode.validateServiceName | train | public static boolean validateServiceName(Configuration conf,
    String nameServiceId) {
  // A nameservice id is valid iff it matches the federation config:
  // with nameservices configured it must name one of them; without any
  // configured, no id may be supplied at all.
  Collection<String> configuredIds = DFSUtil.getNameServiceIds(conf);
  boolean federated = configuredIds != null && !configuredIds.isEmpty();
  if (!federated) {
    if (nameServiceId != null) {
      System.err.println("An invalid nameservice id: " + nameServiceId);
      return false;
    }
    return true;
  }
  if (nameServiceId == null) {
    System.err.println("Need to input a nameservice id");
    return false;
  }
  if (!configuredIds.contains(nameServiceId)) {
    System.err.println("An invalid nameservice id: " + nameServiceId);
    return false;
  }
  return true;
} | java | {
"resource": ""
} |
q161108 | NameNode.validateCheckpointerAddress | train | protected void validateCheckpointerAddress(InetAddress configuredRemoteAddress)
throws IOException {
// Ensures the caller of a checkpoint RPC is the configured standby.
// Throws IOException when the remote address is unknown or does not
// match the configured checkpointer address.
InetAddress remoteAddress = Server.getRemoteIp();
InjectionHandler.processEvent(InjectionEvent.NAMENODE_VERIFY_CHECKPOINTER,
remoteAddress);
LOG.info("Verify: received request from: " + remoteAddress);
if (remoteAddress == null) {
LOG.info("Verify: Remote address is NULL");
throw new IOException("Verify: Remote address is null");
}
// if the address is not configured then skip checking
// (null, or the 0.0.0.0 wildcard, both mean "accept anyone")
if (configuredRemoteAddress == null
|| configuredRemoteAddress.equals(new InetSocketAddress("0.0.0.0", 0)
.getAddress())) {
LOG.info("Verify: Skipping check since the configured address is: "
+ configuredRemoteAddress)
;
return;
}
// compare addresses
if (!remoteAddress.equals(configuredRemoteAddress)) {
String msg = "Verify: Configured standby is :"
+ configuredRemoteAddress + ", not allowing: " + remoteAddress
+ " to register";
LOG.warn(msg);
throw new IOException(msg);
}
} | java | {
"resource": ""
} |
q161109 | FastFileCheck.checkFile | train | public static boolean checkFile(Configuration conf,
FileSystem srcFs, FileSystem parityFs,
Path srcPath, Path parityPath, Codec codec,
Progressable reporter,
boolean sourceOnly)
throws IOException, InterruptedException {
// Spot-checks a raided file by verifying a small window of the first
// block of every stripe. Returns false on the first mismatch.
FileStatus stat = srcFs.getFileStatus(srcPath);
long blockSize = stat.getBlockSize();
long len = stat.getLen();
List<Long> offsets = new ArrayList<Long>();
// check a small part of each stripe.
// i advances a whole stripe at a time; i * blockSize is promoted to
// long before the comparison, so this is overflow-safe for file sizes.
for (int i = 0; i * blockSize < len; i += codec.stripeLength) {
offsets.add(i * blockSize);
}
for (long blockOffset : offsets) {
if (sourceOnly) {
// Verify only the source data (no parity comparison).
if (!verifySourceFile(conf, srcFs,stat,
codec, blockOffset, reporter)) {
return false;
}
}
else {
// Verify source data against a decode from parity.
if (!verifyFile(conf, srcFs, parityFs, stat,
parityPath, codec, blockOffset, reporter)) {
return false;
}
}
}
return true;
} | java | {
"resource": ""
} |
q161110 | FastFileCheck.verifyFile | train | private static boolean verifyFile(Configuration conf,
FileSystem srcFs, FileSystem parityFs,
FileStatus stat, Path parityPath, Codec codec,
long blockOffset, Progressable reporter)
throws IOException, InterruptedException {
// Compares CRCs of data read directly from the source file against data
// reconstructed by the decoder, at up to three windows inside the block
// starting at blockOffset: the first `limit` bytes, the last `limit`
// bytes, and a random window in between.
Path srcPath = stat.getPath();
LOG.info("Verify file: " + srcPath + " at offset: " + blockOffset);
int limit = (int) Math.min(stat.getBlockSize(), DEFAULT_VERIFY_LEN);
if (reporter == null) {
reporter = RaidUtils.NULL_PROGRESSABLE;
}
// try to decode.
Decoder decoder = new Decoder(conf, codec);
if (codec.isDirRaid) {
decoder.connectToStore(srcPath);
}
List<Long> errorOffsets = new ArrayList<Long>();
// first limit bytes
errorOffsets.add(blockOffset);
long left = Math.min(stat.getBlockSize(), stat.getLen() - blockOffset);
if (left > limit) {
// last limit bytes
errorOffsets.add(blockOffset + left - limit);
// random limit bytes.
errorOffsets.add(blockOffset +
rand.nextInt((int)(left - limit)));
}
byte[] buffer = new byte[limit];
FSDataInputStream is = srcFs.open(srcPath);
try {
for (long errorOffset : errorOffsets) {
is.seek(errorOffset);
// NOTE(review): read() may fill the buffer only partially; both
// sides CRC the whole buffer, so stale bytes are compared too.
// This is only sound if both reads short-read identically --
// confirm, or switch to readFully on both paths.
is.read(buffer);
// calculate the oldCRC.
CRC32 oldCrc = new CRC32();
oldCrc.update(buffer);
CRC32 newCrc = new CRC32();
DecoderInputStream stream = decoder.new DecoderInputStream(
RaidUtils.NULL_PROGRESSABLE, limit, stat.getBlockSize(), errorOffset,
srcFs, srcPath, parityFs, parityPath, null, null, false);
try {
stream.read(buffer);
newCrc.update(buffer);
if (oldCrc.getValue() != newCrc.getValue()) {
// Mismatch: log a failure metric and stop checking this file.
LogUtils.logFileCheckMetrics(LOGRESULTS.FAILURE, codec, srcPath,
srcFs, errorOffset, limit, null, reporter);
LOG.error("mismatch crc, old " + oldCrc.getValue() +
", new " + newCrc.getValue() + ", for file: " + srcPath
+ " at offset " + errorOffset + ", read limit " + limit);
return false;
}
} finally {
reporter.progress();
if (stream != null) {
stream.close();
}
}
}
return true;
} finally {
is.close();
}
} | java | {
"resource": ""
} |
q161111 | FileInputFormat.addLocatedInputPathRecursively | train | protected void addLocatedInputPathRecursively(List<LocatedFileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter)
    throws IOException {
  // Depth-first walk of `path`: directories are recursed into, files
  // (with their block locations) are appended to `result`.
  RemoteIterator<LocatedFileStatus> entries =
      fs.listLocatedStatus(path, inputFilter);
  while (entries.hasNext()) {
    LocatedFileStatus status = entries.next();
    if (status.isDir()) {
      addLocatedInputPathRecursively(result, fs, status.getPath(), inputFilter);
    } else {
      result.add(status);
    }
  }
} | java | {
"resource": ""
} |
q161112 | FileInputFormat.getPathStrings | train | private static String[] getPathStrings(String commaSeparatedPaths) {
// Splits a comma-separated path list while treating commas inside
// glob groups ("{a,b}") as part of the path, not as separators.
int length = commaSeparatedPaths.length();
int curlyOpen = 0;
int pathStart = 0;
boolean globPattern = false;
List<String> pathStrings = new ArrayList<String>();
for (int i=0; i<length; i++) {
char ch = commaSeparatedPaths.charAt(i);
switch(ch) {
case '{' : {
// Entering (possibly nested) glob group; commas are now literal.
curlyOpen++;
if (!globPattern) {
globPattern = true;
}
break;
}
case '}' : {
// NOTE(review): an unmatched '}' drives curlyOpen negative and the
// rest of the string is never split -- confirm inputs are balanced.
curlyOpen--;
if (curlyOpen == 0 && globPattern) {
globPattern = false;
}
break;
}
case ',' : {
if (!globPattern) {
pathStrings.add(commaSeparatedPaths.substring(pathStart, i));
pathStart = i + 1 ;
}
break;
}
}
}
// The final segment (after the last separator) is always emitted, even
// when it is empty.
pathStrings.add(commaSeparatedPaths.substring(pathStart, length));
return pathStrings.toArray(new String[0]);
} | java | {
"resource": ""
} |
q161113 | FileInputFormat.getSplitHosts | train | protected String[] getSplitHosts(BlockLocation[] blkLocations,
long offset, long splitSize, NetworkTopology clusterMap)
throws IOException {
// Computes the preferred hosts for a split by attributing each byte of
// the split to the host/rack that stores it and ranking contributors.
int startIndex = getBlockIndex(blkLocations, offset);
long bytesInThisBlock = blkLocations[startIndex].getOffset() +
blkLocations[startIndex].getLength() - offset;
//If this is the only block, just return
if (bytesInThisBlock >= splitSize) {
return blkLocations[startIndex].getHosts();
}
long bytesInFirstBlock = bytesInThisBlock;
int index = startIndex + 1;
splitSize -= bytesInThisBlock;
// Walk forward until the split is exhausted to find the last block.
while (splitSize > 0) {
bytesInThisBlock =
Math.min(splitSize, blkLocations[index++].getLength());
splitSize -= bytesInThisBlock;
}
long bytesInLastBlock = bytesInThisBlock;
int endIndex = index - 1;
Map <Node,NodeInfo> hostsMap = new IdentityHashMap<Node,NodeInfo>();
Map <Node,NodeInfo> racksMap = new IdentityHashMap<Node,NodeInfo>();
String [] allTopos = new String[0];
// Build the hierarchy and aggregate the contribution of
// bytes at each level. See TestGetSplitHosts.java
for (index = startIndex; index <= endIndex; index++) {
// Establish the bytes in this block
if (index == startIndex) {
bytesInThisBlock = bytesInFirstBlock;
}
else if (index == endIndex) {
bytesInThisBlock = bytesInLastBlock;
}
else {
bytesInThisBlock = blkLocations[index].getLength();
}
allTopos = blkLocations[index].getTopologyPaths();
// If no topology information is available, just
// prefix a fakeRack
if (allTopos.length == 0) {
allTopos = fakeRacks(blkLocations, index);
}
// NOTE: This code currently works only for one level of
// hierarchy (rack/host). However, it is relatively easy
// to extend this to support aggregation at different
// levels
for (String topo: allTopos) {
Node node, parentNode;
NodeInfo nodeInfo, parentNodeInfo;
node = clusterMap.getNode(topo);
if (node == null) {
node = new NodeBase(topo);
clusterMap.add(node);
}
nodeInfo = hostsMap.get(node);
if (nodeInfo == null) {
// First time we see this host: register it and its rack.
nodeInfo = new NodeInfo(node);
hostsMap.put(node,nodeInfo);
parentNode = node.getParent();
parentNodeInfo = racksMap.get(parentNode);
if (parentNodeInfo == null) {
parentNodeInfo = new NodeInfo(parentNode);
racksMap.put(parentNode,parentNodeInfo);
}
parentNodeInfo.addLeaf(nodeInfo);
}
else {
// NOTE(review): this second hostsMap.get(node) is redundant --
// nodeInfo is already the mapped value from the lookup above.
nodeInfo = hostsMap.get(node);
parentNode = node.getParent();
parentNodeInfo = racksMap.get(parentNode);
}
nodeInfo.addValue(index, bytesInThisBlock);
parentNodeInfo.addValue(index, bytesInThisBlock);
} // for all topos
} // for all indices
// NOTE(review): allTopos here is whatever the LAST block reported, so
// the replication factor passed to identifyHosts comes from that block
// alone -- confirm this is the intended contract.
return identifyHosts(allTopos.length, racksMap);
} | java | {
"resource": ""
} |
q161114 | CoronaTaskLauncher.killJob | train | @SuppressWarnings("deprecation")
public void killJob(JobID jobId, Map<String, InetAddress> allTrackers) {
// Fans out a KillJobAction to every known task tracker so each one
// tears down the job's tasks and local state.
for (Map.Entry<String, InetAddress> entry : allTrackers.entrySet()) {
String trackerName = entry.getKey();
InetAddress addr = entry.getValue();
String description = "KillJobAction " + jobId;
ActionToSend action = new ActionToSend(trackerName, addr,
new KillJobAction(jobId), description);
allWorkQueues.enqueueAction(action);
LOG.info("Queueing " + description + " to worker " +
trackerName + "(" + addr.host + ":" + addr.port + ")");
}
} | java | {
"resource": ""
} |
q161115 | CoronaTaskLauncher.killTasks | train | public void killTasks(
    String trackerName, InetAddress addr, List<KillTaskAction> killActions) {
  // Queue one kill action per task on the shared work queues for the
  // given tracker.
  for (int i = 0; i < killActions.size(); i++) {
    KillTaskAction action = killActions.get(i);
    String description = "KillTaskAction " + action.getTaskID();
    LOG.info("Queueing " + description + " to worker " +
        trackerName + "(" + addr.host + ":" + addr.port + ")");
    ActionToSend toSend =
        new ActionToSend(trackerName, addr, action, description);
    allWorkQueues.enqueueAction(toSend);
  }
} | java | {
"resource": ""
} |
q161116 | CoronaTaskLauncher.commitTask | train | public void commitTask(
    String trackerName, InetAddress addr, CommitTaskAction action) {
  // Queue a commit action for the given task on the tracker's work queue.
  // Fix: the description previously said "KillTaskAction" (copy-paste from
  // killTasks), which made the logs misleading for commit operations.
  String description = "CommitTaskAction " + action.getTaskID();
  LOG.info("Queueing " + description + " to worker " +
      trackerName + "(" + addr.host + ":" + addr.port + ")");
  allWorkQueues.enqueueAction(new ActionToSend(
      trackerName, addr, action, description));
} | java | {
"resource": ""
} |
q161117 | CoronaTaskLauncher.launchTask | train | public void launchTask(Task task, String trackerName, InetAddress addr) {
// Queues a launch action for `task` on the given tracker, attaching the
// session info (session id plus primary/secondary job tracker addresses)
// the worker needs to report back.
CoronaSessionInfo info = new CoronaSessionInfo(
coronaJT.getSessionId(), coronaJT.getJobTrackerAddress(),
coronaJT.getSecondaryTrackerAddress());
LaunchTaskAction action = new LaunchTaskAction(task, info);
String description = "LaunchTaskAction " + action.getTask().getTaskID();
ActionToSend actionToSend =
new ActionToSend(trackerName, addr, action, description);
LOG.info("Queueing " + description + " to worker " +
trackerName + "(" + addr.host + ":" + addr.port + ")");
allWorkQueues.enqueueAction(actionToSend);
} | java | {
"resource": ""
} |
q161118 | NamespaceNotifierClient.connectionStateChanged | train | boolean connectionStateChanged(int newState) {
// Reacts to connection-manager state transitions. Returns false only
// when a reconnect-time resubscription fails; all other paths return true.
switch (newState) {
case ConnectionManager.CONNECTED:
LOG.info(listeningPort + ": Switched to CONNECTED state.");
// Try to resubscribe all the watched events
try {
return resubscribe();
} catch (Exception e) {
LOG.error(listeningPort + ": Resubscribing failed", e);
return false;
}
case ConnectionManager.DISCONNECTED_VISIBLE:
LOG.info(listeningPort + ": Switched to DISCONNECTED_VISIBLE state");
// Reset every watch's last-seen txid to "unknown" (-1) and tell the
// watcher the connection is gone.
for (NamespaceEventKey eventKey : watchedEvents.keySet())
watchedEvents.put(eventKey, -1L);
watcher.connectionFailed();
break;
case ConnectionManager.DISCONNECTED_HIDDEN:
// Hidden disconnects are transparent to the watcher: log only.
// (No break needed: this is the last case, falls out to return true.)
LOG.info(listeningPort + ": Switched to DISCONNECTED_HIDDEN state.");
}
return true;
} | java | {
"resource": ""
} |
q161119 | NamespaceNotifierClient.removeWatch | train | public void removeWatch(String path, EventType watchType)
throws NotConnectedToServerException, InterruptedException,
WatchNotPlacedException {
// Unsubscribes this client from (path, watchType) on the notifier
// server and drops the local bookkeeping entry. Throws
// WatchNotPlacedException if no such watch is tracked locally.
NamespaceEvent event = new NamespaceEvent(path, watchType.getByteValue());
NamespaceEventKey eventKey = new NamespaceEventKey(path, watchType);
Object connectionLock = connectionManager.getConnectionLock();
ServerHandler.Client server;
LOG.info(listeningPort + ": removeWatch: Removing watch from " +
NotifierUtils.asString(eventKey) + " ...");
if (!watchedEvents.containsKey(eventKey)) {
LOG.warn(listeningPort + ": removeWatch: watch doesen't exist at " +
NotifierUtils.asString(eventKey) + " ...");
throw new WatchNotPlacedException();
}
// All server communication happens under the connection lock so the
// connection cannot be swapped out mid-call.
synchronized (connectionLock) {
connectionManager.waitForTransparentConnect();
server = connectionManager.getServer();
try {
server.unsubscribe(connectionManager.getId(), event);
} catch (InvalidClientIdException e1) {
// Server no longer knows us; force a visible reconnect.
LOG.warn(listeningPort + ": removeWatch: server deleted us", e1);
connectionManager.failConnection(true);
} catch (ClientNotSubscribedException e2) {
// Server-side state disagrees with ours; log and still remove locally.
LOG.error(listeningPort + ": removeWatch: event not subscribed", e2);
} catch (TException e3) {
LOG.error(listeningPort + ": removeWatch: failed communicating to" +
" server", e3);
connectionManager.failConnection(true);
}
// The local entry is removed on every path, including the failure
// paths above -- the watch is gone from this client's perspective.
watchedEvents.remove(eventKey);
}
if (LOG.isDebugEnabled()) {
LOG.debug(listeningPort + ": Unsubscribed from " +
NotifierUtils.asString(eventKey));
}
} | java | {
"resource": ""
} |
q161120 | NamespaceNotifierClient.haveWatch | train | public boolean haveWatch(String path, EventType watchType) {
  // A watch exists iff its (path, type) key is tracked in watchedEvents.
  NamespaceEventKey key = new NamespaceEventKey(path, watchType);
  return watchedEvents.containsKey(key);
} | java | {
"resource": ""
} |
q161121 | NamespaceNotifierClient.resubscribe | train | private boolean resubscribe() throws TransactionIdTooOldException,
InterruptedException {
// Re-places every tracked watch after a reconnect, resuming each from
// its last-seen txid. Stops and returns false on the first failure.
for (NamespaceEventKey eventKey : watchedEvents.keySet()) {
NamespaceEvent event = eventKey.getEvent();
// NOTE(review): keySet iteration plus get() does one extra lookup per
// entry vs entrySet(); harmless, but worth simplifying someday.
if (!subscribe(event.getPath(), EventType.fromByteValue(event.getType()),
watchedEvents.get(eventKey))) {
return false;
}
}
return true;
} | java | {
"resource": ""
} |
q161122 | ConnectionManager.waitForTransparentConnect | train | void waitForTransparentConnect() throws InterruptedException,
NotConnectedToServerException {
// Blocks until the connection is re-established transparently.
// Must be called while holding connectionLock (wait() below requires it).
// Throws NotConnectedToServerException if the disconnect became visible.
if (state == DISCONNECTED_VISIBLE) {
LOG.warn(listeningPort + ": waitForTransparentConnect: got visible" +
" disconnected state");
throw new NotConnectedToServerException();
}
// Wait until we are not hidden disconnected
while (state != CONNECTED) {
connectionLock.wait();
switch (state) {
case CONNECTED:
// Exits the switch; the while condition then terminates the loop.
break;
case DISCONNECTED_HIDDEN:
// Spurious/interim wakeup: keep waiting.
continue;
case DISCONNECTED_VISIBLE:
LOG.warn(listeningPort + ": waitForTransparentConnect: got visible" +
" disconnected state");
throw new NotConnectedToServerException();
}
}
} | java | {
"resource": ""
} |
q161123 | ProtocolProxy.isMethodSupported | train | public boolean isMethodSupported(String methodName,
    Class<?>... parameterTypes)
    throws IOException {
  // A null serverMethods means client and server share the same protocol
  // version, so every method is implicitly supported.
  if (serverMethods == null) {
    return true;
  }
  // Resolve the method locally; wrap reflection failures as IOException.
  final Method method;
  try {
    method = protocol.getDeclaredMethod(methodName, parameterTypes);
  } catch (SecurityException e) {
    throw new IOException(e);
  } catch (NoSuchMethodException e) {
    throw new IOException(e);
  }
  // The server advertises supported methods as a set of fingerprints.
  int fingerprint = ProtocolSignature.getFingerprint(method);
  return serverMethods.contains(Integer.valueOf(fingerprint));
} | java | {
"resource": ""
} |
q161124 | TeraInputFormat.writePartitionFile | train | public static void writePartitionFile(JobConf conf,
    Path partFile) throws IOException {
  // Samples keys from the input and writes partition-boundary keys to
  // partFile (one per reducer boundary) for the terasort partitioner.
  TeraInputFormat inFormat = new TeraInputFormat();
  TextSampler sampler = new TextSampler();
  Text key = new Text();
  Text value = new Text();
  int partitions = conf.getNumReduceTasks();
  long sampleSize = conf.getLong(SAMPLE_SIZE, 100000);
  InputSplit[] splits = inFormat.getSplits(conf, conf.getNumMapTasks());
  int samples = Math.min(10, splits.length);
  long recordsPerSample = sampleSize / samples;
  int sampleStep = splits.length / samples;
  long records = 0;
  // take N samples from different parts of the input
  for (int i = 0; i < samples; ++i) {
    RecordReader<Text, Text> reader =
        inFormat.getRecordReader(splits[sampleStep * i], conf, null);
    try {
      while (reader.next(key, value)) {
        sampler.addKey(key);
        records += 1;
        // Stop once the cumulative record count reaches this sample's quota.
        if ((i + 1) * recordsPerSample <= records) {
          break;
        }
      }
    } finally {
      // Fix: readers were previously never closed, leaking a stream per
      // sampled split.
      reader.close();
    }
  }
  FileSystem outFs = partFile.getFileSystem(conf);
  if (outFs.exists(partFile)) {
    outFs.delete(partFile, false);
  }
  SequenceFile.Writer writer =
      SequenceFile.createWriter(outFs, conf, partFile, Text.class,
          NullWritable.class);
  try {
    NullWritable nullValue = NullWritable.get();
    for (Text split : sampler.createPartitions(partitions)) {
      writer.append(split, nullValue);
    }
  } finally {
    // Fix: also close the writer if append() throws mid-way.
    writer.close();
  }
} | java | {
"resource": ""
} |
q161125 | ReflectionUtils.setConf | train | public static void setConf(Object theObject, Configuration conf,
    boolean supportJobConf) {
  // Pushes a Configuration into the object if it accepts one; a null
  // configuration is a no-op.
  if (conf == null) {
    return;
  }
  if (theObject instanceof Configurable) {
    ((Configurable) theObject).setConf(conf);
  }
  // Optionally also wire up legacy JobConf-style configuration.
  if (supportJobConf) {
    setJobConf(theObject, conf);
  }
} | java | {
"resource": ""
} |
q161126 | ReflectionUtils.newInstance | train | public static <T> T newInstance(Class<T> theClass, Class<?>[] parameterTypes,
    Object[] initargs) {
  // Instantiates theClass via the constructor matching parameterTypes,
  // making it accessible first. Throws IllegalArgumentException on an
  // argument/parameter mismatch and RuntimeException on reflection errors.
  // Perform some sanity checks on the arguments.
  if (parameterTypes.length != initargs.length) {
    throw new IllegalArgumentException(
        "Constructor parameter types don't match constructor arguments");
  }
  for (int i = 0; i < parameterTypes.length; i++) {
    Class<?> clazz = parameterTypes[i];
    Object arg = initargs[i];
    // Fix/generalization: the old check `clazz.isInstance(arg)` rejected
    // null for reference parameters (null is a legal argument) and ALWAYS
    // rejected primitive parameter types, since e.g.
    // int.class.isInstance(Integer) is false. For primitives we now let
    // Constructor.newInstance perform the unboxing check itself.
    boolean compatible;
    if (clazz.isPrimitive()) {
      compatible = (arg != null);
    } else {
      compatible = (arg == null) || clazz.isInstance(arg);
    }
    if (!compatible) {
      throw new IllegalArgumentException("Object : " + arg
          + " is not an instance of " + clazz);
    }
  }
  try {
    Constructor<T> meth = theClass.getDeclaredConstructor(parameterTypes);
    meth.setAccessible(true);
    return meth.newInstance(initargs);
  } catch (Exception e) {
    // Preserve the original cause for the caller.
    throw new RuntimeException(e);
  }
} | java | {
"resource": ""
} |
q161127 | ReflectionUtils.logThreadInfo | train | public static void logThreadInfo(Log log,
String title,
long minInterval) {
// Dumps a full thread stack report to `log` at INFO level, rate-limited
// to at most once per minInterval seconds across all callers.
boolean dumpStack = false;
if (log.isInfoEnabled()) {
// Rate limiting is guarded by the class monitor; the actual dump
// happens outside the lock to keep the critical section short.
synchronized (ReflectionUtils.class) {
long now = System.currentTimeMillis();
if (now - previousLogTime >= minInterval * 1000) {
previousLogTime = now;
dumpStack = true;
}
}
if (dumpStack) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
printThreadInfo(new PrintWriter(buffer), title);
log.info(buffer.toString());
}
}
} | java | {
"resource": ""
} |
q161128 | NNStorageDirectoryRetentionManager.backupFiles | train | public static void backupFiles(FileSystem fs, File dest,
Configuration conf) throws IOException {
// Moves an existing `dest` aside as a timestamped backup
// ("<dest><sep><date>"), retrying up to three times. Throws after the
// retries are exhausted or if the retention limit is already exceeded.
// check if we can still backup
cleanUpAndCheckBackup(conf, dest);
int MAX_ATTEMPT = 3;
for (int i = 0; i < MAX_ATTEMPT; i++) {
try {
String mdate = dateForm.get().format(new Date(System.currentTimeMillis()));
if (dest.exists()) {
// NOTE(review): File.pathSeparator (':' on Unix) is used as the
// name/date separator. Unusual, but getBackups() and
// deleteOldBackups() parse names the same way, so it is at least
// internally consistent -- confirm it was not meant to be
// File.separator.
File tmp = new File (dest + File.pathSeparator + mdate);
FLOG.info("Moving aside " + dest + " as " + tmp);
if (!dest.renameTo(tmp)) {
throw new IOException("Unable to rename " + dest +
" to " + tmp);
}
FLOG.info("Moved aside " + dest + " as " + tmp);
}
return;
} catch (IOException e) {
FLOG.error("Creating backup exception. Will retry ", e);
try {
Thread.sleep(1000);
} catch (InterruptedException iex) {
throw new IOException(iex);
}
}
}
throw new IOException("Cannot create backup for: " + dest);
} | java | {
"resource": ""
} |
q161129 | NNStorageDirectoryRetentionManager.cleanUpAndCheckBackup | train | static void cleanUpAndCheckBackup(Configuration conf, File origin) throws IOException {
// Prunes old backups of `origin` according to the retention policy and
// then verifies there is still room for one more backup; throws if the
// maximum backup count would be exceeded.
// get all backups
String[] backups = getBackups(origin);
File root = origin.getParentFile();
// maximum total number of backups
int copiesToKeep = conf.getInt(NN_IMAGE_COPIES_TOKEEP,
NN_IMAGE_COPIES_TOKEEP_DEFAULT);
// days to keep, if set to 0 than keep only last backup
int daysToKeep = conf.getInt(NN_IMAGE_DAYS_TOKEEP,
NN_IMAGE_DAYS_TOKEEP_DEFAULT);
if (copiesToKeep == 0 && daysToKeep == 0) {
// Do not delete anything in this case
// every startup will create extra checkpoint
return;
}
// cleanup copies older than daysToKeep
deleteOldBackups(root, backups, daysToKeep, copiesToKeep);
// check remaining backups
// (re-list, since deleteOldBackups may have removed some entries)
backups = getBackups(origin);
if (backups.length >= copiesToKeep) {
throw new IOException("Exceeded maximum number of standby backups of "
+ origin + " under " + origin.getParentFile() + " max: " + copiesToKeep);
}
} | java | {
"resource": ""
} |
q161130 | NNStorageDirectoryRetentionManager.deleteOldBackups | train | static void deleteOldBackups(File root, String[] backups, int daysToKeep,
    int copiesToKeep) {
  // Deletes backups that exceed the retention policy. `backups` is sorted
  // oldest-first (see getBackups), so we scan from the front and stop at
  // the first backup young enough to keep.
  Date now = new Date(System.currentTimeMillis());
  // leave the copiesToKeep-1 at least (+1 will be the current backup)
  int maxIndex = Math.max(0, backups.length - copiesToKeep + 1);
  // Fix: the age threshold was computed as `daysToKeep * 24 * 60 * 60 *
  // 1000` in int arithmetic, which overflows for daysToKeep > 24 and made
  // the retention check wrap around. Compute it in long.
  long maxAgeMs = daysToKeep * 24L * 60L * 60L * 1000L;
  for (int i = 0; i < maxIndex; i++) {
    String backup = backups[i];
    Date backupDate = null;
    try {
      backupDate = dateForm.get().parse(backup.substring(backup
          .indexOf(File.pathSeparator) + 1));
    } catch (ParseException pex) {
      // This should not happen because of the
      // way we construct the list
    }
    if (backupDate == null) {
      // Fix: previously an unparseable name caused an NPE below; skip it
      // defensively instead.
      continue;
    }
    long backupAge = now.getTime() - backupDate.getTime();
    // if daysToKeep is set delete everything older providing that
    // we retain at least copiesToKeep copies
    boolean deleteOldBackup = (daysToKeep > 0 && backupAge > maxAgeMs);
    // if daysToKeep is set to zero retain most recent copies
    boolean deleteExtraBackup = (daysToKeep == 0);
    if (deleteOldBackup || deleteExtraBackup) {
      // This backup is older than daysToKeep, delete it
      try {
        FLOG.info("Deleting backup " + new File(root, backup));
        FileUtil.fullyDelete(new File(root, backup));
        FLOG.info("Deleted backup " + new File(root, backup));
      } catch (IOException iex) {
        FLOG.error("Error deleting backup " + new File(root, backup), iex);
      }
    } else {
      // done with deleting old backups
      break;
    }
  }
} | java | {
"resource": ""
} |
q161131 | NNStorageDirectoryRetentionManager.getBackups | train | static String[] getBackups(File origin) {
// Lists sibling entries named "<origin><sep><date>" with a parseable
// date suffix, sorted by date. Returns an empty array when the parent
// cannot be listed.
File root = origin.getParentFile();
final String originName = origin.getName();
String[] backups = root.list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
// Accept only "<originName><pathSeparator><parseable date>".
if (!name.startsWith(originName + File.pathSeparator)
|| name.equals(originName))
return false;
try {
dateForm.get().parse(name.substring(name.indexOf(File.pathSeparator) + 1));
} catch (ParseException pex) {
return false;
}
return true;
}
});
if (backups == null)
return new String[0];
Arrays.sort(backups, new Comparator<String>() {
@Override
public int compare(String back1, String back2) {
try {
Date date1 = dateForm.get().parse(back1.substring(back1
.indexOf(File.pathSeparator) + 1));
Date date2 = dateForm.get().parse(back2.substring(back2
.indexOf(File.pathSeparator) + 1));
// NOTE(review): -1 * date2.compareTo(date1) equals
// date1.compareTo(date2), i.e. this sorts EARLIEST-first
// (ascending), which is what deleteOldBackups' oldest-first
// scan relies on -- the previous "reverse order" comment here
// was misleading.
return -1 * date2.compareTo(date1);
} catch (ParseException pex) {
return 0;
}
}
});
return backups;
} | java | {
"resource": ""
} |
q161132 | FSImage.doImportCheckpoint | train | void doImportCheckpoint() throws IOException {
  // Replaces the live image with one loaded from the configured checkpoint
  // directories, then saves the namespace back to the real storage dirs.
  Collection<URI> checkpointDirs =
      NNStorageConfiguration.getCheckpointDirs(conf, null);
  Collection<URI> checkpointEditsDirs =
      NNStorageConfiguration.getCheckpointEditsDirs(conf, null);
  if (checkpointDirs == null || checkpointDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
        + "\"dfs.namenode.checkpoint.dir\" is not set." );
  }
  // Fix: this branch checks the checkpoint EDITS dirs, but the message
  // previously named "dfs.namenode.checkpoint.dir", pointing operators at
  // the wrong configuration key.
  if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
        + "\"dfs.namenode.checkpoint.edits.dir\" is not set." );
  }
  // replace real image with the checkpoint image
  FSImage realImage = namesystem.getFSImage();
  assert realImage == this;
  FSImage ckptImage = new FSImage(conf,
      checkpointDirs, checkpointEditsDirs, null);
  ckptImage.setFSNamesystem(namesystem);
  namesystem.dir.fsImage = ckptImage;
  // load from the checkpoint dirs
  try {
    ckptImage.recoverTransitionRead(StartupOption.REGULAR);
  } finally {
    ckptImage.close();
  }
  // return back the real image
  realImage.storage.setStorageInfo(ckptImage.storage);
  realImage.getEditLog().setLastWrittenTxId(ckptImage.getEditLog().getLastWrittenTxId() + 1);
  namesystem.dir.fsImage = realImage;
  // and save it but keep the same checkpointTime
  // parameters
  saveNamespace();
} | java | {
"resource": ""
} |
q161133 | FSImage.loadFSImage | train | protected void loadFSImage(ImageInputStream iis, File imageFile) throws IOException {
// Loads an image after arming the stream with the stored MD5 so the
// digest can be validated; a missing sidecar MD5 file is a hard error.
MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
if (expectedMD5 == null) {
throw new IOException("No MD5 file found corresponding to image file "
+ imageFile);
}
iis.setImageDigest(expectedMD5);
loadFSImage(iis);
} | java | {
"resource": ""
} |
q161134 | FSImage.getParent | train | String getParent(String path) {
  // Strip the final path component; a top-level path like "/x" yields "".
  // A path with no separator at all would make lastIndexOf return -1 and
  // substring throw, same as the original behavior.
  int lastSeparator = path.lastIndexOf(Path.SEPARATOR);
  return path.substring(0, lastSeparator);
} | java | {
"resource": ""
} |
q161135 | FSImage.loadEdits | train | protected long loadEdits(Iterable<EditLogInputStream> editStreams)
throws IOException {
// Replays each edit stream in order on top of the most recent
// checkpoint, returning the number of transactions applied. Also
// advances the edit log's last-written txid and refreshes quota counts.
long lastAppliedTxId = storage.getMostRecentCheckpointTxId();
int numLoaded = 0;
FSEditLogLoader loader = new FSEditLogLoader(namesystem);
// Load latest edits
for (EditLogInputStream editIn : editStreams) {
FLOG.info("Load Image: Reading edits: " + editIn + " last applied txid#: "
+ lastAppliedTxId);
numLoaded += loader.loadFSEdits(editIn, lastAppliedTxId);
lastAppliedTxId = loader.getLastAppliedTxId();
}
editLog.setLastWrittenTxId(lastAppliedTxId);
FLOG.info("Load Image: Number of edit transactions loaded: "
+ numLoaded + " last applied txid: " + lastAppliedTxId);
// update the counts
namesystem.dir.updateCountForINodeWithQuota();
return numLoaded;
} | java | {
"resource": ""
} |
q161136 | FSImage.saveFSImage | train | void saveFSImage(SaveNamespaceContext context, ImageManager im, boolean forceUncompressed)
throws IOException {
// Writes the namespace image for the context's txid to the given image
// manager and records the saved digest for later checkpoint validation.
long txid = context.getTxId();
OutputStream os = im.getCheckpointOutputStream(txid);
FSImageFormat.Saver saver = new FSImageFormat.Saver(context);
// forceUncompressed bypasses the configured image compression.
FSImageCompression compression = FSImageCompression.createCompression(conf, forceUncompressed);
saver.save(os, compression, null, im.toString());
InjectionHandler.processEvent(InjectionEvent.FSIMAGE_SAVED_IMAGE, txid);
storage.setCheckpointImageDigest(txid, saver.getSavedDigest());
} | java | {
"resource": ""
} |
q161137 | FSImage.saveNamespace | train | public synchronized void saveNamespace(boolean forUncompressed)
throws IOException {
// Saves the current namespace to all image directories. If the edit log
// is open, the current segment is ended first and a new one started
// afterwards so the saved image lines up with a segment boundary.
InjectionHandler
.processEvent(InjectionEvent.FSIMAGE_STARTING_SAVE_NAMESPACE);
if (editLog == null) {
throw new IOException("editLog must be initialized");
}
storage.attemptRestoreRemovedStorage();
// NOTE(review): this event is fired twice (also above) -- confirm the
// duplication is intentional for the injection-test framework.
InjectionHandler
.processEvent(InjectionEvent.FSIMAGE_STARTING_SAVE_NAMESPACE);
boolean editLogWasOpen = editLog.isOpen();
if (editLogWasOpen) {
editLog.endCurrentLogSegment(true);
}
long imageTxId = editLog.getLastWrittenTxId();
try {
// for testing only - we will wait until interruption comes
InjectionHandler
.processEvent(InjectionEvent.FSIMAGE_CREATING_SAVER_THREADS);
saveFSImageInAllDirs(imageTxId, forUncompressed);
storage.writeAll();
} finally {
if (editLogWasOpen) {
editLog.startLogSegment(imageTxId + 1, true);
// Take this opportunity to note the current transaction.
// Even if the namespace save was cancelled, this marker
// is only used to determine what transaction ID is required
// for startup. So, it doesn't hurt to update it unnecessarily.
storage.writeTransactionIdFileToStorage(imageTxId + 1, this);
}
saveNamespaceContext.clear();
}
} | java | {
"resource": ""
} |
q161138 | FSImage.confirmFormat | train | boolean confirmFormat(boolean force, boolean interactive) throws IOException {
  // Collect everything whose format must be confirmed: each storage
  // directory plus the edit log's own confirmables, then delegate the
  // force/interactive decision to Storage.
  List<FormatConfirmable> pending = Lists.newArrayList();
  for (StorageDirectory dir : storage.dirIterable(null)) {
    pending.add(dir);
  }
  pending.addAll(editLog.getFormatConfirmables());
  return Storage.confirmFormat(pending, force, interactive);
} | java | {
"resource": ""
} |
q161139 | FSImage.rollFSImage | train | void rollFSImage(CheckpointSignature sig) throws IOException {
// Promotes the uploaded checkpoint image identified by `sig` to the
// current image, after validating the signature against local storage.
// The elapsed time is recorded in the rollFsImageTime metric.
long start = System.nanoTime();
sig.validateStorageInfo(this.storage);
saveDigestAndRenameCheckpointImage(sig.mostRecentCheckpointTxId,
sig.imageDigest);
long rollTime = DFSUtil.getElapsedTimeMicroSeconds(start);
if (metrics != null) {
metrics.rollFsImageTime.inc(rollTime);
}
} | java | {
"resource": ""
} |
q161140 | FSImage.saveDigestAndRenameCheckpointImage | train | synchronized void saveDigestAndRenameCheckpointImage(
    long txid, MD5Hash digest) throws IOException {
  // Finalizes a checkpoint image: the digest supplied by the checkpointer
  // must match the digest this namenode recorded for txid before the
  // image is renamed into place and advertised.
  if (!digest.equals(storage.getCheckpointImageDigest(txid))) {
    // Fix: the message previously read "...checksum of" with no trailing
    // space and presented the caller-supplied digest as the expected
    // value; the expected value is the one recorded in storage.
    throw new IOException(
        "Checkpoint image is corrupt: expecting an MD5 checksum of " +
        storage.getCheckpointImageDigest(txid) + " but is " + digest);
  }
  imageSet.saveDigestAndRenameCheckpointImage(txid, digest);
  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  storage.setMostRecentCheckpointTxId(txid);
} | java | {
"resource": ""
} |
q161141 | FSImage.getCheckpointDirs | train | static Collection<File> getCheckpointDirs(Configuration conf,
String defaultName) {
Collection<String> dirNames = conf.getStringCollection("fs.checkpoint.dir");
if (dirNames.size() == 0 && defaultName != null) {
dirNames.add(defaultName);
}
Collection<File> dirs = new ArrayList<File>(dirNames.size());
for (String name : dirNames) {
dirs.add(new File(name));
}
return dirs;
} | java | {
"resource": ""
} |
q161142 | JobInitializationPoller.printJobs | train | private void printJobs(ArrayList<JobInProgress> jobsToInitialize) {
for (JobInProgress job : jobsToInitialize) {
LOG.info("Passing to Initializer Job Id :" + job.getJobID()
+ " User: " + job.getProfile().getUser() + " Queue : "
+ job.getProfile().getQueueName());
}
} | java | {
"resource": ""
} |
q161143 | JobInitializationPoller.assignThreadsToQueues | train | private void assignThreadsToQueues() {
// Partitions the job queues across 'poolSize' initialization threads:
// each thread first receives floor(countOfQueues / poolSize) queues,
// then any remainder is spread round-robin over the first threads.
int countOfQueues = jobQueues.size();
String[] queues = (String[]) jobQueues.keySet().toArray(
new String[countOfQueues]);
int numberOfQueuesPerThread = countOfQueues / poolSize;
int numberOfQueuesAssigned = 0;
for (int i = 0; i < poolSize; i++) {
JobInitializationThread initializer = createJobInitializationThread();
// Contiguous batch of queues owned by this thread.
int batch = (i * numberOfQueuesPerThread);
for (int j = batch; j < (batch + numberOfQueuesPerThread); j++) {
initializer.addQueue(queues[j]);
threadsToQueueMap.put(queues[j], initializer);
numberOfQueuesAssigned++;
}
}
// NOTE(review): if poolSize > countOfQueues, numberOfQueuesPerThread is 0,
// threadsToQueueMap stays empty, and the lookup below returns null ->
// NPE on t.addQueue. Presumably the poller guarantees
// poolSize <= countOfQueues — TODO confirm.
if (numberOfQueuesAssigned < countOfQueues) {
// Assign remaining queues in round robin fashion to other queues
int startIndex = 0;
for (int i = numberOfQueuesAssigned; i < countOfQueues; i++) {
JobInitializationThread t = threadsToQueueMap
.get(queues[startIndex]);
t.addQueue(queues[i]);
threadsToQueueMap.put(queues[i], t);
startIndex++;
}
}
} | java | {
"resource": ""
} |
q161144 | JobInitializationPoller.cleanUpInitializedJobsList | train | void cleanUpInitializedJobsList() {
// Removes from the initialized-jobs map every job that has either
// started running tasks (and is then also dropped from the waiting
// queue) or has completed/been killed.
Iterator<Entry<JobID, JobInProgress>> jobsIterator =
initializedJobs.entrySet().iterator();
while(jobsIterator.hasNext()) {
Entry<JobID,JobInProgress> entry = jobsIterator.next();
JobInProgress job = entry.getValue();
if (job.getStatus().getRunState() == JobStatus.RUNNING) {
// A running job that already has tasks scheduled no longer needs
// to wait for initialization.
if (isScheduled(job)) {
LOG.info("Removing scheduled jobs from waiting queue"
+ job.getJobID());
jobsIterator.remove();
jobQueueManager.removeJobFromWaitingQueue(job);
continue;
}
}
if(job.isComplete()) {
LOG.info("Removing killed/completed job from initalized jobs " +
"list : "+ job.getJobID());
jobsIterator.remove();
}
}
} | java | {
"resource": ""
} |
q161145 | JobInitializationPoller.isScheduled | train | private boolean isScheduled(JobInProgress job) {
return ((job.pendingMaps() < job.desiredMaps())
|| (job.pendingReduces() < job.desiredReduces()));
} | java | {
"resource": ""
} |
q161146 | BlockCrcFileReader.readHeader | train | void readHeader() throws IOException {
int version = in.readInt();
if (version != BlockCrcInfoWritable.LATEST_BLOCK_CRC_FILE_VERSION) {
throw new IOException("Version " + version + " is not supported.");
}
numBuckets = in.readInt();
currentBucket = -1;
numRecordsReadInBucket = 0;
numRecordsInBucket = 0;
} | java | {
"resource": ""
} |
q161147 | BlockCrcFileReader.moveToNextRecordAndGetItsBucketId | train | int moveToNextRecordAndGetItsBucketId() throws IOException {
while (numRecordsReadInBucket >= numRecordsInBucket) {
if (currentBucket + 1>= numBuckets) {
// We've finished all the records.
return -1;
} else {
numRecordsInBucket = in.readInt();
currentBucket++;
numRecordsReadInBucket = 0;
}
}
return currentBucket;
} | java | {
"resource": ""
} |
q161148 | BlockCrcFileReader.getNextRecord | train | BlockCrcInfoWritable getNextRecord() throws IOException {
// By calling getBucketIdForNextRecord(), we make sure the next field
// to read is the next record (if there is any record left in the file)
// Also, by checking the return value, we know whether we've finished
// the file.
if (moveToNextRecordAndGetItsBucketId() == -1) {
return null;
}
BlockCrcInfoWritable crcInfo = new BlockCrcInfoWritable();
crcInfo.readFields(in);
numRecordsReadInBucket++;
return crcInfo;
} | java | {
"resource": ""
} |
q161149 | DistBlockIntegrityMonitor.startOneJob | train | public static Job startOneJob(Worker newWorker,
Priority pri, Set<String> jobFiles, long detectTime,
AtomicLong numFilesSubmitted, AtomicLong lastCheckingTime,
long maxPendingJobs)
throws IOException, InterruptedException, ClassNotFoundException {
// Submits one repair job for the files accumulated in 'jobFiles'.
// Returns null (without submitting) when there are no files or when the
// worker already has maxPendingJobs jobs outstanding.
// Locking order: jobFiles, then newWorker.jobIndex.
if (lastCheckingTime != null) {
lastCheckingTime.set(System.currentTimeMillis());
}
String startTimeStr = dateFormat.format(new Date());
// Job name encodes worker prefix, a per-worker counter, priority and time.
String jobName = newWorker.JOB_NAME_PREFIX + "." + newWorker.jobCounter +
"." + pri + "-pri" + "." + startTimeStr;
Job job = null;
synchronized(jobFiles) {
if (jobFiles.size() == 0) {
return null;
}
newWorker.jobCounter++;
synchronized(newWorker.jobIndex) {
if (newWorker.jobIndex.size() >= maxPendingJobs) {
// full
return null;
}
job = newWorker.startJob(jobName, jobFiles, pri, detectTime);
}
// Only count and clear the batch after the job was actually started.
numFilesSubmitted.addAndGet(jobFiles.size());
jobFiles.clear();
}
return job;
} | java | {
"resource": ""
} |
q161150 | DistBlockIntegrityMonitor.getLostStripes | train | private Map<Integer, Integer> getLostStripes(
Configuration conf, FileStatus stat, FileSystem fs)
throws IOException {
// Maps stripe index -> number of corrupt blocks in that stripe for the
// given raided file. Returns an empty map for files that are not raided.
Map<Integer, Integer> lostStripes = new HashMap<Integer, Integer>();
RaidInfo raidInfo = RaidUtils.getFileRaidInfo(stat, conf);
if (raidInfo.codec == null) {
// Can not find the parity file, the file is not raided.
return lostStripes;
}
Codec codec = raidInfo.codec;
// Directory-raid and file-raid layouts are walked by different helpers.
if (codec.isDirRaid) {
RaidUtils.collectDirectoryCorruptBlocksInStripe(conf,
(DistributedFileSystem)fs, raidInfo,
stat, lostStripes);
} else {
RaidUtils.collectFileCorruptBlocksInStripe((DistributedFileSystem)fs,
raidInfo, stat, lostStripes);
}
return lostStripes;
} | java | {
"resource": ""
} |
q161151 | DistBlockIntegrityMonitor.getLostFiles | train | protected Map<String, Integer> getLostFiles(
Pattern pattern, String[] dfsckArgs) throws IOException {
Map<String, Integer> lostFiles = new HashMap<String, Integer>();
BufferedReader reader = getLostFileReader(dfsckArgs);
String line = reader.readLine(); // remove the header line
while ((line = reader.readLine()) != null) {
Matcher m = pattern.matcher(line);
if (!m.find()) {
continue;
}
String fileName = m.group(1).trim();
Integer numLost = lostFiles.get(fileName);
numLost = numLost == null ? 0 : numLost;
numLost += 1;
lostFiles.put(fileName, numLost);
}
LOG.info("FSCK returned " + lostFiles.size() + " files with args " +
Arrays.toString(dfsckArgs));
RaidUtils.filterTrash(getConf(), lostFiles.keySet().iterator());
LOG.info("getLostFiles returning " + lostFiles.size() + " files with args " +
Arrays.toString(dfsckArgs));
return lostFiles;
} | java | {
"resource": ""
} |
q161152 | DistBlockIntegrityMonitor.getAggregateStatus | train | @Override
public BlockIntegrityMonitor.Status getAggregateStatus() {
Status fixer = corruptionWorker.getStatus();
Status copier = decommissioningWorker.getStatus();
List<JobStatus> jobs = new ArrayList<JobStatus>();
List<JobStatus> simFailedJobs = new ArrayList<JobStatus>();
List<JobStatus> failedJobs = new ArrayList<JobStatus>();
List<String> highPriFileNames = new ArrayList<String>();
int numHighPriFiles = 0;
int numLowPriFiles = 0;
int numLowestPriFiles = 0;
if (fixer != null) {
jobs.addAll(fixer.jobs);
simFailedJobs.addAll(fixer.simFailJobs);
failedJobs.addAll(fixer.failJobs);
if (fixer.highPriorityFileNames != null) {
highPriFileNames.addAll(fixer.highPriorityFileNames);
}
numHighPriFiles += fixer.highPriorityFiles;
numLowPriFiles += fixer.lowPriorityFiles;
numLowestPriFiles += fixer.lowestPriorityFiles;
}
if (copier != null) {
jobs.addAll(copier.jobs);
simFailedJobs.addAll(copier.simFailJobs);
failedJobs.addAll(copier.failJobs);
if (copier.highPriorityFileNames != null) {
highPriFileNames.addAll(copier.highPriorityFileNames);
}
numHighPriFiles += copier.highPriorityFiles;
numLowPriFiles += copier.lowPriorityFiles;
numLowestPriFiles += copier.lowestPriorityFiles;
}
return new Status(numHighPriFiles, numLowPriFiles, numLowestPriFiles,
jobs, highPriFileNames,failedJobs, simFailedJobs);
} | java | {
"resource": ""
} |
q161153 | CoronaJTFallbackCaller.makeCall | train | public final T makeCall() throws IOException {
// Invokes call() until it succeeds. A ConnectException triggers a
// fallback/reconnect to a new job tracker; any other IOException is
// delegated to handleIOException (subclass-defined retry policy).
while (true) {
try {
return call();
} catch (ConnectException e) {
// We fall back only after ConnectException
try {
// Fall back to secondary tracker and reconnect to new JT
reconnectToNewJobTracker(0);
} catch (IOException f) {
LOG.error("Fallback process failed with ", f);
// Re-throw original exception
throw e;
}
} catch (IOException e) {
// the subclass of fallback caller should provide
// logic here. We will retry in most cases
handleIOException(e);
}
}
} | java | {
"resource": ""
} |
q161154 | CoronaJTFallbackCaller.reconnectToNewJobTracker | train | private final void reconnectToNewJobTracker(int connectNum) throws IOException {
if (connectNum >= CONNECT_MAX_NUMBER) {
LOG.error("reconnectToNewJobTracker has reached its max number.");
throw new IOException("reconnectToNewJobTracker has reached its max number.");
}
InetSocketAddress secondaryTracker = getSecondaryTracker();
JobConf conf = getConf();
InetSocketAddress oldAddress = getCurrentClientAddress();
LOG.info("Falling back from " + oldAddress + " to secondary tracker at "
+ secondaryTracker + " with " + connectNum + " try");
if (secondaryTracker == null)
throw new IOException("Secondary address not provided.");
shutdown();
InterCoronaJobTrackerProtocol secondaryClient = RPC.waitForProxy(
InterCoronaJobTrackerProtocol.class,
InterCoronaJobTrackerProtocol.versionID, secondaryTracker, conf,
SECONDARY_TRACKER_CONNECT_TIMEOUT);
// Obtain new address
InetSocketAddressWritable oldAddrWritable = new InetSocketAddressWritable(
oldAddress);
InetSocketAddressWritable newAddress = null;
int retryNum = 0;
do {
newAddress = secondaryClient.getNewJobTrackerAddress(oldAddrWritable);
try {
waitRetry();
} catch (InterruptedException e) {
LOG.error("Fallback interrupted, taking next retry.");
}
++retryNum;
} while (newAddress == null && predRetry(retryNum));
if (newAddress == null || newAddress.getAddress() == null)
throw new IOException("Failed to obtain new job tracker address.");
RPC.stopProxy(secondaryClient);
try {
connect(newAddress.getAddress());
LOG.info("Fallback process successful: " + newAddress.getAddress());
} catch (IOException e) {
LOG.error("Fallback connect to " + newAddress.getAddress() + " failed for ", e);
reconnectToNewJobTracker(++connectNum);
}
} | java | {
"resource": ""
} |
q161155 | ResourceCalculatorPlugin.getResourceCalculatorPlugin | train | public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
if (clazz != null) {
return ReflectionUtils.newInstance(clazz, conf);
}
// No class given, try a os specific class
try {
String osName = System.getProperty("os.name");
if (osName.startsWith("Linux")) {
return new LinuxResourceCalculatorPlugin();
}
} catch (SecurityException se) {
// Failed to get Operating System name.
return new NullResourceCalculatorPlugin();
}
// Not supported on this system.
return new NullResourceCalculatorPlugin();
} | java | {
"resource": ""
} |
q161156 | ParallelStreamReader.performReads | train | private void performReads(ReadResult readResult) throws InterruptedException {
// Submits one ReadOperation per stream to the read pool, throttled by
// the 'slots' semaphore, then blocks until every operation has released
// its slot. Reports progress while waiting so the task is not killed.
long start = System.currentTimeMillis();
for (int i = 0; i < streams.length; ) {
// Only advance to the next stream once a slot was acquired for it.
boolean acquired = slots.tryAcquire(1, 10, TimeUnit.SECONDS);
reporter.progress();
if (acquired) {
readPool.execute(new ReadOperation(readResult, i));
i++;
}
}
// All read operations have been submitted to the readPool.
// Now wait for the operations to finish and release the semaphore.
while (true) {
// Acquiring all numThreads permits proves every operation finished.
boolean acquired =
slots.tryAcquire(numThreads, 10, TimeUnit.SECONDS);
reporter.progress();
if (acquired) {
slots.release(numThreads);
break;
}
}
readTime += (System.currentTimeMillis() - start);
} | java | {
"resource": ""
} |
q161157 | BalancePlan.logImbalancedNodes | train | private void logImbalancedNodes() {
if (LOG.isInfoEnabled()) {
int underUtilized = 0, overUtilized = 0;
for (BalancerDatanode node : this.datanodes.values()) {
if (isUnderUtilized(node))
underUtilized++;
else if (isOverUtilized(node))
overUtilized++;
}
StringBuilder msg = new StringBuilder();
msg.append(overUtilized);
msg.append(" over utilized nodes:");
for (BalancerDatanode node : this.datanodes.values()) {
if (isOverUtilized(node)) {
msg.append(" ");
msg.append(node.getName());
}
}
LOG.info(msg);
msg = new StringBuilder();
msg.append(underUtilized);
msg.append(" under utilized nodes: ");
for (BalancerDatanode node : this.datanodes.values()) {
if (isUnderUtilized(node)) {
msg.append(" ");
msg.append(node.getName());
}
}
LOG.info(msg);
}
} | java | {
"resource": ""
} |
q161158 | BalancePlan.logPlanOutcome | train | private void logPlanOutcome() {
if (LOG.isInfoEnabled()) {
LOG.info("Predicted plan outcome: bytesLeftToMove: "
+ bytesLeftToMove + ", bytesToMove: " + bytesToMove);
for (BalancerDatanode node : this.datanodes.values()) {
LOG.info(node.getName() + " remaining: " + node.getCurrentRemaining());
}
}
} | java | {
"resource": ""
} |
q161159 | BalancePlan.scheduleTask | train | private void scheduleTask(Source source, long size, Target target) {
NodeTask nodeTask = new NodeTask(target, size);
source.addNodeTask(nodeTask);
target.addNodeTask(nodeTask);
sources.add(source);
targets.add(target);
LOG.info("scheduled " + size + " bytes : " + source.getName() + " -> " + target.getName());
} | java | {
"resource": ""
} |
q161160 | BalancePlan.logDataDistribution | train | public static void logDataDistribution(DatanodeInfo[] report) {
if (LOG.isInfoEnabled()) {
double avgRemaining = computeAvgRemaining(Arrays.asList(report));
StringBuilder msg = new StringBuilder("Data distribution report: avgRemaining "
+ avgRemaining);
for (DatanodeInfo node : report) {
msg.append("\n").append(node.getName());
msg.append(" remaining ").append(getRemaining(node));
msg.append(" raw ").append(node.getRemaining()).append(" / ").append(node.getCapacity());
}
LOG.info(msg);
}
} | java | {
"resource": ""
} |
q161161 | JobConf.setJarByClass | train | public void setJarByClass(Class cls) {
String jar = findContainingJar(cls);
if (jar != null) {
setJar(jar);
}
} | java | {
"resource": ""
} |
q161162 | JobConf.deleteLocalFiles | train | @Deprecated
public void deleteLocalFiles() throws IOException {
String[] localDirs = getLocalDirs();
for (int i = 0; i < localDirs.length; i++) {
FileSystem.getLocal(this).delete(new Path(localDirs[i]));
}
} | java | {
"resource": ""
} |
q161163 | JobConf.getWorkingDirectory | train | public Path getWorkingDirectory() {
String name = get("mapred.working.dir");
if (name != null) {
return new Path(name);
} else {
try {
Path dir = FileSystem.get(this).getWorkingDirectory();
set("mapred.working.dir", dir.toString());
return dir;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | java | {
"resource": ""
} |
q161164 | JobConf.getMemoryForMapTask | train | public long getMemoryForMapTask() {
long value = getDeprecatedMemoryValue();
if (value == DISABLED_MEMORY_LIMIT) {
value = normalizeMemoryConfigValue(
getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
DISABLED_MEMORY_LIMIT));
}
return value;
} | java | {
"resource": ""
} |
q161165 | JobConf.getMemoryForReduceTask | train | public long getMemoryForReduceTask() {
long value = getDeprecatedMemoryValue();
if (value == DISABLED_MEMORY_LIMIT) {
value = normalizeMemoryConfigValue(
getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
DISABLED_MEMORY_LIMIT));
}
return value;
} | java | {
"resource": ""
} |
q161166 | JobConf.computeNumSlotsPerMap | train | int computeNumSlotsPerMap(long slotSizePerMap) {
if ((slotSizePerMap==DISABLED_MEMORY_LIMIT) ||
(getMemoryForMapTask()==DISABLED_MEMORY_LIMIT)) {
return 1;
}
return (int)(Math.ceil((float)getMemoryForMapTask() / (float)slotSizePerMap));
} | java | {
"resource": ""
} |
q161167 | JobConf.computeNumSlotsPerReduce | train | int computeNumSlotsPerReduce(long slotSizePerReduce) {
if ((slotSizePerReduce==DISABLED_MEMORY_LIMIT) ||
(getMemoryForReduceTask()==DISABLED_MEMORY_LIMIT)) {
return 1;
}
return
(int)(Math.ceil((float)getMemoryForReduceTask() / (float)slotSizePerReduce));
} | java | {
"resource": ""
} |
q161168 | JobConf.findContainingJar | train | private static String findContainingJar(Class my_class) {
// Returns the filesystem path of the jar that contains 'my_class', or
// null when the class was not loaded from a jar (e.g. from a directory).
ClassLoader loader = my_class.getClassLoader();
// Resource name of the class file, e.g. "org/apache/Foo.class".
String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
try {
for(Enumeration itr = loader.getResources(class_file);
itr.hasMoreElements();) {
URL url = (URL) itr.nextElement();
if ("jar".equals(url.getProtocol())) {
String toReturn = url.getPath();
// Strip the "file:" scheme prefix of a jar: URL's path.
if (toReturn.startsWith("file:")) {
toReturn = toReturn.substring("file:".length());
}
toReturn = URLDecoder.decode(toReturn, "UTF-8");
// Drop the "!/entry" suffix, leaving just the jar path.
return toReturn.replaceAll("!.*$", "");
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
return null;
}
"resource": ""
} |
q161169 | JobConf.overrideConfiguration | train | public static void overrideConfiguration(JobConf conf, int instance) {
final String CONFIG_KEYS[] =
new String[]{"mapred.job.tracker", "mapred.local.dir",
"mapred.fairscheduler.server.address"};
for (String configKey : CONFIG_KEYS) {
String value = conf.get(configKey + "-" + instance);
if (value != null) {
conf.set(configKey, value);
} else {
LOG.warn("Configuration " + configKey + "-" + instance + " not found.");
}
}
} | java | {
"resource": ""
} |
q161170 | ReconfigurationException.constructMessage | train | private static String constructMessage(String property,
String newVal, String oldVal) {
String message = "Could not change property " + property;
if (oldVal != null) {
message += " from \'" + oldVal;
}
if (newVal != null) {
message += "\' to \'" + newVal + "\'";
}
return message;
} | java | {
"resource": ""
} |
q161171 | NameSpaceSliceStorage.recoverTransitionRead | train | void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
// Analyzes every namespace data directory, formats/recovers it as
// needed, then performs the startup transition and rewrites all VERSION
// files. Inaccessible directories are dropped from 'dataDirs'.
assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
: "Block-pool and name-node layout versions must be the same.";
// 1. For each Namespace data directory analyze the state and
// check whether all is consistent before transitioning.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
dataDirs.size());
for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next();
StorageDirectory sd = new StorageDirectory(dataDir, null, false);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
// sd is locked but not opened
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
// ignore this storage
LOG.info("Storage directory " + dataDir + " does not exist.");
it.remove();
continue;
case NOT_FORMATTED: // format
// Only format a directory that is provably empty; refusing to
// format a non-empty one protects pre-existing data.
LOG.info("Storage directory " + dataDir + " is not formatted.");
if (!sd.isEmpty()) {
LOG.error("Storage directory " + dataDir
+ " is not empty, and will not be formatted! Exiting.");
throw new IOException(
"Storage directory " + dataDir + " is not empty!");
}
LOG.info("Formatting ...");
format(sd, nsInfo);
break;
default: // recovery part is common
sd.doRecover(curState);
}
} catch (IOException ioe) {
// Release the directory lock taken by analyzeStorage before failing.
sd.unlock();
throw ioe;
}
// add to the storage list. This is inherited from parent class, Storage.
addStorageDir(sd);
dataDirStates.add(curState);
}
if (dataDirs.size() == 0) // none of the data dirs exist
throw new IOException(
"All specified directories are not accessible or do not exist.");
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
doTransition(datanode, nsInfo, startOpt);
// 3. Update all storages. Some of them might have just been formatted.
this.writeAll();
} | java | {
"resource": ""
} |
q161172 | NameSpaceSliceStorage.setFields | train | @Override
protected void setFields(Properties props, StorageDirectory sd)
throws IOException {
props.setProperty(NAMESPACE_ID, String.valueOf(namespaceID));
props.setProperty(CHECK_TIME, String.valueOf(cTime));
props.setProperty(LAYOUT_VERSION, String.valueOf(layoutVersion));
} | java | {
"resource": ""
} |
q161173 | NameSpaceSliceStorage.setNameSpaceID | train | private void setNameSpaceID(File storage, String nsid)
throws InconsistentFSStateException {
if (nsid == null || nsid.equals("")) {
throw new InconsistentFSStateException(storage, "file "
+ STORAGE_FILE_VERSION + " is invalid.");
}
int newNsId = Integer.parseInt(nsid);
if (namespaceID > 0 && namespaceID != newNsId) {
throw new InconsistentFSStateException(storage,
"Unexepcted namespaceID " + nsid + " . Expected " + namespaceID);
}
namespaceID = newNsId;
} | java | {
"resource": ""
} |
q161174 | NameSpaceSliceStorage.doUpgrade | train | private void doUpgrade(List<StorageDirectory> sds,
List<StorageInfo> sdsInfo,
final NamespaceInfo nsInfo
) throws IOException {
// Upgrades all storage directories in parallel (one UpgradeThread per
// directory), then, only after every thread finished without error,
// writes the new VERSION files and renames previous.tmp -> previous.
assert sds.size() == sdsInfo.size();
UpgradeThread[] upgradeThreads = new UpgradeThread[sds.size()];
// start to upgrade
for (int i=0; i<upgradeThreads.length; i++) {
final StorageDirectory sd = sds.get(i);
final StorageInfo si = sdsInfo.get(i);
UpgradeThread thread = new UpgradeThread(sd, si, nsInfo);
thread.start();
upgradeThreads[i] = thread;
}
// wait for upgrade to be done
for (UpgradeThread thread : upgradeThreads) {
try {
thread.join();
} catch (InterruptedException e) {
// Surface the interrupt as an IOException, preserving the cause.
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
}
// check for errors
for (UpgradeThread thread : upgradeThreads) {
if (thread.error != null)
throw new IOException(thread.error);
}
// write version file
this.layoutVersion = FSConstants.LAYOUT_VERSION;
assert this.namespaceID == nsInfo.getNamespaceID() :
"Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
for (StorageDirectory sd :sds) {
sd.write();
File prevDir = sd.getPreviousDir();
File tmpDir = sd.getPreviousTmp();
// rename tmp to previous
rename(tmpDir, prevDir);
LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
}
} | java | {
"resource": ""
} |
q161175 | VersionedWritable.readFields | train | public void readFields(DataInput in) throws IOException {
byte version = in.readByte(); // read version
if (version != getVersion())
throw new VersionMismatchException(getVersion(), version);
} | java | {
"resource": ""
} |
q161176 | CGroupMemoryWatcher.failTasksWithMaxMemory | train | private void failTasksWithMaxMemory(long memoryToRelease) {
List<TaskAttemptID> allTasks = new ArrayList<TaskAttemptID>();
allTasks.addAll(processTreeInfoMap.keySet());
// Sort the tasks descendingly according to RSS memory usage
Collections.sort(allTasks, new Comparator<TaskAttemptID>() {
@Override
public int compare(TaskAttemptID tid1, TaskAttemptID tid2) {
return processTreeInfoMap.get(tid2).getMemoryUsed() >
processTreeInfoMap.get(tid1).getMemoryUsed() ?
1 : -1;
}});
long memoryReleased = 0;
// Fail the tasks one by one until the memory requirement is met
while (memoryReleased < memoryToRelease && !allTasks.isEmpty()) {
TaskAttemptID tid = allTasks.remove(0);
if (!isKillable(tid)) {
continue;
}
long memoryUsed = processTreeInfoMap.get(tid).getMemoryUsed();
if (memoryUsed == 0) {
break; // Skip tasks without process tree information currently
}
tasksToKill.add(tid);
memoryReleased += memoryUsed;
}
if (tasksToKill.isEmpty()) {
LOG.error("The total memory usage is over CGroup limits. "
+ "But found no alive task to kill for freeing memory.");
} else if (memoryReleased < memoryToRelease) {
LOG.error("The total memory usage is over CGroup limits. "
+ "But uanble to find enough tasks to kill for freeing memory.");
}
killTasks();
} | java | {
"resource": ""
} |
q161177 | CGroupMemoryWatcher.killTask | train | private void killTask(TaskAttemptID tid, String msg, boolean wasFailure) {
// Fails/kills one over-memory task attempt: first the TaskTracker-level
// cleanup, then the OS process group, then the monitoring bookkeeping.
// Kill the task and mark it as killed.
taskTracker.cleanUpOverMemoryTask(tid, wasFailure, msg);
// Now destroy the ProcessTree, remove it from monitoring map.
CGroupProcessTreeInfo ptInfo = processTreeInfoMap.get(tid);
try {
LinuxSystemCall.killProcessGroup(Integer.parseInt(ptInfo.getPID()));
} catch (java.io.IOException e) {
// Best effort: log and continue so the map entry is still removed.
LOG.error("Could not kill process group " + ptInfo.getPID(), e);
}
processTreeInfoMap.remove(tid);
LOG.info("Removed ProcessTree with root " + ptInfo.getPID());
} | java | {
} |
q161178 | CGroupMemoryWatcher.isKillable | train | private boolean isKillable(TaskAttemptID tid) {
TaskInProgress tip = taskTracker.runningTasks.get(tid);
return tip != null && !tip.wasKilled() &&
(tip.getRunState() == TaskStatus.State.RUNNING ||
tip.getRunState() == TaskStatus.State.COMMIT_PENDING);
} | java | {
"resource": ""
} |
q161179 | UtilizationShell.getResponse | train | public String getResponse(String[] argv) throws IOException {
String result = "";
if (argv.length < 1) {
return result;
}
if (argv[0].equals("-all")) {
result += rpcCollector.getClusterUtilization();
result += JobUtilization.legendString +
JobUtilization.unitString;
for (JobUtilization job : rpcCollector.getAllRunningJobUtilization()) {
result += job;
}
result += TaskTrackerUtilization.legendString +
TaskTrackerUtilization.unitString;
for (TaskTrackerUtilization tt :
rpcCollector.getAllTaskTrackerUtilization()) {
result += tt;
}
return result;
}
if (argv[0].equals("-cluster")) {
result += rpcCollector.getClusterUtilization();
return result;
}
if (argv[0].equals("-job")) {
result += JobUtilization.legendString +
JobUtilization.unitString;
if (argv.length == 1) {
for (JobUtilization job : rpcCollector.getAllRunningJobUtilization()) {
result += job;
}
return result;
}
for (int i = 1; i < argv.length; i++) {
result += rpcCollector.getJobUtilization(argv[i]);
}
return result;
}
if (argv[0].equals("-tasktracker")) {
result += TaskTrackerUtilization.legendString +
TaskTrackerUtilization.unitString;
if (argv.length == 1) {
for (TaskTrackerUtilization tt :
rpcCollector.getAllTaskTrackerUtilization()) {
result += tt;
}
return result;
}
for (int i = 1; i < argv.length; i++) {
result += rpcCollector.getTaskTrackerUtilization(argv[i]);
}
return result;
}
return result;
} | java | {
"resource": ""
} |
q161180 | PipeMapRed.splitKeyVal | train | void splitKeyVal(byte[] line, Text key, Text val) throws IOException {
int pos = UTF8ByteArrayUtils.findNthByte(line, (byte)this.getFieldSeparator(), this.getNumOfKeyFields());
try {
if (pos == -1) {
key.set(line);
val.set("");
} else {
UTF8ByteArrayUtils.splitKeyVal(line, key, val, pos);
}
} catch (CharacterCodingException e) {
LOG.warn(StringUtils.stringifyException(e));
}
} | java | {
"resource": ""
} |
q161181 | PipeMapRed.write | train | void write(Writable value) throws IOException {
byte[] bval;
int valSize;
if (value instanceof BytesWritable) {
BytesWritable val = (BytesWritable) value;
bval = val.get();
valSize = val.getSize();
} else if (value instanceof Text) {
Text val = (Text) value;
bval = val.getBytes();
valSize = val.getLength();
} else {
String sval = value.toString();
bval = sval.getBytes("UTF-8");
valSize = bval.length;
}
clientOut_.write(bval, 0, valSize);
} | java | {
"resource": ""
} |
q161182 | ServerHistory.cleanUpHistory | train | private void cleanUpHistory() {
// Removes history entries that are older than 'historyLength' and, when
// the physical limit is enabled, enough additional oldest entries to
// stay within 'historyLimit'. Runs under the history write lock and
// updates metrics afterwards.
long oldestAllowedTimestamp = System.currentTimeMillis() - historyLength;
int trashedNotifications = 0;
if (LOG.isDebugEnabled()) {
LOG.debug("History cleanup: Checking old notifications to remove from history list ...");
}
// Search key: any entry older than this timestamp must go.
HistoryTreeEntry key = new HistoryTreeEntry(oldestAllowedTimestamp, 0, (byte)0);
int notificationsCount = 0;
historyLock.writeLock().lock();
try {
notificationsCount = orderedHistoryList.size();
LOG.warn("History cleanup: size of the history before cleanup: " + notificationsCount);
if (!historyLimitDisabled && notificationsCount > historyLimit) {
LOG.warn("History cleanup: Reached physical limit. Number of stored notifications: " +
notificationsCount + ". Clearing ...");
}
// Number of entries to delete because they are too old ...
int index = Collections.binarySearch(orderedHistoryList, key, comparatorByTS);
int toDeleteByTS = index >= 0 ? index : - (index + 1);
// ... and because the list exceeds the physical limit.
int toDeleteByLimit = historyLimitDisabled ? 0 : notificationsCount - (int)historyLimit;
toDeleteByLimit = toDeleteByLimit > 0 ? toDeleteByLimit : 0;
int toDelete = Math.max(toDeleteByTS, toDeleteByLimit);
// Delete items which are too old
if (toDelete > 0) {
LOG.warn("History cleanup: number of the history to cleanup: " + toDelete);
for (int i = 0; i < toDelete; i++) {
orderedHistoryList.get(i).removeFromTree();
}
orderedHistoryList.subList(0, toDelete).clear();
if (toDeleteByLimit > toDeleteByTS) {
// If we delete a notification because we don't have space left
trashedNotifications ++;
}
notificationsCount = orderedHistoryList.size();
LOG.warn("History cleanup: size of the history after cleanup: " + notificationsCount);
// clean up history tree, remove the node that has no children and
// no notifications associated with them.
cleanUpHistoryTree(historyTree);
}
} finally {
historyLock.writeLock().unlock();
}
// Metrics are updated outside the lock.
core.getMetrics().trashedHistoryNotifications.inc(trashedNotifications);
core.getMetrics().historySize.set(notificationsCount);
core.getMetrics().historyQueues.set(historyQueuesCount);
} | java | {
"resource": ""
} |
q161183 | ServerHistory.cleanUpHistoryTree | train | private void cleanUpHistoryTree(HistoryNode node) {
if (node == null || node.children == null) {
return;
}
Iterator<HistoryNode> iterator = node.children.iterator();
while (iterator.hasNext()) {
HistoryNode child = iterator.next();
// clean up child
cleanUpHistoryTree(child);
// clean up current node;
if (shouldRemoveNode(child)) {
iterator.remove();
}
}
} | java | {
"resource": ""
} |
q161184 | ServerHistory.shouldRemoveNode | train | private boolean shouldRemoveNode(HistoryNode node) {
if (node == null) {
return true;
}
int sizeOfChildren = 0;
if (node.children != null) {
sizeOfChildren = node.children.size();
}
if (sizeOfChildren > 0) {
return false;
}
int sizeOfNotifications = 0;
if (node.notifications != null) {
for (List<HistoryTreeEntry> notiList : node.notifications.values()) {
if (notiList != null) {
sizeOfNotifications += notiList.size();
if (sizeOfNotifications > 0) {
return false;
}
}
}
}
return true;
} | java | {
"resource": ""
} |
q161185 | ServerHistory.storeNotification | train | @Override
public void storeNotification(NamespaceNotification notification) {
// Records one namespace notification both in the path-keyed history
// tree (under its path components, bucketed by notification type) and
// in the time-ordered flat list. Runs under the history write lock.
int notificationsCount = 0;
historyLock.writeLock().lock();
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing into history: " + NotifierUtils.asString(notification));
}
String[] paths = DFSUtil.split(notification.path, Path.SEPARATOR_CHAR);
long timestamp = System.currentTimeMillis();
HistoryTreeEntry entry = new HistoryTreeEntry(timestamp, notification.txId, notification.type);
// Store the notification
// Walk (and create as needed) the tree node for each path component.
HistoryNode node = historyTree;
for (String path : paths) {
if (path.trim().length() == 0) {
continue;
}
node = node.addOrGetChild(path);
}
// Lazily create the per-type notification buckets on this node.
if (node.notifications == null) {
node.notifications = new HashMap<Byte, List<HistoryTreeEntry>>();
}
if (!node.notifications.containsKey(notification.type)) {
node.notifications.put(notification.type, new LinkedList<HistoryTreeEntry>());
}
entry.node = node;
node.notifications.get(notification.type).add(entry);
orderedHistoryList.add(entry);
notificationsCount = orderedHistoryList.size();
} finally {
historyLock.writeLock().unlock();
}
// Metrics are updated outside the lock.
core.getMetrics().historySize.set(notificationsCount);
core.getMetrics().historyQueues.set(historyQueuesCount);
if (LOG.isDebugEnabled()) {
LOG.debug("Notification stored.");
}
} | java | {
"resource": ""
} |
q161186 | ServerHistory.addNotificationsToQueue | train | @Override
public void addNotificationsToQueue(NamespaceEvent event, long txId,
Queue<NamespaceNotification> notifications)
throws TransactionIdTooOldException {
// Replays, into 'notifications', every stored notification of the
// event's type under the event's directory that comes strictly after
// transaction 'txId'. Throws TransactionIdTooOldException when txId is
// outside (or missing from) the retained history window.
if (LOG.isDebugEnabled()) {
LOG.debug("Got addNotificationsToQueue for: " +
NotifierUtils.asString(event) + " and txId: " + txId);
}
historyLock.readLock().lock();
try {
if (orderedHistoryList == null || orderedHistoryList.size() == 0) {
throw new TransactionIdTooOldException("No data in history.");
}
// txId must fall inside the retained [oldest, newest] txn range.
if (orderedHistoryList.get(0).txnId > txId ||
orderedHistoryList.get(orderedHistoryList.size() - 1).txnId < txId) {
throw new TransactionIdTooOldException("No data in history for txId " + txId);
}
int index = Collections.binarySearch(orderedHistoryList, new HistoryTreeEntry(0, txId, event.type),
comparatorByID);
if (index < 0) {
// If we got here, there are 2 possibilities:
// * The client gave us a bad transaction id.
// * We missed one (or more) transaction(s)
LOG.error("Potential corrupt history. Got request for: " +
NotifierUtils.asString(event) + " and txId: " + txId);
throw new TransactionIdTooOldException(
"Potentially corrupt server history");
}
// Normalize to a trailing-separator form for prefix matching below.
String dirFormatPath = event.path;
if (!dirFormatPath.endsWith(Path.SEPARATOR)) {
dirFormatPath += Path.SEPARATOR;
}
// Emit every later entry of the right type under this directory.
for (int i = index + 1; i < orderedHistoryList.size(); i++) {
HistoryTreeEntry entry = orderedHistoryList.get(i);
if (event.type != entry.type) {
continue;
}
String entryPath = entry.getFullPath();
if (entryPath.startsWith(dirFormatPath)) {
notifications.add(new NamespaceNotification(entryPath, entry.type, entry.txnId));
}
}
} finally {
historyLock.readLock().unlock();
}
} | java | {
"resource": ""
} |
q161187 | OfflineImageDecompressor.printProgress | train | private void printProgress(long read, long size) {
int progress = Math.min(100, (int) ((100 * read) / size));
if (progress > lastProgress) {
lastProgress = progress;
System.out.println("Completed " + lastProgress + " % ");
}
} | java | {
"resource": ""
} |
q161188 | MultipleOutputs.checkNamedOutput | train | private static void checkNamedOutput(JobConf conf, String namedOutput,
boolean alreadyDefined) {
List<String> definedChannels = getNamedOutputsList(conf);
if (alreadyDefined && definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' already alreadyDefined");
} else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
throw new IllegalArgumentException("Named output '" + namedOutput +
"' not defined");
}
} | java | {
"resource": ""
} |
q161189 | MultipleOutputs.checkTokenName | train | private static void checkTokenName(String namedOutput) {
if (namedOutput == null || namedOutput.length() == 0) {
throw new IllegalArgumentException(
"Name cannot be NULL or emtpy");
}
for (char ch : namedOutput.toCharArray()) {
if ((ch >= 'A') && (ch <= 'Z')) {
continue;
}
if ((ch >= 'a') && (ch <= 'z')) {
continue;
}
if ((ch >= '0') && (ch <= '9')) {
continue;
}
throw new IllegalArgumentException(
"Name cannot be have a '" + ch + "' char");
}
} | java | {
"resource": ""
} |
q161190 | MultipleOutputs.isMultiNamedOutput | train | public static boolean isMultiNamedOutput(JobConf conf, String namedOutput) {
checkNamedOutput(conf, namedOutput, false);
return conf.getBoolean(MO_PREFIX + namedOutput + MULTI, false);
} | java | {
"resource": ""
} |
q161191 | HostsFileReader.isAllowedHost | train | public synchronized boolean isAllowedHost(String host) {
boolean isIncluded = includes.isEmpty() || includes.contains(host);
boolean isExcluded = excludes.contains(host);
return isIncluded && !isExcluded;
} | java | {
"resource": ""
} |
q161192 | Counter.readFields | train | @Override
public synchronized void readFields(DataInput in) throws IOException {
name = CounterNames.intern(Text.readString(in));
if (in.readBoolean()) {
displayName = CounterNames.intern(Text.readString(in));
} else {
displayName = name;
}
value = WritableUtils.readVLong(in);
} | java | {
"resource": ""
} |
q161193 | Counter.write | train | @Override
public synchronized void write(DataOutput out) throws IOException {
Text.writeString(out, name);
boolean distinctDisplayName = ! name.equals(displayName);
out.writeBoolean(distinctDisplayName);
if (distinctDisplayName) {
Text.writeString(out, displayName);
}
WritableUtils.writeVLong(out, value);
} | java | {
"resource": ""
} |
q161194 | FilePool.getInputFiles | train | public long getInputFiles(long minSize, Collection<FileStatus> files)
throws IOException {
updateLock.readLock().lock();
try {
return root.selectFiles(minSize, files);
} finally {
updateLock.readLock().unlock();
}
} | java | {
"resource": ""
} |
q161195 | FilePool.locationsFor | train | public BlockLocation[] locationsFor(FileStatus stat, long start, long len)
throws IOException {
// TODO cache
return fs.getFileBlockLocations(stat, start, len);
} | java | {
"resource": ""
} |
q161196 | RunJar.unJar | train | public static void unJar(File jarFile, File toDir) throws IOException {
JarFile jar = new JarFile(jarFile);
try {
Enumeration entries = jar.entries();
while (entries.hasMoreElements()) {
JarEntry entry = (JarEntry)entries.nextElement();
if (!entry.isDirectory()) {
InputStream in = jar.getInputStream(entry);
try {
File file = new File(toDir, entry.getName());
if (!file.getParentFile().mkdirs()) {
if (!file.getParentFile().isDirectory()) {
throw new IOException("Mkdirs failed to create " +
file.getParentFile().toString());
}
}
OutputStream out = new FileOutputStream(file);
try {
byte[] buffer = new byte[8192];
int i;
while ((i = in.read(buffer)) != -1) {
out.write(buffer, 0, i);
}
} finally {
out.close();
}
} finally {
in.close();
}
}
}
} finally {
jar.close();
}
} | java | {
"resource": ""
} |
q161197 | JournalNodeRpcServer.init | train | public static void init() {
try {
FastProtocolRegister.register(FastProtocolId.SERIAL_VERSION_ID_1,
QJournalProtocol.class.getMethod("journal",
JournalRequestInfo.class));
} catch (Exception e) {
throw new RuntimeException(e);
}
} | java | {
"resource": ""
} |
q161198 | HardLink.createHardLink | train | public static void createHardLink(File file, File linkName)
throws IOException {
if (file == null) {
throw new IOException(
"invalid arguments to createHardLink: source file is null");
}
if (linkName == null) {
throw new IOException(
"invalid arguments to createHardLink: link name is null");
}
// construct and execute shell command
String[] hardLinkCommand = getHardLinkCommand.linkOne(file, linkName);
Process process = Runtime.getRuntime().exec(hardLinkCommand);
try {
if (process.waitFor() != 0) {
String errMsg = new BufferedReader(new InputStreamReader(
process.getInputStream())).readLine();
if (errMsg == null) errMsg = "";
String inpMsg = new BufferedReader(new InputStreamReader(
process.getErrorStream())).readLine();
if (inpMsg == null) inpMsg = "";
throw new IOException(errMsg + inpMsg);
}
} catch (InterruptedException e) {
throw new IOException(e);
} finally {
process.destroy();
}
} | java | {
"resource": ""
} |
q161199 | HardLink.getLinkCount | train | public static int getLinkCount(File fileName) throws IOException {
if (fileName == null) {
throw new IOException(
"invalid argument to getLinkCount: file name is null");
}
if (!fileName.exists()) {
throw new FileNotFoundException(fileName + " not found.");
}
// construct and execute shell command
String[] cmd = getHardLinkCommand.linkCount(fileName);
String inpMsg = null;
String errMsg = null;
int exitValue = -1;
BufferedReader in = null;
BufferedReader err = null;
Process process = Runtime.getRuntime().exec(cmd);
try {
exitValue = process.waitFor();
in = new BufferedReader(new InputStreamReader(
process.getInputStream()));
inpMsg = in.readLine();
err = new BufferedReader(new InputStreamReader(
process.getErrorStream()));
errMsg = err.readLine();
if (inpMsg == null || exitValue != 0) {
throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
}
if (osType == OSType.OS_TYPE_SOLARIS) {
String[] result = inpMsg.split("\\s+");
return Integer.parseInt(result[1]);
} else {
return Integer.parseInt(inpMsg);
}
} catch (NumberFormatException e) {
throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
} catch (InterruptedException e) {
throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
} finally {
process.destroy();
if (in != null) in.close();
if (err != null) err.close();
}
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.