_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q161200 | DfsServlet.createRedirectUri | train | protected URI createRedirectUri(String servletpath, UserGroupInformation ugi,
DatanodeID host, HttpServletRequest request, NameNode nn) throws URISyntaxException {
final String hostname = host instanceof DatanodeInfo?
((DatanodeInfo)host).getHostName(): host.getHost();
final String scheme = request.getScheme();
final int port = "https".equals(scheme)?
(Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort();
// Add namenode address to the URL params
final String nnAddr = NetUtils.toIpPort(nn.getNameNodeAddress());
final String filename = request.getPathInfo();
return new URI(scheme, null, hostname, port, servletpath,
"filename=" + filename + "&ugi=" + ugi +
JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr), null);
} | java | {
"resource": ""
} |
q161201 | DfsServlet.getFilename | train | /**
 * Extracts the mandatory "filename" parameter from the HTTP request.
 *
 * @param request  the HTTP request carrying the parameter
 * @param response unused here; kept for the servlet-helper signature
 * @return the non-empty filename parameter value
 * @throws IOException if the parameter is missing or empty
 */
protected String getFilename(HttpServletRequest request,
HttpServletResponse response) throws IOException {
final String filename = request.getParameter("filename");
if (filename == null || filename.length() == 0) {
throw new IOException("Invalid filename");
}
return filename;
} | java | {
"resource": ""
} |
q161202 | DfsServlet.createUri | train | protected URI createUri(String file,
DatanodeID[] candidates, UnixUserGroupInformation ugi,
HttpServletRequest request) throws URISyntaxException {
String scheme = request.getScheme();
final DatanodeID host = candidates[0];
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = ((DatanodeInfo)host).getHostName();
} else {
hostname = host.getHost();
}
// Construct query.
StringBuilder builder = new StringBuilder();
builder.append("ugi=" + ugi);
// Populate the rest of parameters.
Enumeration<?> it = request.getParameterNames();
while (it.hasMoreElements()) {
String key = it.nextElement().toString();
String value = request.getParameter(key);
builder.append("&" + key + "=" + value);
}
// Construct the possible candidates for retry
if (candidates.length > 1) {
builder.append("&candidates=");
appendDatanodeID(builder, candidates[1]);
for (int j=2; j<candidates.length; j++) {
builder.append(" ");
appendDatanodeID(builder, candidates[j]);
}
}
// Add namenode address to the url params
NameNode nn = (NameNode)getServletContext().getAttribute("name.node");
String addr = NetUtils.toIpPort(nn.getNameNodeAddress());
builder.append(JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr));
return new URI(scheme, null, hostname,
"https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort(),
"/streamFile" + file, builder.toString(), null);
} | java | {
"resource": ""
} |
q161203 | FailoverClientHandler.handleFailure | train | void handleFailure(IOException ex, int failures)
throws IOException {
// Check if the exception was thrown by the network stack
if (failoverClient.isShuttingdown() || !shouldHandleException(ex)) {
throw ex;
}
if (failures > FAILURE_RETRY) {
throw ex;
}
try {
// This might've happened because we are failing over
if (!watchZK) {
LOG.debug("Not watching ZK, so checking explicitly");
// Check with zookeeper
fsLock.readLock().unlock();
InjectionHandler.processEvent(InjectionEvent.DAFS_CHECK_FAILOVER);
fsLock.writeLock().lock();
boolean failover = false;
try {
failover = zkCheckFailover(ex);
} finally {
fsLock.writeLock().unlock();
fsLock.readLock().lock();
}
if (failover) {
return;
}
}
Thread.sleep(1000);
} catch (InterruptedException iex) {
LOG.error("Interrupted while waiting for a failover", iex);
Thread.currentThread().interrupt();
}
} | java | {
"resource": ""
} |
q161204 | PosixPathNameChecker.isValidPosixFileChar | train | /**
 * Tests whether {@code c} belongs to the POSIX portable filename
 * character set: {@code [A-Za-z0-9._-]}.
 *
 * @param c character to test
 * @return true when the character is allowed in a portable POSIX file name
 */
public boolean isValidPosixFileChar(char c) {
  // Idiom fix: return the boolean expression directly instead of the
  // redundant "if (cond) return true; else return false;" form.
  return (c >= 'A' && c <= 'Z')
      || (c >= 'a' && c <= 'z')
      || (c >= '0' && c <= '9')
      || c == '.' || c == '_' || c == '-';
} | java | {
"resource": ""
} |
q161205 | PosixPathNameChecker.isValidPosixFileName | train | /**
 * Checks whether {@code name} is a valid POSIX portable file name: it must
 * not begin with '-' and every character must pass
 * {@link #isValidPosixFileChar(char)}.  An empty name is accepted, matching
 * the original behaviour.
 */
public boolean isValidPosixFileName(String name) {
  // Reject a leading '-' up front; every position then only needs the
  // per-character membership test.
  if (!name.isEmpty() && name.charAt(0) == '-') {
    return false;
  }
  for (char ch : name.toCharArray()) {
    if (!isValidPosixFileChar(ch)) {
      return false;
    }
  }
  return true;
} | java | {
"resource": ""
} |
q161206 | LoggedJob.compareStrings | train | private void compareStrings(List<String> c1, List<String> c2, TreePath loc,
String eltname) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
} | java | {
"resource": ""
} |
q161207 | TextWriterImageVisitor.rollIfNeeded | train | /**
 * Closes the current output part and opens the next one once a full batch
 * of files has been written to it.  No-op when rolling is effectively
 * disabled (a single part requested, or no files seen).
 */
void rollIfNeeded() throws IOException {
// NOTE(review): the guard reads numberOfParts/numberOfFiles while the roll
// trigger below uses filesCount/filesPerRoll -- assumed to be kept
// consistent elsewhere in the class; confirm against the field declarations.
if (numberOfParts == 1 || numberOfFiles < 1) {
return;
}
filesCount++;
if (filesCount % filesPerRoll == 0) {
// A full batch went into this part: close it and start the next part file.
out.close();
currentPart++;
createOutputStream();
}
} | java | {
"resource": ""
} |
q161208 | SessionSchedulable.adjustLocalityRequirement | train | /**
 * Relaxes the required locality level (NODE -> RACK -> ANY) when this
 * schedulable has been waiting longer than the configured threshold.
 *
 * @param now      current time, on the same clock as localityWaitStartTime
 * @param nodeWait how long to wait at NODE level before relaxing to RACK
 * @param rackWait how long to wait at RACK level before relaxing to ANY
 */
public void adjustLocalityRequirement(
long now, long nodeWait, long rackWait) {
// Nothing to do unless a locality wait is in progress.
if (!localityWaitStarted) {
return;
}
// ANY is already the loosest level; no further relaxation is possible.
if (localityRequired == LocalityLevel.ANY) {
return;
}
if (localityRequired == LocalityLevel.NODE) {
if (now - localityWaitStartTime > nodeWait) {
setLocalityLevel(LocalityLevel.RACK);
}
}
// Note: if the branch above just promoted NODE -> RACK, this check is a
// no-op within the same call, because setLocalityLevel resets
// localityWaitStartTime to Long.MAX_VALUE (making the elapsed time negative).
if (localityRequired == LocalityLevel.RACK) {
if (now - localityWaitStartTime > rackWait) {
setLocalityLevel(LocalityLevel.ANY);
}
}
} | java | {
"resource": ""
} |
q161209 | SessionSchedulable.setLocalityLevel | train | /**
 * Sets the required locality level and ends any in-progress locality wait.
 * The wait start time is pushed to Long.MAX_VALUE so that elapsed-time
 * checks in adjustLocalityRequirement go negative until a new wait starts.
 */
public void setLocalityLevel(LocalityLevel level) {
localityRequired = level;
lastLocality = level;
localityWaitStarted = false;
localityWaitStartTime = Long.MAX_VALUE;
} | java | {
"resource": ""
} |
q161210 | UnixUserGroupInformation.readFields | train | /**
 * Deserializes this UGI from {@code in}: a UGI-type prefix string (which
 * must equal UGI_TECHNOLOGY), the user name, a vint group count, then that
 * many group-name strings.  This is the inverse of {@code write}.
 *
 * @throws IOException if the stream does not begin with the expected UGI
 *         prefix, or on any underlying read error
 */
public void readFields(DataInput in) throws IOException {
// read UGI type first
String ugiType = Text.readString(in);
if (!UGI_TECHNOLOGY.equals(ugiType)) {
throw new IOException("Expect UGI prefix: " + UGI_TECHNOLOGY +
", but receive a prefix: " + ugiType);
}
// read this object
userName = Text.readString(in);
int numOfGroups = WritableUtils.readVInt(in);
groupNames = new String[numOfGroups];
for (int i = 0; i < numOfGroups; i++) {
groupNames[i] = Text.readString(in);
}
} | java | {
"resource": ""
} |
q161211 | UnixUserGroupInformation.write | train | /**
 * Serializes this UGI to {@code out} in the layout readFields expects:
 * the UGI_TECHNOLOGY prefix, the user name, a vint group count, and then
 * each group name in order.
 */
public void write(DataOutput out) throws IOException {
  // Prefix identifying the UGI flavour, so readers can validate the type.
  Text.writeString(out, UGI_TECHNOLOGY);
  Text.writeString(out, userName);
  final int numGroups = groupNames.length;
  WritableUtils.writeVInt(out, numGroups);
  for (int i = 0; i < numGroups; i++) {
    Text.writeString(out, groupNames[i]);
  }
} | java | {
"resource": ""
} |
q161212 | UnixUserGroupInformation.login | train | public static UnixUserGroupInformation login() throws LoginException {
try {
String userName;
// if an exception occurs, then uses the
// default user
try {
userName = getUnixUserName();
} catch (Exception e) {
userName = DEFAULT_USERNAME;
}
// check if this user already has a UGI object in the ugi map
UnixUserGroupInformation ugi = user2UGIMap.get(userName);
if (ugi != null) {
return ugi;
}
/* get groups list from UNIX.
* It's assumed that the first group is the default group.
*/
String[] groupNames;
// if an exception occurs, then uses the
// default group
try {
groupNames = getUnixGroups();
} catch (Exception e) {
groupNames = new String[1];
groupNames[0] = DEFAULT_GROUP;
}
// construct a Unix UGI
ugi = new UnixUserGroupInformation(userName, groupNames);
user2UGIMap.put(ugi.getUserName(), ugi);
return ugi;
} catch (Exception e) {
throw new LoginException("Login failed: "+e.getMessage());
}
} | java | {
"resource": ""
} |
q161213 | UnixUserGroupInformation.login | train | public static UnixUserGroupInformation login(Configuration conf, boolean save
) throws LoginException {
UnixUserGroupInformation ugi = readFromConf(conf, UGI_PROPERTY_NAME);
if (ugi == null) {
ugi = login();
LOG.debug("Unix Login: " + ugi);
if (save) {
saveToConf(conf, UGI_PROPERTY_NAME, ugi);
}
}
return ugi;
} | java | {
"resource": ""
} |
q161214 | RecordTypeInfo.addField | train | /**
 * Adds a field with the given name and type to this record's type info.
 */
public void addField(String fieldName, TypeID tid) {
sTid.getFieldTypeInfos().add(new FieldTypeInfo(fieldName, tid));
} | java | {
"resource": ""
} |
q161215 | RecordTypeInfo.getNestedStructTypeInfo | train | /**
 * Returns the type info of the named nested struct, or null when this
 * record contains no struct with that name.
 */
public RecordTypeInfo getNestedStructTypeInfo(String name) {
  StructTypeID nested = sTid.findStruct(name);
  return (nested == null) ? null : new RecordTypeInfo(name, nested);
} | java | {
"resource": ""
} |
q161216 | RecordTypeInfo.serialize | train | public void serialize(RecordOutput rout, String tag) throws IOException {
// write out any header, version info, here
rout.startRecord(this, tag);
rout.writeString(name, tag);
sTid.writeRest(rout, tag);
rout.endRecord(this, tag);
} | java | {
"resource": ""
} |
q161217 | RecordTypeInfo.deserialize | train | public void deserialize(RecordInput rin, String tag) throws IOException {
// read in any header, version info
rin.startRecord(tag);
// name
this.name = rin.readString(tag);
sTid.read(rin, tag);
rin.endRecord(tag);
} | java | {
"resource": ""
} |
q161218 | GangliaContext.xdr_string | train | /**
 * Writes a string into the send buffer in XDR format: a 4-byte big-endian
 * length (via xdr_int) followed by the raw bytes, then pads the buffer to
 * a 4-byte boundary.
 */
private void xdr_string(String s) {
// NOTE(review): getBytes() uses the platform default charset; assumed to
// match what the Ganglia peer expects -- confirm before changing.
byte[] bytes = s.getBytes();
int len = bytes.length;
xdr_int(len);
System.arraycopy(bytes, 0, buffer, offset, len);
offset += len;
pad();
} | java | {
"resource": ""
} |
q161219 | GangliaContext.xdr_int | train | /**
 * Writes {@code i} into the send buffer in XDR format: four bytes,
 * most-significant first, advancing {@code offset} by four.
 */
private void xdr_int(int i) {
  // Emit the bytes big-endian, walking the shift down from the top byte.
  for (int shift = 24; shift >= 0; shift -= 8) {
    buffer[offset++] = (byte) ((i >> shift) & 0xff);
  }
} | java | {
"resource": ""
} |
q161220 | NamespaceMap.getBlockCrcPerVolume | train | Map<FSVolume, List<Map<Block, DatanodeBlockInfo>>> getBlockCrcPerVolume(
List<FSVolume> volumes) {
Map<FSVolume, List<Map<Block, DatanodeBlockInfo>>> retMap =
new HashMap<FSVolume, List<Map<Block, DatanodeBlockInfo>>>();
for (FSVolume volume : volumes) {
List<Map<Block, DatanodeBlockInfo>> newSubMap = new ArrayList<Map<Block, DatanodeBlockInfo>>(
numBucket);
for (int i = 0; i < numBucket; i++) {
newSubMap.add(new HashMap<Block, DatanodeBlockInfo>());
}
retMap.put(volume, newSubMap);
}
for (BlockBucket bb : blockBuckets) {
bb.getBlockCrcPerVolume(retMap);
}
return retMap;
} | java | {
"resource": ""
} |
q161221 | CompositeRecordReader.key | train | /**
 * Returns the key at the head of this composite reader: the join
 * collector's key if it still has pairs, otherwise the smallest key among
 * the queued child readers, or null when everything is exhausted.
 */
public K key() {
  if (jc.hasNext()) {
    return jc.key();
  }
  return q.isEmpty() ? null : q.peek().key();
} | java | {
"resource": ""
} |
q161222 | CompositeRecordReader.skip | train | public void skip(K key) throws IOException {
ArrayList<ComposableRecordReader<K,?>> tmp =
new ArrayList<ComposableRecordReader<K,?>>();
while (!q.isEmpty() && cmp.compare(q.peek().key(), key) <= 0) {
tmp.add(q.poll());
}
for (ComposableRecordReader<K,?> rr : tmp) {
rr.skip(key);
if (rr.hasNext()) {
q.add(rr);
}
}
} | java | {
"resource": ""
} |
q161223 | CompositeRecordReader.accept | train | @SuppressWarnings("unchecked") // No values from static EMPTY class
public void accept(CompositeRecordReader.JoinCollector jc, K key)
throws IOException {
if (hasNext() && 0 == cmp.compare(key, key())) {
fillJoinCollector(createKey());
jc.add(id, getDelegate());
return;
}
jc.add(id, EMPTY);
} | java | {
"resource": ""
} |
q161224 | CompositeRecordReader.fillJoinCollector | train | protected void fillJoinCollector(K iterkey) throws IOException {
if (!q.isEmpty()) {
q.peek().key(iterkey);
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
t.accept(jc, iterkey);
if (t.hasNext()) {
q.add(t);
} else if (q.isEmpty()) {
return;
}
}
}
} | java | {
"resource": ""
} |
q161225 | CompositeRecordReader.createKey | train | @SuppressWarnings("unchecked") // Explicit check for key class agreement
public K createKey() {
if (null == keyclass) {
final Class<?> cls = kids[0].createKey().getClass();
for (RecordReader<K,? extends Writable> rr : kids) {
if (!cls.equals(rr.createKey().getClass())) {
throw new ClassCastException("Child key classes fail to agree");
}
}
keyclass = cls.asSubclass(WritableComparable.class);
}
return (K) ReflectionUtils.newInstance(keyclass, getConf());
} | java | {
"resource": ""
} |
q161226 | CompositeRecordReader.createInternalValue | train | protected TupleWritable createInternalValue() {
Writable[] vals = new Writable[kids.length];
for (int i = 0; i < vals.length; ++i) {
vals[i] = kids[i].createValue();
}
return new TupleWritable(vals);
} | java | {
"resource": ""
} |
q161227 | CompositeRecordReader.close | train | public void close() throws IOException {
if (kids != null) {
for (RecordReader<K,? extends Writable> rr : kids) {
rr.close();
}
}
if (jc != null) {
jc.close();
}
} | java | {
"resource": ""
} |
q161228 | CompositeRecordReader.getProgress | train | /**
 * Reports the progress of the slowest child reader, since the join can be
 * no further along than its least-advanced input.
 *
 * @return the minimum progress over all child readers, in [0, 1]
 */
public float getProgress() throws IOException {
  float slowest = 1.0f;
  for (RecordReader<K, ? extends Writable> child : kids) {
    float p = child.getProgress();
    if (p < slowest) {
      slowest = p;
    }
  }
  return slowest;
} | java | {
"resource": ""
} |
q161229 | DBConfiguration.getConnection | train | /**
 * Opens a JDBC connection using the driver class, URL and (optional)
 * credentials configured on the job.
 *
 * @return a new JDBC Connection; the caller is responsible for closing it
 * @throws ClassNotFoundException if the configured driver class is absent
 * @throws SQLException on connection failure
 */
Connection getConnection() throws ClassNotFoundException, SQLException{
// Load the JDBC driver class so DriverManager can locate it.
Class.forName(job.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
if(job.get(DBConfiguration.USERNAME_PROPERTY) == null) {
// No username configured: connect with the URL alone.
return DriverManager.getConnection(job.get(DBConfiguration.URL_PROPERTY));
} else {
return DriverManager.getConnection(
job.get(DBConfiguration.URL_PROPERTY),
job.get(DBConfiguration.USERNAME_PROPERTY),
job.get(DBConfiguration.PASSWORD_PROPERTY));
}
} | java | {
"resource": ""
} |
q161230 | FastWritableRegister.tryGetInstance | train | /**
 * Looks up a registered FastWritable by name and returns a fresh instance,
 * or null when the name is not registered.  Names of the wrong length are
 * rejected before the map lookup as a cheap fast path.
 */
public static FastWritable tryGetInstance(String name, Configuration conf) {
if (name.length() != NAME_LEN) {
// we use it to fast discard the request without doing map lookup
return null;
}
FastWritable fw = register.get(name);
return fw == null ? null : fw.getFastWritableInstance(conf);
} | java | {
"resource": ""
} |
q161231 | HadoopLocationWizard.performFinish | train | public HadoopServer performFinish() {
try {
if (this.original == null) {
// New location
Display.getDefault().syncExec(new Runnable() {
public void run() {
ServerRegistry.getInstance().addServer(
HadoopLocationWizard.this.location);
}
});
return this.location;
} else {
// Update location
final String originalName = this.original.getLocationName();
this.original.load(this.location);
Display.getDefault().syncExec(new Runnable() {
public void run() {
ServerRegistry.getInstance().updateServer(originalName,
HadoopLocationWizard.this.location);
}
});
return this.original;
}
} catch (Exception e) {
e.printStackTrace();
setMessage("Invalid server location values", IMessageProvider.ERROR);
return null;
}
} | java | {
"resource": ""
} |
q161232 | HadoopLocationWizard.createConfNameEditor | train | private Text createConfNameEditor(ModifyListener listener,
Composite parent, String propName, String labelText) {
{
ConfProp prop = ConfProp.getByName(propName);
if (prop != null)
return createConfLabelText(listener, parent, prop, labelText);
}
Label label = new Label(parent, SWT.NONE);
if (labelText == null)
labelText = propName;
label.setText(labelText);
Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
GridData data = new GridData(GridData.FILL_HORIZONTAL);
text.setLayoutData(data);
text.setData("hPropName", propName);
text.setText(location.getConfProp(propName));
text.addModifyListener(listener);
return text;
} | java | {
"resource": ""
} |
q161233 | XMLUtils.transform | train | public static void transform(
InputStream styleSheet, InputStream xml, Writer out
)
throws TransformerConfigurationException, TransformerException {
// Instantiate a TransformerFactory
TransformerFactory tFactory = TransformerFactory.newInstance();
// Use the TransformerFactory to process the
// stylesheet and generate a Transformer
Transformer transformer = tFactory.newTransformer(
new StreamSource(styleSheet)
);
// Use the Transformer to transform an XML Source
// and send the output to a Result object.
transformer.transform(new StreamSource(xml), new StreamResult(out));
} | java | {
"resource": ""
} |
q161234 | NLineInputFormat.getSplits | train | public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
ArrayList<FileSplit> splits = new ArrayList<FileSplit>();
for (FileStatus status : listLocatedStatus(job)) {
Path fileName = status.getPath();
if (status.isDir()) {
throw new IOException("Not a file: " + fileName);
}
FileSystem fs = fileName.getFileSystem(job);
LineReader lr = null;
try {
FSDataInputStream in = fs.open(fileName);
lr = new LineReader(in, job);
Text line = new Text();
int numLines = 0;
long begin = 0;
long length = 0;
int num = -1;
while ((num = lr.readLine(line)) > 0) {
numLines++;
length += num;
if (numLines == N) {
splits.add(new FileSplit(fileName, begin, length, new String[]{}));
begin += length;
length = 0;
numLines = 0;
}
}
if (numLines != 0) {
splits.add(new FileSplit(fileName, begin, length, new String[]{}));
}
} finally {
if (lr != null) {
lr.close();
}
}
}
return splits.toArray(new FileSplit[splits.size()]);
} | java | {
"resource": ""
} |
q161235 | FileInputFormat.getSplits | train | /**
 * Computes the input splits for the job.  Each non-empty splitable file is
 * chopped into chunks of computeSplitSize(blockSize, minSize, maxSize),
 * with each split annotated with the hosts of the block holding its start;
 * unsplitable files become a single split, and empty files get a split
 * with no hosts.
 *
 * @param job the job context supplying configuration and input paths
 * @return the list of input splits
 * @throws IOException if listing the input files fails
 */
public List<InputSplit> getSplits(JobContext job
) throws IOException {
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();
for (LocatedFileStatus file: listLocatedStatus(job)) {
Path path = file.getPath();
long length = file.getLen();
BlockLocation[] blkLocations = file.getBlockLocations();
if ((length != 0) && isSplitable(job, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(blockSize, minSize, maxSize);
long bytesRemaining = length;
// Keep emitting full-size splits while the remainder exceeds
// SPLIT_SLOP times the split size (avoids a tiny trailing split).
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(new FileSplit(path, length-bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
// The trailing split is placed on the hosts of the last block.
splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining,
blkLocations[blkLocations.length-1].getHosts()));
}
} else if (length != 0) {
// Unsplitable file: one split covering the whole file.
splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts()));
} else {
//Create empty hosts array for zero length files
splits.add(new FileSplit(path, 0, length, new String[0]));
}
}
LOG.debug("Total # of splits: " + splits.size());
return splits;
} | java | {
"resource": ""
} |
q161236 | TrashPolicy.getInstance | train | /**
 * Creates and initializes the configured TrashPolicy implementation,
 * read from "fs.trash.classname" (default: TrashPolicyDefault).
 *
 * @param conf configuration supplying the policy class
 * @param fs   the file system the policy operates on
 * @param home the current user's home directory (trash root)
 * @return an initialized TrashPolicy instance
 */
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
throws IOException {
Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
TrashPolicyDefault.class,
TrashPolicy.class);
TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
trash.initialize(conf, fs, home); // initialize TrashPolicy
return trash;
} | java | {
"resource": ""
} |
q161237 | Lz4Compressor.reset | train | /**
 * Resets the compressor to its initial state so it can be reused: clears
 * the finish flags, both direct buffers, the user-buffer bookkeeping, and
 * the byte counters.
 */
@Override
public synchronized void reset() {
finish = false;
finished = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufLen = 0;
compressedDirectBuf.clear();
// limit(0) marks the compressed buffer as drained (nothing left to read).
compressedDirectBuf.limit(0);
userBufOff = userBufLen = 0;
bytesRead = bytesWritten = 0L;
} | java | {
"resource": ""
} |
q161238 | CoronaJobHistory.logSubmitted | train | public void logSubmitted(String jobConfPath, long submitTime, String jobTrackerId)
throws IOException {
if (disableHistory) {
return;
}
// create output stream for logging in hadoop.job.history.location
int defaultBufferSize =
logDirFs.getConf().getInt("io.file.buffer.size", 4096);
try {
FSDataOutputStream out = null;
PrintWriter writer = null;
// In case the old JT is still running, but we can't connect to it, we
// should ensure that it won't write to our (new JT's) job history file.
if (logDirFs.exists(logFile)) {
LOG.info("Remove the old history file " + logFile);
logDirFs.delete(logFile, true);
}
out = logDirFs.create(logFile,
new FsPermission(HISTORY_FILE_PERMISSION),
true,
defaultBufferSize,
logDirFs.getDefaultReplication(),
jobHistoryBlockSize, null);
writer = new PrintWriter(out);
fileManager.addWriter(jobId, writer);
// cache it ...
fileManager.setHistoryFile(jobId, logFile);
writers = fileManager.getWriters(jobId);
if (null != writers) {
log(writers, RecordTypes.Meta,
new Keys[] {Keys.VERSION},
new String[] {String.valueOf(JobHistory.VERSION)});
}
String jobName = getJobName();
String user = getUserName();
//add to writer as well
log(writers, RecordTypes.Job,
new Keys[]{Keys.JOBID, Keys.JOBNAME, Keys.USER,
Keys.SUBMIT_TIME, Keys.JOBCONF, Keys.JOBTRACKERID },
new String[]{jobId.toString(), jobName, user,
String.valueOf(submitTime) , jobConfPath, jobTrackerId}
);
} catch (IOException e) {
// Disable history if we have errors other than in the user log.
disableHistory = true;
}
/* Storing the job conf on the log dir */
Path jobFilePath = new Path(logDir,
CoronaJobHistoryFilesManager.getConfFilename(jobId));
fileManager.setConfFile(jobId, jobFilePath);
FSDataOutputStream jobFileOut = null;
try {
if (!logDirFs.exists(jobFilePath)) {
jobFileOut = logDirFs.create(jobFilePath,
new FsPermission(HISTORY_FILE_PERMISSION),
true,
defaultBufferSize,
logDirFs.getDefaultReplication(),
logDirFs.getDefaultBlockSize(), null);
conf.writeXml(jobFileOut);
jobFileOut.close();
}
} catch (IOException ioe) {
LOG.error("Failed to store job conf in the log dir", ioe);
} finally {
if (jobFileOut != null) {
try {
jobFileOut.close();
} catch (IOException ie) {
LOG.info("Failed to close the job configuration file " +
StringUtils.stringifyException(ie));
}
}
}
} | java | {
"resource": ""
} |
q161239 | CoronaJobHistory.logInited | train | public void logInited(long startTime, int totalMaps, int totalReduces) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS,
Keys.TOTAL_REDUCES, Keys.JOB_STATUS},
new String[] {jobId.toString(), String.valueOf(startTime),
String.valueOf(totalMaps),
String.valueOf(totalReduces),
Values.PREP.name()});
}
} | java | {
"resource": ""
} |
q161240 | CoronaJobHistory.logStarted | train | public void logStarted() {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.JOB_STATUS},
new String[] {jobId.toString(),
Values.RUNNING.name()});
}
} | java | {
"resource": ""
} |
q161241 | CoronaJobHistory.logFinished | train | public void logFinished(long finishTime,
int finishedMaps, int finishedReduces,
int failedMaps, int failedReduces,
int killedMaps, int killedReduces,
Counters mapCounters,
Counters reduceCounters,
Counters counters) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
Keys.JOB_STATUS, Keys.FINISHED_MAPS,
Keys.FINISHED_REDUCES,
Keys.FAILED_MAPS, Keys.FAILED_REDUCES,
Keys.KILLED_MAPS, Keys.KILLED_REDUCES,
Keys.MAP_COUNTERS, Keys.REDUCE_COUNTERS,
Keys.COUNTERS},
new String[] {jobId.toString(), Long.toString(finishTime),
Values.SUCCESS.name(),
String.valueOf(finishedMaps),
String.valueOf(finishedReduces),
String.valueOf(failedMaps),
String.valueOf(failedReduces),
String.valueOf(killedMaps),
String.valueOf(killedReduces),
mapCounters.makeEscapedCompactString(),
reduceCounters.makeEscapedCompactString(),
counters.makeEscapedCompactString()},
true);
closeAndClear(writers);
}
// NOTE: history cleaning stuff deleted from here. We should do that
// somewhere else!
} | java | {
"resource": ""
} |
q161242 | CoronaJobHistory.logFailed | train | public void logFailed(long timestamp, int finishedMaps,
int finishedReduces, Counters counters) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
Keys.JOB_STATUS, Keys.FINISHED_MAPS,
Keys.FINISHED_REDUCES, Keys.COUNTERS},
new String[] {jobId.toString(),
String.valueOf(timestamp),
Values.FAILED.name(),
String.valueOf(finishedMaps),
String.valueOf(finishedReduces),
counters.makeEscapedCompactString()},
true);
closeAndClear(writers);
}
} | java | {
"resource": ""
} |
q161243 | CoronaJobHistory.logJobPriority | train | public void logJobPriority(JobID jobid, JobPriority priority) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.JOB_PRIORITY},
new String[] {jobId.toString(), priority.toString()});
}
} | java | {
"resource": ""
} |
q161244 | CoronaJobHistory.markCompleted | train | public void markCompleted() throws IOException {
if (disableHistory) {
return;
}
fileManager.moveToDone(jobId, true, CoronaJobTracker.getMainJobID(jobId));
} | java | {
"resource": ""
} |
q161245 | CoronaJobHistory.logTaskFinished | train | public void logTaskFinished(TaskID taskId, String taskType,
long finishTime, Counters counters) {
if (disableHistory) {
return;
}
JobID id = taskId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.Task,
new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
Keys.TASK_STATUS, Keys.FINISH_TIME,
Keys.COUNTERS},
new String[]{ taskId.toString(), taskType, Values.SUCCESS.name(),
String.valueOf(finishTime),
counters.makeEscapedCompactString()});
}
} | java | {
"resource": ""
} |
q161246 | CoronaJobHistory.logTaskUpdates | train | public void logTaskUpdates(TaskID taskId, long finishTime) {
if (disableHistory) {
return;
}
JobID id = taskId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.Task,
new Keys[]{Keys.TASKID, Keys.FINISH_TIME},
new String[]{ taskId.toString(),
String.valueOf(finishTime)});
}
} | java | {
"resource": ""
} |
q161247 | CoronaJobHistory.logTaskFailed | train | public void logTaskFailed(TaskID taskId, String taskType, long time,
String error) {
logTaskFailed(taskId, taskType, time, error, null);
} | java | {
"resource": ""
} |
q161248 | CoronaJobHistory.logTaskFailed | train | public void logTaskFailed(TaskID taskId, String taskType, long time,
String error,
TaskAttemptID failedDueToAttempt) {
if (disableHistory) {
return;
}
JobID id = taskId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
String failedAttempt = failedDueToAttempt == null ?
"" :
failedDueToAttempt.toString();
log(writers, RecordTypes.Task,
new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
Keys.TASK_STATUS, Keys.FINISH_TIME,
Keys.ERROR, Keys.TASK_ATTEMPT_ID},
new String[]{ taskId.toString(), taskType,
Values.FAILED.name(),
String.valueOf(time) , error,
failedAttempt});
}
} | java | {
"resource": ""
} |
q161249 | CoronaJobHistory.logMapTaskStarted | train | public void logMapTaskStarted(TaskAttemptID taskAttemptId, long startTime,
String trackerName, int httpPort,
String taskType) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.MapAttempt,
new Keys[]{ Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.START_TIME,
Keys.TRACKER_NAME, Keys.HTTP_PORT},
new String[]{taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
String.valueOf(startTime), trackerName,
httpPort == -1 ? "" :
String.valueOf(httpPort)});
}
} | java | {
"resource": ""
} |
q161250 | CoronaJobHistory.logMapTaskFinished | train | public void logMapTaskFinished(TaskAttemptID taskAttemptId,
long finishTime,
String hostName,
String taskType,
String stateString,
Counters counter) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.MapAttempt,
new Keys[]{ Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.FINISH_TIME, Keys.HOSTNAME,
Keys.STATE_STRING, Keys.COUNTERS},
new String[]{taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.SUCCESS.name(),
String.valueOf(finishTime), hostName,
stateString,
counter.makeEscapedCompactString()});
}
} | java | {
"resource": ""
} |
q161251 | CoronaJobHistory.logReduceTaskKilled | train | public void logReduceTaskKilled(TaskAttemptID taskAttemptId, long timestamp,
String hostName, String error,
String taskType) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.ReduceAttempt,
new Keys[]{ Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.FINISH_TIME, Keys.HOSTNAME,
Keys.ERROR },
new String[]{ taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.KILLED.name(),
String.valueOf(timestamp),
hostName, error });
}
} | java | {
"resource": ""
} |
q161252 | ProcessTree.destroyProcess | train | /**
 * Terminates a process: sends SIGTERM immediately, then arranges a SIGKILL
 * after sleeptimeBeforeSigkill ms (in a background thread when
 * inBackground is true).
 */
protected static void destroyProcess(String pid, long sleeptimeBeforeSigkill,
boolean inBackground) {
terminateProcess(pid);
sigKill(pid, false, sleeptimeBeforeSigkill, inBackground);
} | java | {
"resource": ""
} |
q161253 | ProcessTree.destroyProcessGroup | train | /**
 * Terminates a whole process group: sends SIGTERM to the group now, then
 * arranges a SIGKILL after sleeptimeBeforeSigkill ms (optionally in a
 * background thread).
 */
protected static void destroyProcessGroup(String pgrpId,
long sleeptimeBeforeSigkill, boolean inBackground) {
terminateProcessGroup(pgrpId);
sigKill(pgrpId, true, sleeptimeBeforeSigkill, inBackground);
} | java | {
"resource": ""
} |
q161254 | ProcessTree.terminateProcess | train | /**
 * Sends SIGTERM to the given process via the shell "kill" command and logs
 * the outcome.  Failures are deliberately swallowed (best effort): only
 * the exit code and any error message are logged.
 */
public static void terminateProcess(String pid) {
ShellCommandExecutor shexec = null;
String errMsg = null;
try {
String[] args = { "kill", pid };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
// Do nothing, we log the exit code in the finally block.
errMsg = ioe.getMessage();
} finally {
LOG.info("Killing process " + pid +
" with SIGTERM. Exit code " + shexec.getExitCode() +
(errMsg == null ? "" : " (" + errMsg + ")"));
}
} | java | {
"resource": ""
} |
q161255 | ProcessTree.terminateProcessGroup | train | /**
 * Sends SIGTERM to every process in the group via "kill -- -pgrpId" (a
 * negative pid argument targets the whole process group) and logs the
 * outcome.  Errors are logged, not rethrown (best effort).
 */
public static void terminateProcessGroup(String pgrpId) {
ShellCommandExecutor shexec = null;
try {
String[] args = { "kill", "--", "-" + pgrpId };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.warn("Error executing shell command " + ioe);
} finally {
LOG.info("Killing all processes in the process group " + pgrpId +
" with SIGTERM. Exit code " + shexec.getExitCode());
}
} | java | {
"resource": ""
} |
q161256 | ProcessTree.killProcess | train | /**
 * Sends SIGKILL to the given process, after first checking that it is
 * still alive.  IOExceptions are logged and swallowed (best effort); the
 * exit code is always logged.
 */
public static void killProcess(String pid) {
//If process tree is not alive then return immediately.
if(!ProcessTree.isAlive(pid)) {
return;
}
String[] args = { "kill", "-9", pid };
ShellCommandExecutor shexec = new ShellCommandExecutor(args);
try {
shexec.execute();
} catch (IOException e) {
LOG.warn("Error sending SIGKILL to process "+ pid + " ."+
StringUtils.stringifyException(e));
} finally {
LOG.info("Killing process " + pid + " with SIGKILL. Exit code "
+ shexec.getExitCode());
}
} | java | {
"resource": ""
} |
q161257 | ProcessTree.killProcessGroup | train | public static void killProcessGroup(String pgrpId) {
//If process tree is not alive then return immediately.
if(!ProcessTree.isProcessGroupAlive(pgrpId)) {
return;
}
String[] args = { "kill", "-9", "-"+pgrpId };
ShellCommandExecutor shexec = new ShellCommandExecutor(args);
try {
shexec.execute();
} catch (IOException e) {
LOG.warn("Error sending SIGKILL to process group "+ pgrpId + " ."+
StringUtils.stringifyException(e));
} finally {
LOG.info("Killing process group" + pgrpId + " with SIGKILL. Exit code "
+ shexec.getExitCode());
}
} | java | {
"resource": ""
} |
q161258 | ProcessTree.isAlive | train | public static boolean isAlive(String pid) {
ShellCommandExecutor shexec = null;
try {
String[] args = { "kill", "-0", pid };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (ExitCodeException ee) {
return false;
} catch (IOException ioe) {
LOG.warn("Error executing shell command "
+ Arrays.toString(shexec.getExecString()) + ioe);
return false;
}
return (shexec.getExitCode() == 0 ? true : false);
} | java | {
"resource": ""
} |
q161259 | HarFileSystem.decodeHarURI | train | private URI decodeHarURI(URI rawURI, Configuration conf) throws IOException {
String tmpAuth = rawURI.getAuthority();
//we are using the default file
//system in the config
//so create a underlying uri and
//return it
if (tmpAuth == null) {
//create a path
return FileSystem.getDefaultUri(conf);
}
String host = rawURI.getHost();
String[] str = host.split("-", 2);
if (str[0] == null) {
throw new IOException("URI: " + rawURI + " is an invalid Har URI.");
}
String underLyingScheme = str[0];
String underLyingHost = (str.length > 1)? str[1]:null;
int underLyingPort = rawURI.getPort();
String auth = (underLyingHost == null && underLyingPort == -1)?
null:(underLyingHost+":"+underLyingPort);
URI tmp = null;
if (rawURI.getQuery() != null) {
// query component not allowed
throw new IOException("query component in Path not supported " + rawURI);
}
try {
tmp = new URI(underLyingScheme, auth, rawURI.getPath(),
rawURI.getQuery(), rawURI.getFragment());
} catch (URISyntaxException e) {
// do nothing should not happen
}
return tmp;
} | java | {
"resource": ""
} |
q161260 | HarFileSystem.getPathInHar | train | public Path getPathInHar(Path path) {
Path harPath = new Path(path.toUri().getPath());
if (archivePath.compareTo(harPath) == 0)
return new Path(Path.SEPARATOR);
Path tmp = new Path(harPath.getName());
Path parent = harPath.getParent();
while (!(parent.compareTo(archivePath) == 0)) {
if (parent.toString().equals(Path.SEPARATOR)) {
tmp = null;
break;
}
tmp = new Path(parent.getName(), tmp);
parent = parent.getParent();
}
if (tmp != null)
tmp = new Path(Path.SEPARATOR, tmp);
return tmp;
} | java | {
"resource": ""
} |
q161261 | HarFileSystem.makeRelative | train | private Path makeRelative(String initial, Path p) {
String scheme = this.uri.getScheme();
String authority = this.uri.getAuthority();
Path root = new Path(Path.SEPARATOR);
if (root.compareTo(p) == 0)
return new Path(scheme, authority, initial);
Path retPath = new Path(p.getName());
Path parent = p.getParent();
for (int i=0; i < p.depth()-1; i++) {
retPath = new Path(parent.getName(), retPath);
parent = parent.getParent();
}
return new Path(new Path(scheme, authority, initial),
retPath.toString());
} | java | {
"resource": ""
} |
q161262 | HarFileSystem.fixBlockLocations | train | static BlockLocation[] fixBlockLocations(BlockLocation[] locations,
long start,
long len,
long fileOffsetInHar) {
// offset 1 past last byte of desired range
long end = start + len;
for (BlockLocation location : locations) {
// offset of part block relative to beginning of desired file
// (may be negative if file starts in this part block)
long harBlockStart = location.getOffset() - fileOffsetInHar;
// offset 1 past last byte of har block relative to beginning of
// desired file
long harBlockEnd = harBlockStart + location.getLength();
if (start > harBlockStart) {
// desired range starts after beginning of this har block
// fix offset to beginning of relevant range (relative to desired file)
location.setOffset(start);
// fix length to relevant portion of har block
location.setLength(location.getLength() - (start - harBlockStart));
} else {
// desired range includes beginning of this har block
location.setOffset(harBlockStart);
}
if (harBlockEnd > end) {
// range ends before end of this har block
// fix length to remove irrelevant portion at the end
location.setLength(location.getLength() - (harBlockEnd - end));
}
}
return locations;
} | java | {
"resource": ""
} |
q161263 | HarFileSystem.getFileBlockLocations | train | @Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
HarStatus hstatus = getFileHarStatus(file.getPath(), null);
Path partPath = new Path(archivePath, hstatus.getPartName());
FileStatus partStatus = fs.getFileStatus(partPath);
// get all part blocks that overlap with the desired file blocks
BlockLocation[] locations =
fs.getFileBlockLocations(partStatus,
hstatus.getStartIndex() + start, len);
return fixBlockLocations(locations, start, len, hstatus.getStartIndex());
} | java | {
"resource": ""
} |
q161264 | HarFileSystem.fileStatusesInIndex | train | private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses,
List<String> children, FileStatus archiveIndexStat) throws IOException {
// read the index file
FSDataInputStream aIn = null;
try {
aIn = fs.open(archiveIndex);
LineReader aLin;
long read = 0;
aLin = new LineReader(aIn, getConf());
String parentString = parent.getName();
if (!parentString.endsWith(Path.SEPARATOR)){
parentString += Path.SEPARATOR;
}
Path harPath = new Path(parentString);
int harlen = harPath.depth();
Text line = new Text();
while (read < archiveIndexStat.getLen()) {
int tmp = aLin.readLine(line);
read += tmp;
String lineFeed = line.toString();
String child = decodeFileName(lineFeed.substring(0, lineFeed.indexOf(" ")));
if ((child.startsWith(parentString))) {
Path thisPath = new Path(child);
if (thisPath.depth() == harlen + 1) {
// bingo!
HarStatus hstatus = new HarStatus(lineFeed);
FileStatus childStatus = new FileStatus(hstatus.isDir() ? 0
: hstatus.getLength(), hstatus.isDir(), (int) archiveIndexStat
.getReplication(), archiveIndexStat.getBlockSize(),
hstatus.getModificationTime(), hstatus
.getAccessTime(), new FsPermission(hstatus
.getPermission()), hstatus.getOwner(),
hstatus.getGroup(), makeRelative(this.uri.getPath(),
new Path(hstatus.getName())));
statuses.add(childStatus);
}
line.clear();
}
}
} finally {
if (aIn != null) {
aIn.close();
}
}
} | java | {
"resource": ""
} |
q161265 | HarFileSystem.fileStatusInIndex | train | private String fileStatusInIndex(Path harPath) throws IOException {
// read the index file
int hashCode = getHarHash(harPath.toString());
// get the master index to find the pos
// in the index file
FSDataInputStream in = fs.open(masterIndex);
FileStatus masterStat = fs.getFileStatus(masterIndex);
LineReader lin = new LineReader(in, getConf());
Text line = new Text();
long read = lin.readLine(line);
//ignore the first line. this is the header of the index files
String[] readStr = null;
List<Store> stores = new ArrayList<Store>();
while(read < masterStat.getLen()) {
int b = lin.readLine(line);
read += b;
readStr = line.toString().split(" ");
int startHash = Integer.parseInt(readStr[0]);
int endHash = Integer.parseInt(readStr[1]);
if (startHash <= hashCode && hashCode <= endHash) {
stores.add(new Store(Long.parseLong(readStr[2]),
Long.parseLong(readStr[3]), startHash,
endHash));
}
line.clear();
}
try {
lin.close();
} catch(IOException io){
// do nothing just a read.
}
FSDataInputStream aIn = fs.open(archiveIndex);
LineReader aLin;
String retStr = null;
// now start reading the real index file
for (Store s: stores) {
read = 0;
aIn.seek(s.begin);
aLin = new LineReader(aIn, getConf());
while (read + s.begin < s.end) {
int tmp = aLin.readLine(line);
read += tmp;
String lineFeed = line.toString();
String[] parsed = lineFeed.split(" ");
parsed[0] = decodeFileName(parsed[0]);
if (harPath.compareTo(new Path(parsed[0])) == 0) {
// bingo!
retStr = lineFeed;
break;
}
line.clear();
}
if (retStr != null)
break;
}
try {
aIn.close();
} catch(IOException io) {
//do nothing
}
return retStr;
} | java | {
"resource": ""
} |
q161266 | HarFileSystem.getFileStatus | train | @Override
public FileStatus getFileStatus(Path f) throws IOException {
FileStatus archiveStatus = fs.getFileStatus(archiveIndex);
HarStatus hstatus = getFileHarStatus(f, archiveStatus);
return new FileStatus(hstatus.isDir()?0:hstatus.getLength(), hstatus.isDir(),
(int)archiveStatus.getReplication(), archiveStatus.getBlockSize(),
hstatus.getModificationTime(), hstatus.getAccessTime(),
new FsPermission(
hstatus.getPermission()), hstatus.getOwner(),
hstatus.getGroup(),
makeRelative(this.uri.getPath(), new Path(hstatus.getName())));
} | java | {
"resource": ""
} |
q161267 | HarFileSystem.open | train | @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
HarStatus hstatus = getFileHarStatus(f, null);
// we got it.. woo hooo!!!
if (hstatus.isDir()) {
throw new FileNotFoundException(f + " : not a file in " +
archivePath);
}
return new HarFSDataInputStream(fs, new Path(archivePath,
hstatus.getPartName()),
hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
} | java | {
"resource": ""
} |
q161268 | HarFileSystem.listStatus | train | @Override
public FileStatus[] listStatus(Path f) throws IOException {
//need to see if the file is an index in file
//get the filestatus of the archive directory
// we will create fake filestatuses to return
// to the client
List<FileStatus> statuses = new ArrayList<FileStatus>();
FileStatus archiveStatus = fs.getFileStatus(archiveIndex);
Path tmpPath = makeQualified(f);
Path harPath = getPathInHar(tmpPath);
String readStr = fileStatusInIndex(harPath);
if (readStr == null) {
throw new FileNotFoundException("File " + f + " not found in " + archivePath);
}
HarStatus hstatus = new HarStatus(readStr,archiveStatus, version);
if (!hstatus.isDir()) {
statuses.add(new FileStatus(hstatus.getLength(),
hstatus.isDir(),
archiveStatus.getReplication(), archiveStatus.getBlockSize(),
hstatus.getModificationTime(), hstatus.getAccessTime(),
new FsPermission(hstatus.getPermission()),
hstatus.getOwner(), hstatus.getGroup(),
makeRelative(this.uri.getPath(), new Path(hstatus.getName()))));
} else {
fileStatusesInIndex(hstatus, statuses, hstatus.getChildren(), archiveStatus);
}
return statuses.toArray(new FileStatus[statuses.size()]);
} | java | {
"resource": ""
} |
q161269 | BlockMetadataHeader.readHeader | train | public static BlockMetadataHeader readHeader(DataInputStream in,
Checksum checksumImpl) throws IOException {
return readHeader(in.readShort(), in, checksumImpl);
} | java | {
"resource": ""
} |
q161270 | BlockMetadataHeader.readHeader | train | static BlockMetadataHeader readHeader(File file) throws IOException {
DataInputStream in = null;
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(file)));
return readHeader(in);
} finally {
IOUtils.closeStream(in);
}
} | java | {
"resource": ""
} |
q161271 | BlockMetadataHeader.readHeader | train | private static BlockMetadataHeader readHeader(short version, DataInputStream in)
throws IOException {
DataChecksum checksum = DataChecksum.newDataChecksum(in);
return new BlockMetadataHeader(version, checksum);
} | java | {
"resource": ""
} |
q161272 | BlockMetadataHeader.writeHeader | train | private static void writeHeader(DataOutputStream out,
BlockMetadataHeader header)
throws IOException {
out.writeShort(header.getVersion());
header.getChecksum().writeHeader(out);
} | java | {
"resource": ""
} |
q161273 | BlockMetadataHeader.writeHeader | train | static void writeHeader(DataOutputStream out, DataChecksum checksum)
throws IOException {
writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum));
} | java | {
"resource": ""
} |
q161274 | FSInputChecker.read | train | public synchronized int read(byte[] b, int off, int len) throws IOException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
}
int n = 0;
for (;;) {
int nread = read1(b, off + n, len - n);
if (nread <= 0)
return (n == 0) ? nread : n;
n += nread;
if (n >= len)
return n;
}
} | java | {
"resource": ""
} |
q161275 | FSInputChecker.checksum2long | train | static public long checksum2long(byte[] checksum, int offset, int length) {
long crc = 0L;
int iter = 0;
for(int i=offset; i<offset+length; i++, iter++) {
crc |= (0xffL&(long)checksum[i])<<((length-iter-1)*8);
}
return crc;
} | java | {
"resource": ""
} |
q161276 | FSInputChecker.set | train | final protected synchronized void set(boolean verifyChecksum,
Checksum sum, int maxChunkSize, int checksumSize ) {
this.verifyChecksum = verifyChecksum;
this.sum = sum;
this.buf = new byte[maxChunkSize];
this.checksum = new byte[checksumSize];
this.count = 0;
this.pos = 0;
} | java | {
"resource": ""
} |
q161277 | MetricsServlet.printMap | train | void printMap(PrintWriter out, Map<String, Map<String, List<TagsMetricsPair>>> map) {
for (Map.Entry<String, Map<String, List<TagsMetricsPair>>> context : map.entrySet()) {
out.println(context.getKey());
for (Map.Entry<String, List<TagsMetricsPair>> record : context.getValue().entrySet()) {
indent(out, 1);
out.println(record.getKey());
for (TagsMetricsPair pair : record.getValue()) {
indent(out, 2);
// Prints tag values in the form "{key=value,key=value}:"
out.print("{");
boolean first = true;
for (Map.Entry<String, Object> tagValue : pair.tagMap.entrySet()) {
if (first) {
first = false;
} else {
out.print(",");
}
out.print(tagValue.getKey());
out.print("=");
out.print(tagValue.getValue().toString());
}
out.println("}:");
// Now print metric values, one per line
for (Map.Entry<String, Number> metricValue :
pair.metricMap.entrySet()) {
indent(out, 3);
out.print(metricValue.getKey());
out.print("=");
out.println(metricValue.getValue().toString());
}
}
}
}
} | java | {
"resource": ""
} |
q161278 | JobStatus.getJobRunState | train | public static String getJobRunState(int state) {
if (state < 1 || state >= runStates.length) {
return UNKNOWN;
}
return runStates[state];
} | java | {
"resource": ""
} |
q161279 | JobStatus.isJobComplete | train | public synchronized boolean isJobComplete() {
return (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED
|| runState == JobStatus.KILLED);
} | java | {
"resource": ""
} |
q161280 | JobStatus.getOldNewJobRunState | train | static int getOldNewJobRunState(
org.apache.hadoop.mapreduce.JobStatus.State state) {
return state.getValue();
} | java | {
"resource": ""
} |
q161281 | BlockLocation.setHosts | train | public void setHosts(String[] hosts) throws IOException {
if (hosts == null) {
this.hosts = new String[0];
} else {
this.hosts = hosts;
}
} | java | {
"resource": ""
} |
q161282 | BlockLocation.setTopologyPaths | train | public void setTopologyPaths(String[] topologyPaths) throws IOException {
if (topologyPaths == null) {
this.topologyPaths = new String[0];
} else {
this.topologyPaths = topologyPaths;
}
} | java | {
"resource": ""
} |
q161283 | RemoteException.unwrapRemoteException | train | public IOException unwrapRemoteException() {
try {
Class<?> realClass = Class.forName(getClassName());
return instantiateException(realClass.asSubclass(IOException.class));
} catch(Exception e) {
// cannot instantiate the original exception, just return this
}
return this;
} | java | {
"resource": ""
} |
q161284 | RemoteException.writeXml | train | public void writeXml(String path, XMLOutputter doc) throws IOException {
doc.startTag(RemoteException.class.getSimpleName());
doc.attribute("path", path);
doc.attribute("class", getClassName());
String msg = getLocalizedMessage();
int i = msg.indexOf("\n");
if (i >= 0) {
msg = msg.substring(0, i);
}
doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
doc.endTag();
} | java | {
"resource": ""
} |
q161285 | PolicyInfo.addDestPath | train | public void addDestPath(String in, Properties repl) throws IOException {
Path dPath = new Path(in);
if (!dPath.isAbsolute() || !dPath.toUri().isAbsolute()) {
throw new IOException("Path " + in + " is not absolute.");
}
PathInfo pinfo = new PathInfo(dPath, repl);
if (this.destPath == null) {
this.destPath = new ArrayList<PathInfo>();
}
this.destPath.add(pinfo);
} | java | {
"resource": ""
} |
q161286 | FSDirectoryNameCache.cacheNameInternal | train | private void cacheNameInternal(INode inode) {
// Name is cached only for files
if (inode.isDirectory()) {
return;
}
ByteArray name = new ByteArray(inode.getLocalNameBytes());
name = nameCache.put(name);
if (name != null) {
inode.setLocalName(name.getBytes());
}
} | java | {
"resource": ""
} |
q161287 | FSDirectoryNameCache.imageLoaded | train | void imageLoaded() throws IOException {
for (Future<Void> task : cachingTasks) {
try {
task.get();
} catch (InterruptedException e) {
throw new IOException("FSDirectory cache received interruption");
} catch (ExecutionException e) {
throw new IOException(e);
}
}
// will not be used after startup
this.cachingTasks = null;
this.cachingExecutor.shutdownNow();
this.cachingExecutor = null;
// process remaining inodes
for(INode inode : cachingTempQueue) {
cacheNameInternal(inode);
}
this.cachingTempQueue = null;
this.imageLoaded = true;
} | java | {
"resource": ""
} |
q161288 | FSDirectory.resetLastInodeId | train | public void resetLastInodeId(long newValue) throws IOException {
try {
inodeId.skipTo(newValue);
} catch(IllegalStateException ise) {
throw new IOException(ise);
}
} | java | {
"resource": ""
} |
q161289 | FSDirectory.waitForReady | train | void waitForReady() {
if (!ready) {
writeLock();
try {
while (!ready) {
try {
cond.await(5000, TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
}
}
} finally {
writeUnlock();
}
}
} | java | {
"resource": ""
} |
q161290 | FSDirectory.addFile | train | INodeFileUnderConstruction addFile(String path,
String[] names,
byte[][] components,
INode[] inodes,
PermissionStatus permissions,
short replication,
long preferredBlockSize,
String clientName,
String clientMachine,
DatanodeDescriptor clientNode,
long generationStamp,
long accessTime)
throws IOException {
waitForReady();
// Always do an implicit mkdirs for parent directory tree.
long modTime = FSNamesystem.now();
if (inodes[inodes.length-2] == null) { // non-existent directory
if (!mkdirs(names[names.length-1],
names, components, inodes, inodes.length-1,
permissions, true, modTime)) {
return null;
}
} else if (!inodes[inodes.length-2].isDirectory()) {
NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
+ "failed to add " + path + " as its parent is not a directory");
throw new FileNotFoundException("Parent path is not a directory: " + path);
}
if (accessTime == -1){
accessTime = modTime;
}
INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
allocateNewInodeId(),
permissions,replication,
preferredBlockSize, modTime, accessTime, clientName,
clientMachine, clientNode);
newNode.setLocalName(components[inodes.length-1]);
writeLock();
try {
newNode = addChild(inodes, inodes.length-1, newNode, -1, false);
} finally {
writeUnlock();
}
if (newNode == null) {
NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
+"failed to add "+path
+" to the file system");
return null;
}
// add create file record to log, record new generation stamp
fsImage.getEditLog().logOpenFile(path, newNode);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+path+" is added to the file system");
}
return newNode;
} | java | {
"resource": ""
} |
q161291 | FSDirectory.addToParent | train | INodeDirectory addToParent( byte[] src,
INodeDirectory parentINode,
INode newNode,
boolean propagateModTime,
int childIndex) {
// NOTE: This does not update space counts for parents
// add new node to the parent
INodeDirectory newParent = null;
writeLock();
try {
try {
newParent = rootDir.addToParent(src, newNode, parentINode,
false, propagateModTime, childIndex);
cacheName(newNode);
} catch (FileNotFoundException e) {
return null;
}
if(newParent == null)
return null;
if(!newNode.isDirectory()) {
// Add block->file mapping
INodeFile newF = (INodeFile)newNode;
BlockInfo[] blocks = newF.getBlocks();
for (int i = 0; i < blocks.length; i++) {
newF.setBlock(i, getFSNamesystem().blocksMap.addINodeForLoading(blocks[i], newF));
}
}
} finally {
writeUnlock();
}
return newParent;
} | java | {
"resource": ""
} |
q161292 | FSDirectory.addBlock | train | Block addBlock(String path, INode[] inodes, Block block) throws IOException {
waitForReady();
writeLock();
try {
INodeFile fileNode = (INodeFile) inodes[inodes.length-1];
INode.enforceRegularStorageINode(fileNode,
"addBlock only works for regular file, not " + path);
// check quota limits and updated space consumed
updateCount(inodes, inodes.length-1, 0,
fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
// associate the new list of blocks with this file
BlockInfo blockInfo =
getFSNamesystem().blocksMap.addINode(block, fileNode,
fileNode.getReplication());
fileNode.getStorage().addBlock(blockInfo);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+ path + " with " + block
+ " block is added to the in-memory "
+ "file system");
}
} finally {
writeUnlock();
}
return block;
} | java | {
"resource": ""
} |
q161293 | FSDirectory.persistBlocks | train | void persistBlocks(String path, INodeFileUnderConstruction file)
throws IOException {
waitForReady();
writeLock();
try {
fsImage.getEditLog().logOpenFile(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
+path+" with "+ file.getBlocks().length
+" blocks is persisted to the file system");
}
} finally {
writeUnlock();
}
} | java | {
"resource": ""
} |
q161294 | FSDirectory.closeFile | train | void closeFile(String path, INodeFile file) throws IOException {
waitForReady();
writeLock();
try {
long now = FSNamesystem.now();
// file is closed
file.setModificationTimeForce(now);
fsImage.getEditLog().logCloseFile(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
+path+" with "+ file.getBlocks().length
+" blocks is persisted to the file system");
}
} finally {
writeUnlock();
}
} | java | {
"resource": ""
} |
q161295 | FSDirectory.removeBlock | train | boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
Block block) throws IOException {
waitForReady();
writeLock();
try {
// modify file-> block and blocksMap
fileNode.removeBlock(block);
getFSNamesystem().blocksMap.removeBlock(block);
// Remove the block from corruptReplicasMap
getFSNamesystem().corruptReplicas.removeFromCorruptReplicasMap(block);
// write modified block locations to log
fsImage.getEditLog().logOpenFile(path, fileNode);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
+path+" with "+block
+" block is removed from the file system");
}
} finally {
writeUnlock();
}
return true;
} | java | {
"resource": ""
} |
q161296 | FSDirectory.getHardLinkId | train | public long getHardLinkId(String src) throws IOException {
byte[][] components = INode.getPathComponents(src);
readLock();
try {
INodeFile node = this.getFileINode(components);
if ((!exists(node)) || (!(node instanceof INodeHardLinkFile))) {
throw new IOException(src + " is not a valid hardlink file");
}
return ((INodeHardLinkFile) node).getHardLinkID();
} finally {
readUnlock();
}
} | java | {
"resource": ""
} |
q161297 | FSDirectory.unprotectedHardLinkTo | train | boolean unprotectedHardLinkTo(String src, String dst, long timestamp)
throws QuotaExceededException, FileNotFoundException {
return unprotectedHardLinkTo(src, null, null, null, dst, null, null, null, timestamp);
} | java | {
"resource": ""
} |
q161298 | FSDirectory.raidFile | train | public void raidFile(INode sourceINodes[], String source,
RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks)
throws IOException {
waitForReady();
long now = FSNamesystem.now();
unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl,
parityBlocks, now);
fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl,
now);
} | java | {
"resource": ""
} |
q161299 | FSDirectory.unprotectedRenameTo | train | boolean unprotectedRenameTo(String src, String dst, long timestamp)
throws QuotaExceededException {
return unprotectedRenameTo(src, null, null, null, dst, null, null, timestamp);
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.