_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q162100 | EditLogInputStream.skipUntil | train | public boolean skipUntil(long txid) throws IOException {
// Advance past edit-log ops whose transaction ids are below txid. The first
// op at or beyond txid is stashed in cachedOp for the next read; returns
// false if the stream is exhausted before reaching txid.
for (;;) {
final FSEditLogOp nextOp = readOp();
if (nextOp == null) {
return false;
}
if (nextOp.getTransactionId() >= txid) {
cachedOp = nextOp;
return true;
}
}
} | java | {
"resource": ""
} |
q162101 | SkipBadRecords.getSkipOutputPath | train | public static Path getSkipOutputPath(Configuration conf) {
// Resolve where skipped records are written: an explicitly configured
// OUT_PATH wins (the sentinel "none" disables skip output entirely);
// otherwise default to <job output dir>/_logs/skip, or null if the job
// has no output path.
final String configured = conf.get(OUT_PATH);
if (configured == null) {
Path outPath = FileOutputFormat.getOutputPath(new JobConf(conf));
return outPath == null ? null
: new Path(outPath, "_logs" + Path.SEPARATOR + "skip");
}
return "none".equals(configured) ? null : new Path(configured);
} | java | {
"resource": ""
} |
q162102 | SkipBadRecords.setSkipOutputPath | train | public static void setSkipOutputPath(JobConf conf, Path path) {
// A null path is stored as the sentinel string "none", which disables
// skip-record output (see getSkipOutputPath).
conf.set(OUT_PATH, path == null ? "none" : path.toString());
} | java | {
"resource": ""
} |
q162103 | MergeSorter.sort | train | public RawKeyValueIterator sort() {
// Merge-sorts the buffered record pointers and returns an iterator over the
// records in sorted order; returns null when the buffer holds no records.
MergeSort m = new MergeSort(this);
int count = super.count;
if (count == 0) return null;
int [] pointers = super.pointers;
// mergeSort works on the pair of arrays; pointersCopy is handed to the
// result iterator, which reads the final ordering from it.
int [] pointersCopy = new int[count];
System.arraycopy(pointers, 0, pointersCopy, 0, count);
m.mergeSort(pointers, pointersCopy, 0, count);
return new MRSortResultIterator(super.keyValBuffer, pointersCopy,
super.startOffsets, super.keyLengths, super.valueLengths);
} | java | {
"resource": ""
} |
q162104 | ResourceRequestInfo.write | train | public void write(JsonGenerator jsonGenerator) throws IOException {
// Serializes this object as a single-field JSON object: {"request": ...}.
// We neither need the list of RequestedNodes, nodes, nor excludedHosts,
// because we can reconstruct them from the request object
jsonGenerator.writeStartObject();
jsonGenerator.writeObjectField("request", request);
jsonGenerator.writeEndObject();
} | java | {
"resource": ""
} |
q162105 | FSOutputSummer.write | train | public synchronized void write(int b) throws IOException {
// Writes a single byte: folds it into the running checksum, appends it to
// the local buffer, and flushes when the current chunk becomes full.
eventStartWrite();
try {
sum.update(b);
buf[count++] = (byte) b;
// bytesSentInChunk bytes of this chunk are already out; chunk is full
// when the buffered bytes complete it.
if (bytesSentInChunk + count == buf.length) {
flushBuffer(true, shouldKeepPartialChunkData());
}
} finally {
eventEndWrite();
}
} | java | {
"resource": ""
} |
q162106 | FSOutputSummer.write1 | train | private int write1(byte b[], int off, int len) throws IOException {
// Writes as many bytes of b[off..off+len) as fit in the current chunk and
// returns the number consumed; callers loop to write the remainder.
// Fast path: when nothing is buffered and the user data alone can complete
// the chunk, checksum and emit straight from the user's array (no copy).
eventStartWrite();
try {
if(count==0 && bytesSentInChunk + len>=buf.length) {
// local buffer is empty and user data can fill the current chunk
// checksum and output data
final int length = buf.length - bytesSentInChunk;
sum.update(b, off, length);
writeChecksumChunk(b, off, length, false);
// start a new chunk
bytesSentInChunk = 0;
return length;
}
// copy user data to local buffer
int bytesToCopy = buf.length - bytesSentInChunk - count;
bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
sum.update(b, off, bytesToCopy);
System.arraycopy(b, off, buf, count, bytesToCopy);
count += bytesToCopy;
if (count + bytesSentInChunk == buf.length) {
// local buffer is full
flushBuffer(true, shouldKeepPartialChunkData());
}
return bytesToCopy;
} finally {
eventEndWrite();
}
} | java | {
"resource": ""
} |
q162107 | FSOutputSummer.writeChecksumChunk | train | private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
throws IOException {
// Emits one data chunk along with its checksum bytes. When keep is false
// the checksum accumulator is reset for the next chunk; when true the
// running sum is left intact so further bytes can extend it.
int tempChecksum = (int)sum.getValue();
if (!keep) {
sum.reset();
}
int2byte(tempChecksum, checksum);
writeChunk(b, off, len, checksum);
} | java | {
"resource": ""
} |
q162108 | INode.getUserName | train | public String getUserName() {
// The owner is stored as a compact serial number packed into the
// permission word; resolve it back to the user name.
final int serial = (int) PermissionStatusFormat.USER.retrieve(permission);
return SerialNumberManager.INSTANCE.getUser(serial);
} | java | {
"resource": ""
} |
q162109 | INode.getGroupName | train | public String getGroupName() {
// The group is stored as a compact serial number packed into the
// permission word; resolve it back to the group name.
final int serial = (int) PermissionStatusFormat.GROUP.retrieve(permission);
return SerialNumberManager.INSTANCE.getGroup(serial);
} | java | {
"resource": ""
} |
q162110 | INode.enforceRegularStorageINode | train | public static void enforceRegularStorageINode(INodeFile inode, String msg)
throws IOException {
// Guard: logs msg and throws IOException(msg) unless the inode uses
// REGULAR_STORAGE.
if (inode.getStorageType() != StorageType.REGULAR_STORAGE) {
LOG.error(msg);
throw new IOException(msg);
}
} | java | {
"resource": ""
} |
q162111 | INode.getPathComponents | train | static byte[][] getPathComponents(String[] strings) {
if (strings.length == 0) {
return new byte[][]{null};
}
byte[][] bytes = new byte[strings.length][];
for (int i = 0; i < strings.length; i++)
bytes[i] = DFSUtil.string2Bytes(strings[i]);
return bytes;
} | java | {
"resource": ""
} |
q162112 | INode.getPathNames | train | static String[] getPathNames(String path) {
// Splits an absolute path into its components. Returns null for a null
// path or one that does not start with the path separator.
if (path == null || !path.startsWith(Path.SEPARATOR)) {
return null;
}
return StringUtils.split(path, Path.SEPARATOR_CHAR);
} | java | {
"resource": ""
} |
q162113 | INode.compareTo | train | public final int compareTo(byte[] name2) {
// Lexicographic comparison of this inode's name bytes against name2; a
// null array is treated as empty, and a shorter prefix sorts first.
if (name == name2) {
return 0;
}
final int len1 = (name == null) ? 0 : name.length;
final int len2 = (name2 == null) ? 0 : name2.length;
final int limit = Math.min(len1, len2);
for (int i = 0; i < limit; i++) {
final int diff = name[i] - name2[i];
if (diff != 0) {
return diff;
}
}
return len1 - len2;
} | java | {
"resource": ""
} |
q162114 | INode.newINode | train | static INode newINode(long id,
PermissionStatus permissions,
BlockInfo[] blocks,
short replication,
long modificationTime,
long atime,
long nsQuota,
long dsQuota,
long preferredBlockSize,
byte inodeType,
long hardLinkID,
RaidCodec codec,
FSImageLoadingContext context) {
// Factory for INodes loaded from the FSImage, dispatching on inodeType:
// regular (directory / quota directory / file), hard-linked file, or
// raided file. Throws IllegalArgumentException for an unknown type.
if (inodeType == INode.INodeType.REGULAR_INODE.type) {
// Process the regular INode file
if (blocks == null) { // directory
if (nsQuota >= 0 || dsQuota >= 0) { // directory with quota
return new INodeDirectoryWithQuota(
id, permissions, modificationTime, nsQuota, dsQuota);
}
// regular directory
return new INodeDirectory(id, permissions, modificationTime);
}
// file
return new INodeFile(id, permissions, blocks, replication,
modificationTime, atime, preferredBlockSize, null);
} else if (inodeType == INode.INodeType.HARDLINKED_INODE.type) {
// Process the HardLink INode file
// create and register the hard link file info
HardLinkFileInfo hardLinkFileInfo =
INodeHardLinkFile.loadHardLinkFileInfo(hardLinkID, context);
// Reuse the same blocks for the hardlinked files
if (hardLinkFileInfo.getReferenceCnt() > 0) {
blocks = hardLinkFileInfo.getHardLinkedFile(0).getBlocks();
}
// Create the INodeHardLinkFile and increment the reference cnt
INodeHardLinkFile hardLinkFile = new INodeHardLinkFile(id,
permissions,
blocks,
replication,
modificationTime,
atime,
preferredBlockSize,
hardLinkFileInfo);
hardLinkFile.incReferenceCnt();
return hardLinkFile;
} else if (inodeType == INode.INodeType.RAIDED_INODE.type) {
return new INodeFile(id, permissions, blocks, replication,
modificationTime, atime, preferredBlockSize,
codec);
} else {
throw new IllegalArgumentException("Invalid inode type: " + inodeType);
}
} | java | {
"resource": ""
} |
q162115 | DataBlockScannerSet.waitForUpgradeDone | train | private void waitForUpgradeDone(int namespaceId) {
// Blocks until the distributed upgrade for the namespace completes,
// reporting scanner-thread liveness on each 5s poll. If interrupted,
// re-interrupts the scanner thread and returns early.
UpgradeManagerDatanode um = datanode.getUpgradeManager(namespaceId);
while (!um.isUpgradeCompleted()) {
try {
datanode.updateAndReportThreadLiveness(BackgroundThread.BLOCK_SCANNER);
Thread.sleep(5000);
LOG.info("sleeping ............");
} catch (InterruptedException e) {
blockScannerThread.interrupt();
return;
}
}
} | java | {
"resource": ""
} |
q162116 | DataBlockScannerSet.getNextNamespaceSliceScanner | train | private DataBlockScanner getNextNamespaceSliceScanner(int currentNamespaceId) {
// Picks the next namespace to scan: prefer the namespace whose current
// scan log file was modified most recently; otherwise round-robin to the
// key after currentNamespaceId (wrapping to the first). Loops with 5s
// sleeps until a namespace is available; returns null on shutdown or
// interruption.
Integer nextNsId = null;
while ((nextNsId == null) && datanode.shouldRun
&& !blockScannerThread.isInterrupted()) {
waitForOneNameSpaceUp();
synchronized (this) {
if (getNamespaceSetSize() > 0) {
// Find nextNsId by finding the last modified current log file, if any
long lastScanTime = -1;
Iterator<Integer> nsidIterator = namespaceScannerMap.keySet()
.iterator();
while (nsidIterator.hasNext()) {
int nsid = nsidIterator.next();
for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
try {
File currFile = DataBlockScanner.getCurrentFile(vol, nsid);
if (currFile.exists()) {
long lastModified = currFile.lastModified();
if (lastScanTime < lastModified) {
lastScanTime = lastModified;
nextNsId = nsid;
}
}
} catch (IOException e) {
LOG.warn("Received exception: ", e);
}
}
}
// nextNsId can still be null if no current log is found,
// find nextNsId sequentially.
if (nextNsId == null) {
try {
if (currentNamespaceId == -1) {
nextNsId = namespaceScannerMap.firstKey();
} else {
nextNsId = namespaceScannerMap.higherKey(currentNamespaceId);
if (nextNsId == null) {
nextNsId = namespaceScannerMap.firstKey();
}
}
} catch (NoSuchElementException e) {
// if firstKey throws an exception
continue;
}
}
if (nextNsId != null) {
return getNSScanner(nextNsId);
}
}
}
LOG.warn("No namespace is up, going to wait");
try {
Thread.sleep(5000);
} catch (InterruptedException ex) {
LOG.warn("Received exception: " + ex);
blockScannerThread.interrupt();
return null;
}
}
return null;
} | java | {
"resource": ""
} |
q162117 | HashFunction.hash | train | public int[] hash(Key k){
// Computes nbHash hash values for the key, each in [0, maxValue), by
// chaining the hash function: each round's output seeds the next round.
byte[] b = k.getBytes();
if (b == null) {
throw new NullPointerException("buffer reference is null");
}
if (b.length == 0) {
throw new IllegalArgumentException("key length must be > 0");
}
int[] result = new int[nbHash];
for (int i = 0, initval = 0; i < nbHash; i++) {
initval = hashFunction.hash(b, initval);
result[i] = Math.abs(initval % maxValue);
}
return result;
} | java | {
"resource": ""
} |
q162118 | HttpServer.createBaseListener | train | protected Connector createBaseListener(Configuration conf)
throws IOException {
// Builds the Jetty connector for this server: a blocking SocketConnector
// when "hadoop.http.bio" is set, otherwise the NIO SelectChannelConnector.
// Common tuning (idle times, header buffer size) is applied to either.
Connector ret;
if (conf.getBoolean("hadoop.http.bio", false)) {
SocketConnector conn = new SocketConnector();
conn.setAcceptQueueSize(4096);
conn.setResolveNames(false);
ret = conn;
} else {
SelectChannelConnector conn = new SelectChannelConnector();
conn.setAcceptQueueSize(128);
conn.setResolveNames(false);
conn.setUseDirectBuffers(false);
ret = conn;
}
ret.setLowResourceMaxIdleTime(10000);
ret.setHeaderBufferSize(conf.getInt("hadoop.http.header.buffer.size", 4096));
ret.setMaxIdleTime(conf.getInt("dfs.http.timeout", 200000));
return ret;
} | java | {
"resource": ""
} |
q162119 | HttpServer.getFilterInitializers | train | private static FilterInitializer[] getFilterInitializers(Configuration conf) {
// Instantiates the FilterInitializer classes named by
// FILTER_INITIALIZER_PROPERTY; returns null when conf is null or the
// property is not set.
if (conf == null) {
return null;
}
Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
if (classes == null) {
return null;
}
FilterInitializer[] initializers = new FilterInitializer[classes.length];
for(int i = 0; i < classes.length; i++) {
initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
classes[i], conf);
}
return initializers;
} | java | {
"resource": ""
} |
q162120 | HttpServer.addDefaultApps | train | protected void addDefaultApps(ContextHandlerCollection parent,
final String appDir) throws IOException {
// Registers the built-in "/logs" context (only when hadoop.log.dir is
// defined) and the "/static" context, both served by StaticServlet.
// set up the context for "/logs/" if "hadoop.log.dir" property is defined.
String logDir = System.getProperty("hadoop.log.dir");
if (logDir != null) {
Context logContext = new Context(parent, "/logs");
logContext.setResourceBase(logDir);
logContext.addServlet(StaticServlet.class, "/");
defaultContexts.put(logContext, true);
}
// set up the context for "/static/*"
Context staticContext = new Context(parent, "/static");
staticContext.setResourceBase(appDir + "/static");
staticContext.addServlet(StaticServlet.class, "/*");
defaultContexts.put(staticContext, true);
} | java | {
"resource": ""
} |
q162121 | HttpServer.addDefaultServlets | train | protected void addDefaultServlets() {
// Registers the standard diagnostic servlets (thread stacks, log level
// control, JMX, metrics, effective configuration).
// set up default servlets
addServlet("stacks", "/stacks", StackServlet.class);
addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
addServlet("jmx", "/jmx", JMXJsonServlet.class);
addServlet("metrics", "/metrics", MetricsServlet.class);
addServlet("conf", "/conf", ConfServlet.class);
} | java | {
"resource": ""
} |
q162122 | HttpServer.addContext | train | protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException {
// Mounts a web application (war file or directory) at pathSpec.
// NOTE(review): the isFiltered argument is ignored — the overload below is
// always invoked with true; confirm this is intentional.
if (0 == webServer.getHandlers().length) {
throw new RuntimeException("Couldn't find handler");
}
WebAppContext webAppCtx = new WebAppContext();
webAppCtx.setContextPath(pathSpec);
webAppCtx.setWar(dir);
addContext(webAppCtx, true);
} | java | {
"resource": ""
} |
q162123 | HttpServer.addServlet | train | public void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
// Registers a servlet and attaches all configured filters to its path.
addInternalServlet(name, pathSpec, clazz);
addFilterPathMapping(pathSpec, webAppContext);
} | java | {
"resource": ""
} |
q162124 | HttpServer.addInternalServlet | train | @Deprecated
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
// Registers a servlet WITHOUT adding the configured filter mappings
// (hence "internal"); name is optional.
ServletHolder holder = new ServletHolder(clazz);
if (name != null) {
holder.setName(name);
}
webAppContext.addServlet(holder, pathSpec);
} | java | {
"resource": ""
} |
q162125 | HttpServer.removeServlet | train | public void removeServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
// Removes the servlet registered under (name, pathSpec) and drops any
// filter mappings that applied only to that pathSpec.
if(clazz == null) {
return;
}
//remove the filters from filterPathMapping
ServletHandler servletHandler = webAppContext.getServletHandler();
List<FilterMapping> newFilterMappings = new ArrayList<FilterMapping>();
//keep a mapping if it covers at least one pathSpec other than the one
//being removed; break so a multi-path mapping is kept only once (without
//the break the mapping was re-added once per surviving pathSpec,
//producing duplicate filter mappings)
for(FilterMapping mapping: servletHandler.getFilterMappings()) {
for(String mappingPathSpec: mapping.getPathSpecs()) {
if(!mappingPathSpec.equals(pathSpec)){
newFilterMappings.add(mapping);
break;
}
}
}
servletHandler.setFilterMappings(newFilterMappings.toArray(new FilterMapping[newFilterMappings.size()]));
removeInternalServlet(name, pathSpec, clazz);
} | java | {
"resource": ""
} |
q162126 | HttpServer.removeInternalServlet | train | public void removeInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
// Unregisters the servlet holder matching (name, clazz) and any of its
// mappings bound to pathSpec; a no-op when the servlet is not registered.
if(null == clazz) {
return;
}
ServletHandler servletHandler = webAppContext.getServletHandler();
List<ServletHolder> newServletHolders = new ArrayList<ServletHolder>();
List<ServletMapping> newServletMappings = new ArrayList<ServletMapping>();
String clazzName = clazz.getName();
Set<String> holdersToRemove = new HashSet<String>();
//find all the holders that hold the servlet to be removed
for(ServletHolder holder : servletHandler.getServlets()) {
try{
if(clazzName.equals(holder.getServlet().getClass().getName())
&& name.equals(holder.getName())) {
holdersToRemove.add(holder.getName());
} else {
newServletHolders.add(holder);
}
} catch(ServletException e) {
// log message fixed: the concatenation previously ran the words
// "through" and "servlet" together and had no separator before the
// stringified exception
LOG.error("exception in removeInternalServlet() when iterating through " +
"servlet holders: " + StringUtils.stringifyException(e));
}
}
//if there is no holder to be removed, then the servlet does not exist in
//current context
if(holdersToRemove.size() < 1) {
return;
}
//only add the servlet mapping if it is not to be removed
for(ServletMapping mapping : servletHandler.getServletMappings()) {
//if the mapping's servlet is not to be removed, add to new mappings
if(!holdersToRemove.contains(mapping.getServletName())) {
newServletMappings.add(mapping);
} else {
String[] pathSpecs = mapping.getPathSpecs();
boolean pathSpecMatched = false;
if(pathSpecs != null && pathSpecs.length > 0) {
for(String pathSpecInMapping: pathSpecs) {
if(pathSpecInMapping.equals(pathSpec)) {
pathSpecMatched = true;
break;
}
}
}
//if the pathspec does not match, then add to the new mappings
if(!pathSpecMatched) {
newServletMappings.add(mapping);
}
}
}
servletHandler.setServletMappings(
newServletMappings.toArray(new ServletMapping[newServletMappings.size()]));
servletHandler.setServlets(
newServletHolders.toArray(new ServletHolder[newServletHolders.size()]));
} | java | {
"resource": ""
} |
q162127 | HttpServer.defineFilter | train | protected void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls) {
// Defines a filter in the given context and maps it to the given URL
// patterns for all dispatch types.
FilterHolder holder = new FilterHolder();
holder.setName(name);
holder.setClassName(classname);
holder.setInitParameters(parameters);
FilterMapping fmap = new FilterMapping();
fmap.setPathSpecs(urls);
fmap.setDispatches(Handler.ALL);
fmap.setFilterName(name);
ServletHandler handler = ctx.getServletHandler();
handler.addFilter(holder, fmap);
} | java | {
"resource": ""
} |
q162128 | HttpServer.addFilterPathMapping | train | protected void addFilterPathMapping(String pathSpec,
Context webAppCtx) {
// Maps every registered filter (filterNames) onto pathSpec for all
// dispatch types.
ServletHandler handler = webAppCtx.getServletHandler();
for(String name : filterNames) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(name);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
} | java | {
"resource": ""
} |
q162129 | HttpServer.getWebAppsPath | train | protected String getWebAppsPath() throws IOException {
// Locates the "webapps" resource directory on the classpath; throws
// IOException when it is missing.
URL url = getClass().getClassLoader().getResource("webapps");
if (url == null)
throw new IOException("webapps not found in CLASSPATH");
return url.toString();
} | java | {
"resource": ""
} |
q162130 | HttpServer.stop | train | public void stop() throws Exception {
// Shuts the server down: close the listener first so no new connections
// arrive, detach the webapp, then stop the Jetty server.
listener.close();
webAppContext.clearAttributes();
webServer.removeHandler(webAppContext);
webServer.stop();
} | java | {
"resource": ""
} |
q162131 | AbstractMetricsContext.getAttribute | train | protected String getAttribute(String attributeName) {
// Factory attributes are namespaced by context: "<contextName>.<attr>".
return (String) factory.getAttribute(contextName + "." + attributeName);
} | java | {
"resource": ""
} |
q162132 | AbstractMetricsContext.registerUpdater | train | public synchronized void registerUpdater(final Updater updater) {
// Idempotent: an updater that is already registered is not re-added.
if (!updaters.containsKey(updater)) {
updaters.put(updater, Boolean.TRUE);
}
} | java | {
"resource": ""
} |
q162133 | AbstractMetricsContext.startTimer | train | private synchronized void startTimer() {
// Lazily starts a daemon timer that fires timerEvent() every `period`
// seconds; a no-op when the timer is already running.
if (timer == null) {
timer = new Timer("Timer thread for monitoring " + getContextName(),
true);
TimerTask task = new TimerTask() {
public void run() {
try {
timerEvent();
}
catch (IOException ioe) {
ioe.printStackTrace();
}
}
};
long millis = period * 1000;
timer.scheduleAtFixedRate(task, millis, millis);
}
} | java | {
"resource": ""
} |
q162134 | AbstractMetricsContext.timerEvent | train | private void timerEvent() throws IOException {
// Periodic tick: snapshot the updater set, run each updater outside the
// context lock (swallowing per-updater failures), then emit all buffered
// records. Does nothing while monitoring is off.
if (isMonitoring) {
Collection<Updater> myUpdaters;
synchronized (this) {
myUpdaters = new ArrayList<Updater>(updaters.keySet());
}
// Run all the registered updates without holding a lock
// on this context
for (Updater updater : myUpdaters) {
try {
updater.doUpdates(this);
}
catch (Throwable throwable) {
throwable.printStackTrace();
}
}
emitRecords();
}
} | java | {
"resource": ""
} |
q162135 | AbstractMetricsContext.emitRecords | train | private synchronized void emitRecords() throws IOException {
// Pushes every buffered record to emitRecord(), locking each per-record
// map while iterating, then flushes the underlying sink.
for (String recordName : bufferedData.keySet()) {
RecordMap recordMap = bufferedData.get(recordName);
synchronized (recordMap) {
Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet ();
for (Entry<TagMap, MetricMap> entry : entrySet) {
OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
emitRecord(contextName, recordName, outRec);
}
}
}
flush();
} | java | {
"resource": ""
} |
q162136 | AbstractMetricsContext.sum | train | private Number sum(Number a, Number b) {
// Adds two boxed numbers, preserving a's concrete type; a's runtime class
// selects the arithmetic width. Throws MetricsException for any other
// Number subtype. All branches now use the boxed valueOf factories (the
// Float branch previously used the deprecated `new Float(...)` ctor).
if (a instanceof Integer) {
return Integer.valueOf(a.intValue() + b.intValue());
}
else if (a instanceof Float) {
return Float.valueOf(a.floatValue() + b.floatValue());
}
else if (a instanceof Short) {
return Short.valueOf((short)(a.shortValue() + b.shortValue()));
}
else if (a instanceof Byte) {
return Byte.valueOf((byte)(a.byteValue() + b.byteValue()));
}
else if (a instanceof Long) {
return Long.valueOf((a.longValue() + b.longValue()));
}
else {
// should never happen
throw new MetricsException("Invalid number type");
}
} | java | {
"resource": ""
} |
q162137 | AbstractMetricsContext.parseAndSetPeriod | train | protected void parseAndSetPeriod(String attributeName) {
// Reads the named attribute as the reporting period in seconds. A missing
// attribute is ignored; an unparsable or non-positive value raises
// MetricsException (a NumberFormatException deliberately leaves period at
// 0 so the <= 0 check below produces the same error).
String periodStr = getAttribute(attributeName);
if (periodStr != null) {
int period = 0;
try {
period = Integer.parseInt(periodStr);
} catch (NumberFormatException nfe) {
}
if (period <= 0) {
throw new MetricsException("Invalid period: " + periodStr);
}
setPeriod(period);
}
} | java | {
"resource": ""
} |
q162138 | AbstractMetricsContext.getAllRecords | train | @Override
public synchronized Map<String, Collection<OutputRecord>> getAllRecords() {
// Returns a snapshot of all buffered records, grouped by record name in
// sorted (TreeMap) order; each per-record map is locked while copied.
Map<String, Collection<OutputRecord>> out = new TreeMap<String, Collection<OutputRecord>>();
for (String recordName : bufferedData.keySet()) {
RecordMap recordMap = bufferedData.get(recordName);
synchronized (recordMap) {
List<OutputRecord> records = new ArrayList<OutputRecord>();
Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet();
for (Entry<TagMap, MetricMap> entry : entrySet) {
OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
records.add(outRec);
}
out.put(recordName, records);
}
}
return out;
} | java | {
"resource": ""
} |
q162139 | SimulatorTaskTracker.accept | train | @Override
public List<SimulatorEvent> accept(SimulatorEvent event) {
// Dispatches an incoming simulator event to its handler; any event type
// other than heartbeat or task-attempt-completion is a programming error.
if (LOG.isDebugEnabled()) {
LOG.debug("Accepted event " + event);
}
if (event instanceof HeartbeatEvent) {
return processHeartbeatEvent((HeartbeatEvent)event);
} else if (event instanceof TaskAttemptCompletionEvent) {
return processTaskAttemptCompletionEvent((TaskAttemptCompletionEvent)
event);
} else {
throw new IllegalArgumentException("Unhandled event "+event);
}
} | java | {
"resource": ""
} |
q162140 | SimulatorTaskTracker.init | train | public List<SimulatorEvent> init(long when) {
// Bootstraps the tracker by scheduling its first heartbeat at time `when`.
LOG.debug("TaskTracker starting up, current simulation time=" + when);
return Collections.<SimulatorEvent>singletonList(new HeartbeatEvent(this, when));
} | java | {
"resource": ""
} |
q162141 | SimulatorTaskTracker.finishRunningTask | train | private void finishRunningTask(TaskStatus finalStatus, long now) {
// Transitions a RUNNING task attempt into the terminal state described by
// finalStatus (SUCCEEDED/FAILED/KILLED), after extensive sanity checks
// that finalStatus agrees with the tracked status, and releases the slots
// the attempt was holding.
TaskAttemptID taskId = finalStatus.getTaskID();
if (LOG.isDebugEnabled()) {
LOG.debug("Finishing running task id=" + taskId + ", now=" + now);
}
SimulatorTaskInProgress tip = tasks.get(taskId);
if (tip == null) {
throw new IllegalArgumentException("Unknown task attempt " + taskId
+ " completed");
}
TaskStatus currentStatus = tip.getTaskStatus();
if (currentStatus.getRunState() != State.RUNNING) {
throw new IllegalArgumentException(
"Task attempt to finish is not running: " + tip);
}
// Check that finalStatus describes a task attempt that has just been
// completed
State finalRunState = finalStatus.getRunState();
if (finalRunState != State.SUCCEEDED && finalRunState != State.FAILED
&& finalRunState != State.KILLED) {
throw new IllegalArgumentException(
"Final run state for completed task can't be : " + finalRunState
+ " " + tip);
}
if (now != finalStatus.getFinishTime()) {
throw new IllegalArgumentException(
"Current time does not match task finish time: now=" + now
+ ", finish=" + finalStatus.getFinishTime());
}
if (currentStatus.getIsMap() != finalStatus.getIsMap()
|| currentStatus.getNumSlots() != finalStatus.getNumSlots()
|| currentStatus.getPhase() != finalStatus.getPhase()
|| currentStatus.getStartTime() != finalStatus.getStartTime()) {
throw new IllegalArgumentException(
"Current status does not match final status");
}
// We can't assert getShuffleFinishTime() and getSortFinishTime() for
// reduces as those were unknown when the task attempt completion event
// was created. We have not called setMapFinishTime() for maps either.
// If we were really thorough we could update the progress of the task
// and check if it is consistent with finalStatus.
// If we've got this far it is safe to update the task status
currentStatus.setRunState(finalStatus.getRunState());
currentStatus.setFinishTime(finalStatus.getFinishTime());
currentStatus.setProgress(finalStatus.getProgress());
// and update the free slots
int numSlots = currentStatus.getNumSlots();
if (tip.isMapTask()) {
usedMapSlots -= numSlots;
if (usedMapSlots < 0) {
throw new IllegalStateException(
"TaskTracker reaches negative map slots: " + usedMapSlots);
}
} else {
usedReduceSlots -= numSlots;
if (usedReduceSlots < 0) {
throw new IllegalStateException(
"TaskTracker reaches negative reduce slots: " + usedReduceSlots);
}
}
} | java | {
"resource": ""
} |
q162142 | SimulatorTaskTracker.processTaskAttemptCompletionEvent | train | private List<SimulatorEvent> processTaskAttemptCompletionEvent(
TaskAttemptCompletionEvent event) {
// Completes the task attempt, unless it was killed earlier — then the
// completion event is an orphan and is silently dropped.
if (LOG.isDebugEnabled()) {
LOG.debug("Processing task attempt completion event" + event);
}
long now = event.getTimeStamp();
TaskStatus finalStatus = event.getStatus();
TaskAttemptID taskID = finalStatus.getTaskID();
boolean killedEarlier = orphanTaskCompletions.remove(taskID);
if (!killedEarlier) {
finishRunningTask(finalStatus, now);
}
return SimulatorEngine.EMPTY_EVENTS;
} | java | {
"resource": ""
} |
q162143 | SimulatorTaskTracker.createTaskAttemptCompletionEvent | train | private TaskAttemptCompletionEvent createTaskAttemptCompletionEvent(
SimulatorTaskInProgress tip, long now) {
// Predicts when this attempt's user-space work finishes (now + runtime)
// and wraps the resulting final status in a completion event.
// We need to clone() status as we modify and it goes into an Event
TaskStatus status = (TaskStatus)tip.getTaskStatus().clone();
long delta = tip.getUserSpaceRunTime();
assert delta >= 0 : "TaskAttempt " + tip.getTaskStatus().getTaskID()
+ " has negative UserSpaceRunTime = " + delta;
long finishTime = now + delta;
status.setFinishTime(finishTime);
status.setProgress(1.0f);
status.setRunState(tip.getFinalRunState());
TaskAttemptCompletionEvent event =
new TaskAttemptCompletionEvent(this, status);
if (LOG.isDebugEnabled()) {
LOG.debug("Created task attempt completion event " + event);
}
return event;
} | java | {
"resource": ""
} |
q162144 | SimulatorTaskTracker.handleSimulatorLaunchTaskAction | train | private List<SimulatorEvent> handleSimulatorLaunchTaskAction(
SimulatorLaunchTaskAction action, long now) {
// Launches a task attempt on this tracker: creates its RUNNING status,
// charges the slots it needs, records a TIP, and (for maps only, whose
// runtime is known up front) schedules its completion event.
if (LOG.isDebugEnabled()) {
LOG.debug("Handling launch task action " + action);
}
// First, create statuses and update used slots for map and reduce
// task separately
Task task = action.getTask();
TaskAttemptID taskId = task.getTaskID();
if (tasks.containsKey(taskId)) {
throw new IllegalArgumentException("Multiple launch of task id =" + taskId);
}
// Ctor of MapTaskStatus and ReduceTaskStatus need deprecated
// o.a.h.mapred.TaskAttemptID, hence the downgrade
org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
TaskStatus status;
int numSlotsRequired = task.getNumSlotsRequired();
Counters emptyCounters = new Counters();
if (task.isMapTask()) {
status = new MapTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
State.RUNNING, "", "", taskTrackerName,
Phase.MAP, emptyCounters);
usedMapSlots += numSlotsRequired;
if (usedMapSlots > maxMapSlots) {
throw new IllegalStateException("usedMapSlots exceeds maxMapSlots: " +
usedMapSlots + " > " + maxMapSlots);
}
} else {
status = new ReduceTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
State.RUNNING, "", "", taskTrackerName,
Phase.SHUFFLE, emptyCounters);
usedReduceSlots += numSlotsRequired;
// message fixed: it previously read "usedReduceSlots exceeds
// usedReduceSlots" and printed usedReduceSlots on both sides
if (usedReduceSlots > maxReduceSlots) {
throw new IllegalStateException("usedReduceSlots exceeds maxReduceSlots: " +
usedReduceSlots + " > " + maxReduceSlots);
}
}
// Second, create and store a TIP
status.setStartTime(now);
SimulatorTaskInProgress tip =
new SimulatorTaskInProgress(action, status, now);
tasks.put(taskId, tip);
// Third, schedule events for ourselves
if (task.isMapTask()) {
// we know when this task attempts ends iff it is a map
TaskAttemptCompletionEvent e = createTaskAttemptCompletionEvent(tip, now);
return Collections.<SimulatorEvent>singletonList(e);
} else {
// reduce, completion time can only be determined when all maps are done
return SimulatorEngine.EMPTY_EVENTS;
}
} | java | {
"resource": ""
} |
q162145 | SimulatorTaskTracker.handleKillTaskAction | train | private List<SimulatorEvent> handleKillTaskAction(KillTaskAction action, long now) {
// Kills a running task attempt: brings its progress up to date, marks it
// KILLED at `now`, and — when a completion event for it may already be in
// flight — remembers the id so that event can be ignored on delivery.
TaskAttemptID taskId = action.getTaskID();
// we don't have a nice(r) toString() in Hadoop's TaskActions
if (LOG.isDebugEnabled()) {
LOG.debug("Handling kill task action, taskId=" + taskId + ", now=" + now);
}
SimulatorTaskInProgress tip = tasks.get(taskId);
// Safety check: We might get a KillTaskAction even for completed reduces
if (tip == null) {
return SimulatorEngine.EMPTY_EVENTS;
}
progressTaskStatus(tip, now); // make progress up to date
TaskStatus finalStatus = (TaskStatus)tip.getTaskStatus().clone();
finalStatus.setFinishTime(now);
finalStatus.setRunState(State.KILLED);
finishRunningTask(finalStatus, now);
if (finalStatus.getIsMap() || finalStatus.getPhase() == Phase.REDUCE) {
// if we have already created a task attempt completion event we remember
// the task id, so that we can safely ignore the event when its delivered
orphanTaskCompletions.add(taskId);
}
return SimulatorEngine.EMPTY_EVENTS;
} | java | {
"resource": ""
} |
q162146 | SimulatorTaskTracker.progressTaskStatus | train | private void progressTaskStatus(SimulatorTaskInProgress tip, long now) {
// Re-estimates a RUNNING attempt's progress at simulation time `now`:
// maps progress linearly from their start; reduces report 0, 1/3 or
// 2/3-plus-linear depending on the SHUFFLE/SORT/REDUCE phase. The result
// is clamped to [0, 1] and written back into the task status.
TaskStatus status = tip.getTaskStatus();
if (status.getRunState() != State.RUNNING) {
return; // nothing to be done
}
boolean isMap = tip.isMapTask();
// Time when the user space code started
long startTime = -1;
// Time spent in map or just in the REDUCE phase of a reduce task
long runTime = tip.getUserSpaceRunTime();
float progress = 0.0f;
if (isMap) {
// We linearly estimate the progress of maps since their start
startTime = status.getStartTime();
progress = ((float)(now - startTime)) / runTime;
} else {
// We don't model reduce progress in the SHUFFLE or SORT phases
// We use linear estimate for the 3rd, REDUCE phase
Phase reducePhase = status.getPhase();
switch (reducePhase) {
case SHUFFLE:
progress = 0.0f; // 0 phase is done out of 3
break;
case SORT:
progress = 1.0f/3; // 1 phase is done out of 3
break;
case REDUCE: {
// REDUCE phase with the user code started when sort finished
startTime = status.getSortFinishTime();
// 0.66f : 2 phases are done out of of 3
progress = 2.0f/3 + (((float) (now - startTime)) / runTime) / 3.0f;
}
break;
default:
// should never get here
throw new IllegalArgumentException("Invalid reducePhase=" + reducePhase);
}
}
final float EPSILON = 0.0001f;
if (progress < -EPSILON || progress > 1 + EPSILON) {
throw new IllegalStateException("Task progress out of range: " + progress);
}
progress = Math.max(Math.min(1.0f, progress), 0.0f);
status.setProgress(progress);
if (LOG.isDebugEnabled()) {
LOG.debug("Updated task progress, taskId=" + status.getTaskID()
+ ", progress=" + status.getProgress());
}
} | java | {
"resource": ""
} |
q162147 | SimulatorTaskTracker.garbageCollectCompletedTasks | train | private void garbageCollectCompletedTasks() {
// Drops bookkeeping entries for task attempts that are no longer RUNNING;
// uses Iterator.remove() so the map can be modified during iteration.
for (Iterator<TaskAttemptID> iter = tasks.keySet().iterator();
iter.hasNext();) {
TaskAttemptID taskId = iter.next();
SimulatorTaskInProgress tip = tasks.get(taskId);
if (tip.getTaskStatus().getRunState() != State.RUNNING) {
iter.remove();
if (LOG.isDebugEnabled()) {
LOG.debug("Garbage collected SimulatorTIP, taskId=" + taskId);
}
// We don't have to / must not touch usedMapSlots and usedReduceSlots
// as those were already updated by processTaskAttemptCompletionEvent()
// when the task switched its state from running
}
}
} | java | {
"resource": ""
} |
q162148 | SimulatorTaskTracker.processHeartbeatEvent | train | private List<SimulatorEvent> processHeartbeatEvent(HeartbeatEvent event) {
// Builds the tracker status, sends the heartbeat to the job tracker,
// applies the response (launch/kill actions), and schedules the next
// heartbeat at now + the interval the job tracker returned.
if (LOG.isDebugEnabled()) {
LOG.debug("Processing heartbeat event " + event);
}
long now = event.getTimeStamp();
// Create the TaskTrackerStatus to report
progressTaskStatuses(now);
List<TaskStatus> taskStatuses = collectAndCloneTaskStatuses();
boolean askForNewTask = (usedMapSlots < maxMapSlots ||
usedReduceSlots < maxReduceSlots);
// 0 means failures == 0 here. Undocumented in TaskTracker, but does not
// seem to be used at all in org.apache.hadoop.mapred .
TaskTrackerStatus taskTrackerStatus =
new SimulatorTaskTrackerStatus(taskTrackerName, hostName, httpPort,
taskStatuses, 0,
maxMapSlots, maxReduceSlots, now);
// This is the right, and only, place to release bookkeping memory held
// by completed tasks: after collectAndCloneTaskStatuses() and before
// heartbeat().
// The status of TIPs to be purged is already cloned & copied to
// taskStatuses for reporting
// We shouldn't run the gc after heartbeat() since KillTaskAction might
// produce new completed tasks that we have not yet reported back and
// don't want to purge immediately.
garbageCollectCompletedTasks();
// Transmit the heartbeat
HeartbeatResponse response = null;
try {
response =
jobTracker.heartbeat(taskTrackerStatus, false, firstHeartbeat,
askForNewTask, heartbeatResponseId);
} catch (IOException ioe) {
throw new IllegalStateException("Internal error", ioe);
}
firstHeartbeat = false;
// The heartbeat got through successfully!
heartbeatResponseId = response.getResponseId();
// Process the heartbeat response
List<SimulatorEvent> events = handleHeartbeatResponse(response, now);
// Next heartbeat
events.add(new HeartbeatEvent(this, now + response.getHeartbeatInterval()));
return events;
} | java | {
"resource": ""
} |
q162149 | Decoder.retrieveStripe | train | public StripeInfo retrieveStripe(Block lostBlock, Path p,
long lostBlockOffset, FileSystem fs, Context context,
boolean online)
throws IOException {
// Looks up the stripe containing lostBlock in the stripe store, if one is
// configured. A failed or missing lookup is reported to the raid
// reconstruction metrics log and null is returned; null is returned
// silently when no stripe store is configured.
StripeInfo si = null;
if (stripeStore != null) {
IOException caughtException = null;
try {
si = stripeStore.getStripe(codec, lostBlock);
} catch (IOException ioe) {
LOG.error(" Fail to get stripe " + codec
+ " : " + lostBlock, ioe);
caughtException = ioe;
}
if (si == null) {
// Stripe is not record, we should report
LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0,
codec, p, lostBlockOffset,
online? LOGTYPES.ONLINE_RECONSTRUCTION_GET_STRIPE:
LOGTYPES.OFFLINE_RECONSTRUCTION_GET_STRIPE,
fs, caughtException, context);
}
}
return si;
} | java | {
"resource": ""
} |
q162150 | Decoder.retrieveChecksum | train | public Long retrieveChecksum(Block lostBlock, Path p,
long lostBlockOffset, FileSystem fs, Context context)
throws IOException {
// Fetches the recorded CRC for lostBlock from the checksum store, if one is
// configured. A failed or missing lookup is reported to the reconstruction
// metrics log and null is returned; null is returned silently when no
// checksum store is configured.
Long oldCRC = null;
if (checksumStore != null) {
IOException caughtException = null;
try {
oldCRC = checksumStore.getChecksum(lostBlock);
} catch (IOException ioe) {
LOG.error(" Fail to get checksum for block " + lostBlock, ioe);
caughtException = ioe;
}
// Checksum is not record, we should report
if (oldCRC == null) {
LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0,
codec, p, lostBlockOffset,
LOGTYPES.OFFLINE_RECONSTRUCTION_GET_CHECKSUM,
fs, caughtException, context);
}
}
return oldCRC;
} | java | {
"resource": ""
} |
q162151 | Decoder.recoverBlockToFileFromStripeInfo | train | public CRC32 recoverBlockToFileFromStripeInfo(
FileSystem srcFs, Path srcPath, Block lostBlock, File localBlockFile,
long blockSize, long lostBlockOffset, long limit,
StripeInfo si, Context context) throws IOException {
// Reconstructs the erased block described by si into localBlockFile.
// Returns the CRC32 of the reconstructed data when a checksum store is
// configured (so the result can be verified), otherwise null.
OutputStream out = null;
try {
out = new FileOutputStream(localBlockFile);
CRC32 crc = null;
if (checksumStore != null) {
crc = new CRC32();
}
fixErasedBlockImpl(srcFs, srcPath, srcFs, null, true, blockSize,
lostBlockOffset, limit, false, out,
context, crc, si, true, lostBlock);
return crc;
} finally {
// Always release the local file handle, even on reconstruction failure.
if (null != out) {
out.close();
}
}
} | java | {
"resource": ""
} |
q162152 | Decoder.getOldCodeId | train | private String getOldCodeId(FileStatus srcStat ) throws IOException {
// Determines the legacy codec id ("xor" or "rs") protecting this file:
// either this decoder's own codec id, or whichever legacy parity file
// exists for the file. Returns null when neither parity file is found.
if (codec.id.equals("xor") || codec.id.equals("rs")) {
return codec.id;
} else {
// Search for xor/rs parity files
if (ParityFilePair.getParityFile(
Codec.getCodec("xor"), srcStat, this.conf) != null)
return "xor";
if (ParityFilePair.getParityFile(
Codec.getCodec("rs"), srcStat, this.conf) != null)
return "rs";
}
return null;
} | java | {
"resource": ""
} |
q162153 | HsftpFileSystem.setupSsl | train | private static void setupSsl(Configuration conf) {
// Copies the client-side SSL key/trust store settings from the resource
// named by dfs.https.client.keystore.resource (default ssl-client.xml)
// into the JVM-wide javax.net.ssl system properties.
// NOTE(review): mutates global JVM state; affects every SSL connection in
// this process, not just this filesystem instance.
Configuration sslConf = new Configuration(false);
sslConf.addResource(conf.get("dfs.https.client.keystore.resource",
"ssl-client.xml"));
System.setProperty("javax.net.ssl.trustStore", sslConf.get(
"ssl.client.truststore.location", ""));
System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
"ssl.client.truststore.password", ""));
System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
"ssl.client.truststore.type", "jks"));
System.setProperty("javax.net.ssl.keyStore", sslConf.get(
"ssl.client.keystore.location", ""));
System.setProperty("javax.net.ssl.keyStorePassword", sslConf.get(
"ssl.client.keystore.password", ""));
System.setProperty("javax.net.ssl.keyPassword", sslConf.get(
"ssl.client.keystore.keypassword", ""));
System.setProperty("javax.net.ssl.keyStoreType", sslConf.get(
"ssl.client.keystore.type", "jks"));
} | java | {
"resource": ""
} |
q162154 | TaskController.setup | train | void setup() {
// Reads the confirmed-kill settings from configuration. The defaults
// (waitForConfirmedKill / confirmedKillRetries fields) are left untouched
// unless liveness checks are supported.
// Cannot set wait for confirmed kill mode if cannot check if task is alive
if (supportsIsTaskAlive()) {
waitForConfirmedKill = getConf().getBoolean(WAIT_FOR_CONFIRMED_KILL_KEY,
WAIT_FOR_CONFIRMED_DEFAULT);
confirmedKillRetries = getConf().getInt(CONFIRMED_KILL_RETRIES_KEY,
CONFIRMED_KILL_RETRIES_DEFAULT);
}
LOG.info("setup: waitForConfirmedKill=" + waitForConfirmedKill +
", confirmedKillRetries=" + confirmedKillRetries);
} | java | {
"resource": ""
} |
q162155 | TaskController.destroyTaskJVM | train | final void destroyTaskJVM(TaskControllerContext context) {
// Kills the task JVM on a dedicated thread so a slow or blocked kill cannot
// stall the caller. When waitForConfirmedKill is enabled, blocks until the
// killer thread has finished.
Thread taskJVMDestroyer = new Thread(new DestroyJVMTaskRunnable(context));
taskJVMDestroyer.start();
if (waitForConfirmedKill) {
try {
taskJVMDestroyer.join();
} catch (InterruptedException e) {
// Restore the interrupt status so upstream code can observe it,
// and preserve the cause on the rethrown exception.
Thread.currentThread().interrupt();
throw new IllegalStateException("destroyTaskJVM: Failed to join " +
taskJVMDestroyer.getName(), e);
}
}
} | java | {
"resource": ""
} |
q162156 | INodeRegularStorage.getPenultimateBlock | train | @Override
public Block getPenultimateBlock() {
// Second-to-last block of this file, or null when fewer than two exist.
if (blocks == null) {
return null;
}
int count = blocks.length;
return count < 2 ? null : blocks[count - 2];
} | java | {
"resource": ""
} |
q162157 | INodeRegularStorage.addBlock | train | @Override
public void addBlock(BlockInfo newblock) {
// Appends newblock to this file's block list by growing the backing array
// by one slot (the list is stored as a plain array, not a collection).
if (this.blocks == null) {
this.blocks = new BlockInfo[1];
this.blocks[0] = newblock;
} else {
int size = this.blocks.length;
BlockInfo[] newlist = new BlockInfo[size + 1];
System.arraycopy(this.blocks, 0, newlist, 0, size);
newlist[size] = newblock;
this.blocks = newlist;
}
} | java | {
"resource": ""
} |
q162158 | INodeRegularStorage.convertToRaidStorage | train | @Override
public INodeRaidStorage convertToRaidStorage(BlockInfo[] parityBlocks,
RaidCodec codec, int[] checksums, BlocksMap blocksMap, short replication,
INodeFile inode) throws IOException {
// Converts this regular storage into raided storage by merging the given
// parity blocks with the existing data blocks via the codec.
// Throws IOException when no codec is supplied.
if (codec == null) {
throw new IOException("Codec is null");
} else {
return new INodeRaidStorage(codec.convertToRaidStorage(parityBlocks,
blocks, checksums, blocksMap, replication, inode), codec);
}
} | java | {
"resource": ""
} |
q162159 | BlockCompressorStream.write | train | public void write(byte[] b, int off, int len) throws IOException {
// Compresses and writes len bytes from b[off..). Data is fed to the block
// compressor in segments of at most MAX_INPUT_SIZE; a pending segment is
// flushed first if adding this write would overflow it.
// Sanity checks
if (compressor.finished()) {
throw new IOException("write beyond end of stream");
}
if (b == null) {
throw new NullPointerException();
} else if ((off < 0) || (off > b.length) || (len < 0) ||
((off + len) > b.length)) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
long limlen = compressor.getBytesRead();
if (len + limlen > MAX_INPUT_SIZE && limlen > 0) {
// Adding this segment would exceed the maximum size.
// Flush data if we have it.
finish();
compressor.reset();
}
if (len > MAX_INPUT_SIZE) {
// The data we're given exceeds the maximum size. Any data
// we had have been flushed, so we write out this chunk in segments
// not exceeding the maximum size until it is exhausted.
rawWriteInt(len);
do {
int bufLen = Math.min(len, MAX_INPUT_SIZE);
compressor.setInput(b, off, bufLen);
compressor.finish();
while (!compressor.finished()) {
compress();
}
compressor.reset();
off += bufLen;
len -= bufLen;
} while (len > 0);
return;
}
// Give data to the compressor
compressor.setInput(b, off, len);
if (!compressor.needsInput()) {
// compressor buffer size might be smaller than the maximum
// size, so we permit it to flush if required.
rawWriteInt((int)compressor.getBytesRead());
do {
compress();
} while (!compressor.needsInput());
}
} | java | {
"resource": ""
} |
q162160 | DatanodeBlockInfo.detachBlock | train | boolean detachBlock(int namespaceId, Block block, int numLinks) throws IOException {
// Copy-on-write support: if the block data (or its metadata file, when
// checksums are stored separately) is hard-linked more than numLinks times,
// break the links by copying the file. Returns false if already detached,
// true after a successful detach.
if (isDetached()) {
return false;
}
if (blockDataFile.getFile() == null || blockDataFile.volume == null) {
throw new IOException("detachBlock:Block not found. " + block);
}
File meta = null;
if (!inlineChecksum) {
// Separate .meta file exists only when checksums are not inlined.
meta = BlockWithChecksumFileWriter.getMetaFile(blockDataFile.getFile(), block);
if (meta == null) {
throw new IOException("Meta file not found for block " + block);
}
}
if (HardLink.getLinkCount(blockDataFile.getFile()) > numLinks) {
DataNode.LOG.info("CopyOnWrite for block " + block);
detachFile(namespaceId, blockDataFile.getFile(), block);
}
if (!inlineChecksum) {
if (HardLink.getLinkCount(meta) > numLinks) {
detachFile(namespaceId, meta, block);
}
}
setDetached();
return true;
} | java | {
"resource": ""
} |
q162161 | Job.addDependingJob | train | public synchronized boolean addDependingJob(Job dependingJob) {
// Dependencies may only be registered while this job is still WAITING;
// returns false otherwise. The dependency list is created lazily.
if (this.state != Job.WAITING) {
return false;
}
if (this.dependingJobs == null) {
this.dependingJobs = new ArrayList<Job>();
}
return this.dependingJobs.add(dependingJob);
} | java | {
"resource": ""
} |
q162162 | Job.checkRunningState | train | private void checkRunningState() {
// Polls the job tracker for this job's status and moves the state machine
// to SUCCESS or FAILED when the job has completed. Any IOException talking
// to the tracker also marks the job FAILED.
RunningJob running = null;
try {
running = jc.getJob(this.mapredJobID);
if (running.isComplete()) {
if (running.isSuccessful()) {
this.state = Job.SUCCESS;
} else {
this.state = Job.FAILED;
this.message = "Job failed!";
try {
running.killJob();
} catch (IOException e1) {
// best-effort kill; a failure to kill an already-failed job is ignored
}
try {
this.jc.close();
} catch (IOException e2) {
// best-effort close of the job client
}
}
}
} catch (IOException ioe) {
this.state = Job.FAILED;
this.message = StringUtils.stringifyException(ioe);
try {
if (running != null)
running.killJob();
} catch (IOException e1) {
// best-effort kill on error path
}
try {
this.jc.close();
} catch (IOException e1) {
// best-effort close on error path
}
}
} | java | {
"resource": ""
} |
q162163 | Job.checkState | train | synchronized int checkState() {
// Recomputes and returns this job's state. A RUNNING job is refreshed
// against the job tracker; a WAITING job becomes READY once every
// dependency has succeeded, or DEPENDENT_FAILED if any dependency failed.
if (this.state == Job.RUNNING) {
checkRunningState();
}
if (this.state != Job.WAITING) {
return this.state;
}
if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
this.state = Job.READY;
return this.state;
}
Job pred = null;
int n = this.dependingJobs.size();
for (int i = 0; i < n; i++) {
pred = this.dependingJobs.get(i);
int s = pred.checkState();
if (s == Job.WAITING || s == Job.READY || s == Job.RUNNING) {
break; // a pred is still not completed, continue in WAITING
// state
}
if (s == Job.FAILED || s == Job.DEPENDENT_FAILED) {
this.state = Job.DEPENDENT_FAILED;
this.message = "depending job " + i + " with jobID "
+ pred.getJobID() + " failed. " + pred.getMessage();
break;
}
// pred must be in success state
if (i == n - 1) {
// all n dependencies succeeded: this job may now run
this.state = Job.READY;
}
}
return this.state;
} | java | {
"resource": ""
} |
q162164 | Job.submit | train | protected synchronized void submit() {
// Submits this job to the cluster, optionally pre-creating missing input
// directories, and transitions the state to RUNNING (or FAILED on error).
try {
if (theJobConf.getBoolean("create.empty.dir.if.nonexist", false)) {
FileSystem fs = FileSystem.get(theJobConf);
Path inputPaths[] = FileInputFormat.getInputPaths(theJobConf);
for (int i = 0; i < inputPaths.length; i++) {
if (!fs.exists(inputPaths[i])) {
try {
fs.mkdirs(inputPaths[i]);
} catch (IOException e) {
// best-effort: a failed mkdir will surface later at submit time
}
}
}
}
RunningJob running = jc.submitJob(theJobConf);
this.mapredJobID = running.getID();
this.state = Job.RUNNING;
} catch (IOException ioe) {
this.state = Job.FAILED;
this.message = StringUtils.stringifyException(ioe);
}
} | java | {
"resource": ""
} |
q162165 | SimpleSeekableFormatOutputStream.write | train | @Override
public void write(byte[] b, int start, int length) throws IOException {
// Buffers the bytes into the current data segment; the segment is flushed
// when flushIfNeeded() decides it is full.
currentDataSegmentBuffer.write(b, start, length);
flushIfNeeded();
} | java | {
"resource": ""
} |
q162166 | SimpleSeekableFormatOutputStream.flush | train | @Override
public void flush() throws IOException {
// Seals the buffered bytes into a DataSegment (compressing if a codec is
// set), records its metadata, writes it out and flushes the underlying
// stream. A no-op when nothing has been buffered.
// Do not do anything if no data has been written
if (currentDataSegmentBuffer.size() == 0) {
return;
}
// Create the current DataSegment
DataSegmentWriter currentDataSegment =
new DataSegmentWriter(currentDataSegmentBuffer, codec, codecCompressor);
// Update the metadata
updateMetadata(currentDataSegmentBuffer.size(), currentDataSegment.size());
// Write out the DataSegment
currentDataSegment.writeTo(dataSegmentDataOut);
// Clear out the current buffer. Note that this has to be done after
// currentDataSegment.writeTo(...), because currentDataSegment can
// keep a reference to the currentDataSegmentBuffer.
currentDataSegmentBuffer.reset();
// Flush out the underlying stream
dataSegmentDataOut.flush();
} | java | {
"resource": ""
} |
q162167 | DistributedFileSystem.checkPath | train | protected void checkPath(Path path) {
// Accepts the path without delegating to super when it only differs from
// this filesystem's URI by spelling out the default NameNode port.
URI thisUri = this.getUri();
URI thatUri = path.toUri();
String thatAuthority = thatUri.getAuthority();
// NOTE(review): thatAuthority can be null for scheme-only URIs, and
// indexOf(":") can be -1 if no port separator is present -- verify that
// callers cannot reach this branch with such URIs.
if (thatUri.getScheme() != null
&& thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
&& thatUri.getPort() == NameNode.DEFAULT_PORT
&& thisUri.getPort() == -1
&& thatAuthority.substring(0,thatAuthority.indexOf(":"))
.equalsIgnoreCase(thisUri.getAuthority()))
return;
super.checkPath(path);
} | java | {
"resource": ""
} |
q162168 | DistributedFileSystem.append | train | public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
// Opens f for append and wraps the DFS stream, seeding the position
// counter with the file's current length.
DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
return new FSDataOutputStream(op, statistics, op.getInitialLen());
} | java | {
"resource": ""
} |
q162169 | DistributedFileSystem.concat | train | public void concat(Path trg, Path [] psrcs, boolean restricted) throws IOException {
// Concatenates the source files onto trg in place (no data copy); paths
// are converted to their string form before the RPC.
String [] srcs = new String [psrcs.length];
for(int i=0; i<psrcs.length; i++) {
srcs[i] = getPathName(psrcs[i]);
}
dfs.concat(getPathName(trg), srcs, restricted);
} | java | {
"resource": ""
} |
q162170 | DistributedFileSystem.concat | train | @Deprecated
public void concat(Path trg, Path [] psrcs) throws IOException {
// Legacy overload: delegates with restricted=true. Prefer the
// three-argument form.
concat(trg, psrcs, true);
} | java | {
"resource": ""
} |
q162171 | DistributedFileSystem.setQuota | train | public void setQuota(Path src, long namespaceQuota, long diskspaceQuota)
throws IOException {
// Sets the namespace (file/dir count) and diskspace quotas on src.
dfs.setQuota(getPathName(src), namespaceQuota, diskspaceQuota);
} | java | {
"resource": ""
} |
q162172 | DistributedFileSystem.getFileStatus | train | public FileStatus getFileStatus(Path f) throws IOException {
// Fetches the status of f from the NameNode, qualifying the returned path
// against this filesystem. Throws FileNotFoundException when f is absent.
FileStatus fi = dfs.getFileInfo(getPathName(f));
if (fi != null) {
fi.makeQualified(this);
return fi;
} else {
throw new FileNotFoundException("File does not exist: " + f);
}
} | java | {
"resource": ""
} |
q162173 | DataJoinReducerBase.joinAndCollect | train | private void joinAndCollect(Object[] tags, ResetableIterator[] values,
Object key, OutputCollector output, Reporter reporter)
throws IOException {
// Entry point for the cross-product join: allocates the per-source partial
// list and starts the recursion at source 0. No-op when there are no
// sources.
if (values.length < 1) {
return;
}
Object[] partialList = new Object[values.length];
joinAndCollect(tags, values, 0, partialList, key, output, reporter);
} | java | {
"resource": ""
} |
q162174 | DataJoinReducerBase.joinAndCollect | train | private void joinAndCollect(Object[] tags, ResetableIterator[] values,
int pos, Object[] partialList, Object key,
OutputCollector output, Reporter reporter) throws IOException {
// Recursively enumerates the cross product of values[pos..]; once every
// source has contributed one value, combines the tuple and emits it.
if (values.length == pos) {
// get a value from each source. Combine them
TaggedMapOutput combined = combine(tags, partialList);
collect(key, combined, output, reporter);
return;
}
ResetableIterator nextValues = values[pos];
nextValues.reset();
while (nextValues.hasNext()) {
Object v = nextValues.next();
partialList[pos] = v;
joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
}
} | java | {
"resource": ""
} |
q162175 | HadoopServer.purgeJob | train | public void purgeJob(final HadoopJob job) {
// Removes the job from the tracked set and notifies listeners on the
// SWT/Eclipse UI thread (listeners typically update widgets).
runningJobs.remove(job.getJobID());
Display.getDefault().asyncExec(new Runnable() {
public void run() {
fireJobRemoved(job);
}
});
} | java | {
"resource": ""
} |
q162176 | HadoopServer.loadFromXML | train | public boolean loadFromXML(File file) throws ParserConfigurationException,
SAXException, IOException {
// Parses a Hadoop <configuration> XML file and merges its
// <property><name>/<value> pairs into a copy of the current configuration.
// Returns false (leaving this.conf untouched) when the document does not
// have the expected structure.
Configuration newConf = new Configuration(this.conf);
DocumentBuilder builder =
DocumentBuilderFactory.newInstance().newDocumentBuilder();
Document document = builder.parse(file);
Element root = document.getDocumentElement();
if (!"configuration".equals(root.getTagName()))
return false;
NodeList props = root.getChildNodes();
for (int i = 0; i < props.getLength(); i++) {
Node propNode = props.item(i);
if (!(propNode instanceof Element))
continue;
Element prop = (Element) propNode;
if (!"property".equals(prop.getTagName()))
return false;
NodeList fields = prop.getChildNodes();
String attr = null;
String value = null;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element))
continue;
Element field = (Element) fieldNode;
if ("name".equals(field.getTagName()))
attr = ((Text) field.getFirstChild()).getData();
if ("value".equals(field.getTagName()) && field.hasChildNodes())
value = ((Text) field.getFirstChild()).getData();
}
if (attr != null && value != null)
newConf.set(attr, value);
}
// Only publish the new configuration after the whole file parsed cleanly.
this.conf = newConf;
return true;
} | java | {
"resource": ""
} |
q162177 | HadoopServer.storeSettingsToFile | train | public void storeSettingsToFile(File file) throws IOException {
// Serializes the current configuration as XML into the given file.
FileOutputStream fos = new FileOutputStream(file);
try {
this.conf.writeXml(fos);
} finally {
// Always release the file handle, even when writeXml() throws
// (the original leaked the stream on that path).
fos.close();
}
} | java | {
"resource": ""
} |
q162178 | HadoopServer.addPluginConfigDefaultProperties | train | private void addPluginConfigDefaultProperties() {
// Fills in the plugin's default value for every known ConfProp that the
// configuration does not already define; existing values are preserved.
for (ConfProp prop : ConfProp.values()) {
if (conf.get(prop.name) == null)
conf.set(prop.name, prop.defVal);
}
} | java | {
"resource": ""
} |
q162179 | JobID.downgrade | train | public static JobID downgrade(org.apache.hadoop.mapreduce.JobID old) {
// Reuses the instance when it already is a mapred JobID; otherwise builds
// an equivalent one from its identifier and sequence number.
return (old instanceof JobID)
? (JobID) old
: new JobID(old.getJtIdentifier(), old.getId());
} | java | {
"resource": ""
} |
q162180 | APITrace.logCall | train | public static void logCall(long entryTime,
long returnTime,
int callIndex,
Object returnValue,
Object argValues[],
long streamId) {
// Emits one CSV trace line describing a single API call. Safe to invoke
// unconditionally: it is a no-op unless INFO-level tracing is enabled.
if (!API_TRACE_LOG.isInfoEnabled()) {
return;
}
// determine elapsed time
long elapsed = returnTime - entryTime;
entryTime -= baseTime;
// TODO: for the first entry, we get negatives for entryTime.
// is this something weird in order the Java instantiates?
// append universal fields (i.e., ones that occur for every call);
// chained append calls avoid building a temporary String per field
// (the original concatenated "x + \",\"" inside each append).
StringBuilder line = new StringBuilder();
line.append(pid).append(',');
line.append(nextEventId.getAndIncrement()).append(',');
line.append(entryTime).append(',');
line.append(elapsed).append(',');
line.append(callIndex).append(',');
line.append(streamId).append(',');
line.append(escape(returnValue));
// append the args to the method call
if (argValues != null) {
for (int i = 0; i < argValues.length; i++) {
line.append(',').append(escape(argValues[i]));
}
}
API_TRACE_LOG.info(line);
} | java | {
"resource": ""
} |
q162181 | SleepJobRunner.calcStats | train | private static Stats calcStats(List<Double> nums) {
double sum = 0.0, mean = 0.0, variance = 0.0, stdDev = 0.0;
for (Double d : nums) {
sum += d.doubleValue();
}
if (nums.size() > 0) {
mean = sum / nums.size();
}
sum = 0.0;
for (Double d : nums) {
sum += (d.doubleValue() - mean) * (d.doubleValue() - mean);
}
if (nums.size() > 0) {
variance = sum / nums.size();
}
stdDev = Math.sqrt(variance);
return new Stats(mean, variance, stdDev);
} | java | {
"resource": ""
} |
q162182 | PoolInfo.write | train | public void write(JsonGenerator jsonGenerator) throws IOException {
// Serializes this pool info as a JSON object with poolGroupName and
// poolName string fields.
jsonGenerator.writeStartObject();
jsonGenerator.writeStringField("poolGroupName", poolGroupName);
jsonGenerator.writeStringField("poolName", poolName);
jsonGenerator.writeEndObject();
} | java | {
"resource": ""
} |
q162183 | PoolInfo.createPoolInfoStrings | train | public static PoolInfoStrings createPoolInfoStrings(PoolInfo poolInfo) {
// Converts a PoolInfo into its string-pair form; null maps to null.
if (poolInfo == null) {
return null;
}
return new PoolInfoStrings(poolInfo.getPoolGroupName(),
poolInfo.getPoolName());
} | java | {
"resource": ""
} |
q162184 | PoolInfo.createPoolInfo | train | public static PoolInfo createPoolInfo(PoolInfoStrings poolInfoStrings) {
// Inverse of createPoolInfoStrings: rebuilds a PoolInfo; null maps to null.
if (poolInfoStrings == null) {
return null;
}
return new PoolInfo(poolInfoStrings.getPoolGroupName(),
poolInfoStrings.getPoolName());
} | java | {
"resource": ""
} |
q162185 | PoolInfo.isLegalPoolInfo | train | public static boolean isLegalPoolInfo(PoolInfo poolInfo) {
// A pool info is legal when both its group name and pool name are present,
// non-empty, and contain no characters matched by INVALID_REGEX_PATTERN.
if (poolInfo == null) {
return false;
}
String group = poolInfo.getPoolGroupName();
String pool = poolInfo.getPoolName();
if (group == null || pool == null) {
return false;
}
if (group.isEmpty() || INVALID_REGEX_PATTERN.matcher(group).matches()) {
return false;
}
if (pool.isEmpty() || INVALID_REGEX_PATTERN.matcher(pool).matches()) {
return false;
}
return true;
} | java | {
"resource": ""
} |
q162186 | FsPermission.applyUMask | train | public FsPermission applyUMask(FsPermission umask) {
// Returns a new permission with the umask bits cleared from each of the
// user/group/other actions (perm AND NOT umask); this object is unchanged.
return new FsPermission(useraction.and(umask.useraction.not()),
groupaction.and(umask.groupaction.not()),
otheraction.and(umask.otheraction.not()));
} | java | {
"resource": ""
} |
q162187 | FsPermission.valueOf | train | public static FsPermission valueOf(String unixSymbolicPermission) {
// Parses a 10-character unix symbolic permission string (e.g. "-rwxr-x--x",
// first character is the file type) into an FsPermission. Null input yields
// null; any other length is rejected.
if (unixSymbolicPermission == null) {
return null;
}
else if (unixSymbolicPermission.length() != 10) {
throw new IllegalArgumentException("length != 10(unixSymbolicPermission="
+ unixSymbolicPermission + ")");
}
int n = 0;
for(int i = 1; i < unixSymbolicPermission.length(); i++) {
n = n << 1;
char c = unixSymbolicPermission.charAt(i);
// '-' clears the bit; 'T' and 'S' (sticky/set-id without execute) are
// also treated as 0 here, i.e. those special bits are not preserved.
n += (c == '-' || c == 'T' || c == 'S') ? 0: 1;
}
return new FsPermission((short)n);
} | java | {
"resource": ""
} |
q162188 | SecondaryNameNode.initialize | train | private void initialize(Configuration conf) throws IOException {
// One-time setup: connects to the NameNode, prepares the local checkpoint
// storage directories, reads the checkpoint schedule, and starts the HTTP
// server used to serve/upload image files.
// initiate Java VM metrics
JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getClientProtocolAddress(conf);
this.conf = conf;
this.namenode =
(NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
NamenodeProtocol.versionID, nameNodeAddr, conf);
this.namenode.register();
// initialize checkpoint directories
fsName = getInfoServer();
checkpointDirs = getFileStorageDirs(NNStorageConfiguration
.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary"));
checkpointEditsDirs = getFileStorageDirs(NNStorageConfiguration
.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary"));
checkpointImage = new CheckpointStorage(conf);
checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
// Initialize other scheduling parameters from the configuration
checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
// initialize the webserver for uploading files.
String infoAddr =
NetUtils.getServerAddress(conf,
"dfs.secondary.info.bindAddress",
"dfs.secondary.info.port",
"dfs.secondary.http.address");
InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
int tmpInfoPort = infoSocAddr.getPort();
infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
tmpInfoPort == 0, conf);
infoServer.setAttribute("name.system.image", checkpointImage);
this.infoServer.setAttribute("name.conf", conf);
infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
infoServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
"(" + checkpointPeriod/60 + " min)");
LOG.warn("Log Size Trigger :" + checkpointTxnCount + " transactions ");
} | java | {
"resource": ""
} |
q162189 | SecondaryNameNode.shutdown | train | public void shutdown() {
// Stops the checkpoint loop and releases the HTTP server and checkpoint
// image. Each teardown step is best-effort: failures are logged so the
// remaining resources are still released.
shouldRun = false;
try {
if (infoServer != null) infoServer.stop();
} catch (Exception e) {
LOG.warn("Exception shutting down SecondaryNameNode", e);
}
try {
if (checkpointImage != null) checkpointImage.close();
} catch(IOException e) {
LOG.warn(StringUtils.stringifyException(e));
}
} | java | {
"resource": ""
} |
q162190 | SecondaryNameNode.getInfoServer | train | private String getInfoServer() throws IOException {
// Returns the primary NameNode's HTTP (info) server address from the
// configuration; rejects non-HDFS default filesystems.
URI fsName = FileSystem.getDefaultUri(conf);
if (!"hdfs".equals(fsName.getScheme())) {
throw new IOException("This is not a DFS");
}
return NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
"dfs.info.port", "dfs.http.address");
} | java | {
"resource": ""
} |
q162191 | SecondaryNameNode.doCheckpoint | train | boolean doCheckpoint() throws IOException {
// Runs one full checkpoint cycle: roll the NameNode's edit log, download
// the image and new edit segments, merge them locally, upload the merged
// image back, and tell the NameNode to adopt it. Returns whether a fresh
// image had to be (down)loaded.
LOG.info("Checkpoint starting");
// Do the required initialization of the merge work area.
startCheckpoint();
checkpointImage.ensureCurrentDirExists();
NNStorage dstStorage = checkpointImage.storage;
// Tell the namenode to start logging transactions in a new edit file
// Returns a token that would be used to upload the merged image.
CheckpointSignature sig = namenode.rollEditLog();
// Make sure we're talking to the same NN!
if (checkpointImage.getNamespaceID() != 0) {
// If the image actually has some data, make sure we're talking
// to the same NN as we did before.
sig.validateStorageInfo(checkpointImage.storage);
} else {
// if we're a fresh 2NN, just take the storage info from the server
// we first talk to.
dstStorage.setStorageInfo(sig);
}
// error simulation code for junit test
InjectionHandler.processEventIO(InjectionEvent.SECONDARYNAMENODE_CHECKPOINT0);
RemoteEditLogManifest manifest = namenode
.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
boolean loadImage = downloadCheckpointFiles(fsName, checkpointImage, sig,
manifest); // Fetch fsimage and edits
doMerge(sig, manifest, loadImage, checkpointImage);
//
// Upload the new image into the NameNode. Then tell the Namenode
// to make this new uploaded image as the most current image.
//
long txid = checkpointImage.getLastAppliedTxId();
TransferFsImage.uploadImageFromStorage(fsName, InetAddress.getLocalHost()
.getHostAddress(), infoPort, dstStorage, txid);
// error simulation code for junit test
InjectionHandler.processEventIO(InjectionEvent.SECONDARYNAMENODE_CHECKPOINT1);
namenode.rollFsImage(new CheckpointSignature(checkpointImage));
LOG.warn("Checkpoint done. New Image Size: "
+ dstStorage.getFsImageName(StorageLocationType.LOCAL, txid).length());
// Since we've successfully checkpointed, we can remove some old
// image files
checkpointImage.purgeOldStorage();
return loadImage;
} | java | {
"resource": ""
} |
q162192 | SecondaryNameNode.doMerge | train | private void doMerge(CheckpointSignature sig, RemoteEditLogManifest manifest,
boolean loadImage, FSImage dstImage) throws IOException {
// Merges the downloaded edits into the checkpoint image; when a fresh
// image was downloaded, a new empty namesystem is created to load it into.
if (loadImage) { // create an empty namespace if new image
namesystem = new FSNamesystem(checkpointImage, conf);
checkpointImage.setFSNamesystem(namesystem);
}
assert namesystem.dir.fsImage == checkpointImage;
checkpointImage.doMerge(sig, manifest, loadImage);
} | java | {
"resource": ""
} |
q162193 | GenWriterThread.writeControlFile | train | private void writeControlFile(FileSystem fs, Path outputPath,
Path checksumFile, String name) throws IOException {
// Records the (outputPath -> checksumFile) pair as a one-entry
// SequenceFile under <input>/filelists/<name>, creating the parent
// directory if needed.
SequenceFile.Writer write = null;
try {
Path parentDir = new Path(rtc.input, "filelists");
if (!fs.exists(parentDir)) {
fs.mkdirs(parentDir);
}
Path controlFile = new Path(parentDir, name);
write = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
Text.class, Text.class, CompressionType.NONE);
write.append(new Text(outputPath.toString()),
new Text(checksumFile.toString()));
} finally {
if (write != null)
write.close();
// NOTE(review): this assignment is dead -- the local goes out of scope
// immediately after the finally block.
write = null;
}
} | java | {
"resource": ""
} |
q162194 | GenWriterThread.prepare | train | @Override
public GenThread[] prepare(JobConf conf, Text key, Text value)
throws IOException {
// Builds the run-time constants for this writer task from the job conf
// (intervals are configured in seconds and converted to ms; data rate in
// KB/s converted to bytes/s) and creates one GenWriterThread per thread
// slot, each with its own output path under <input>/<task_name>/.
this.rtc = new GenWriterRunTimeConstants();
super.prepare(conf, key, value, rtc);
rtc.task_name = key.toString() + rtc.taskID;
rtc.roll_interval = conf.getLong(WRITER_ROLL_INTERVAL_KEY,
DEFAULT_ROLL_INTERVAL_SEC) * 1000;
rtc.sync_interval = conf.getLong(WRITER_SYNC_INTERVAL_KEY,
DEFAULT_SYNC_INTERVAL_SEC) * 1000;
rtc.max_time = conf.getLong(MAX_TIME_SEC_KEY, DEFAULT_MAX_TIME_SEC) * 1000;
rtc.data_rate = conf.getLong(WRITER_DATARATE_KEY, DEFAULT_DATA_RATE) * 1024;
rtc.input = value.toString();
LOG.info("data rate: " + rtc.data_rate);
GenWriterThread[] threads = new GenWriterThread[(int)rtc.nthreads];
for (int i=0; i<rtc.nthreads; i++) {
threads[i] = new GenWriterThread(conf,
new Path(new Path(rtc.input, rtc.task_name),
rtc.task_name + "_" + i), rtc.task_name, i, rtc);
}
return threads;
} | java | {
"resource": ""
} |
q162195 | Ingest.setCatchingUp | train | private void setCatchingUp() throws IOException {
// Updates the catchingUp flag: true when the in-progress input segment is
// more than catchUpLag bytes behind its end, when the segment is finalized
// (or missing), or when the check itself fails -- i.e. we err on the side
// of "still catching up".
try {
if (inputEditStream != null && inputEditStream.isInProgress()) {
catchingUp = (inputEditStream.length() - inputEditStream.getPosition() > catchUpLag);
} else {
catchingUp = true;
}
} catch (Exception e) {
catchingUp = true;
}
} | java | {
"resource": ""
} |
q162196 | Ingest.getLagBytes | train | public long getLagBytes() {
// Returns how many bytes of the in-progress edits segment remain to be
// consumed, or -1 when the lag cannot be determined (no in-progress
// segment, a stale length, or an I/O error).
try {
if (inputEditStream != null && inputEditStream.isInProgress()) {
// for file journals it may happen that we read a segment finalized
// by primary, but not refreshed by the standby, so length() returns 0
// hence we take max(-1,lag)
return Math.max(-1,
inputEditStream.length() - this.inputEditStream.getPosition());
}
return -1;
} catch (IOException ex) {
LOG.error("Error getting the lag", ex);
return -1;
}
} | java | {
"resource": ""
} |
q162197 | Ingest.loadFSEdits | train | private int loadFSEdits() throws IOException {
// Consumes the current edits segment into the namesystem and returns the
// number of edits applied. Always closes the input stream, syncs any
// un-synced transactions, and clears the standby's ingest state, even on
// failure.
FSDirectory fsDir = fsNamesys.dir;
int numEdits = 0;
long startTime = FSNamesystem.now();
LOG.info("Ingest: Consuming transactions: " + this.toString());
try {
logVersion = inputEditStream.getVersion();
if (!LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, logVersion))
throw new RuntimeException("Log version is too old");
currentPosition = inputEditStream.getPosition();
numEdits = ingestFSEdits(); // continue to ingest
} finally {
LOG.info("Ingest: Closing ingest for segment: " + this.toString());
// At this time we are done reading the transaction log
// We need to sync to have on disk status the same as in memory
// if we saw end segment, we already synced
if(endTxId == -1 && fsDir.fsImage.getEditLog().isOpen()) {
fsDir.fsImage.getEditLog().logSync();
}
inputEditStream.close();
standby.clearIngestState();
}
LOG.info("Ingest: Edits segment: " + this.toString()
+ " edits # " + numEdits
+ " loaded in " + (FSNamesystem.now()-startTime)/1000 + " seconds.");
// A non-current layout forces an image save as soon as possible.
if (logVersion != FSConstants.LAYOUT_VERSION) // other version
numEdits++; // save this image asap
return numEdits;
} | java | {
"resource": ""
} |
q162198 | Ingest.ingestFSEdit | train | private FSEditLogOp ingestFSEdit(EditLogInputStream inputEditLog)
throws IOException {
// Reads the next edit-log operation. Returns null at end of stream;
// IOExceptions propagate (handled by ingestFSEdits()), and any other
// failure is wrapped in an IOException.
FSEditLogOp op = null;
try {
op = inputEditLog.readOp();
InjectionHandler.processEventIO(InjectionEvent.INGEST_READ_OP);
} catch (EOFException e) {
return null; // No more transactions.
} catch (IOException e) {
// rethrow, it's handled in ingestFSEdits()
throw e;
} catch (Exception e) {
// some other problem, maybe unchecked exception
throw new IOException(e);
}
return op;
} | java | {
"resource": ""
} |
q162199 | Ingest.shouldLoad | train | private boolean shouldLoad(long txid) {
// A transaction is applied to the namesystem only when it is newer than
// the last transaction known to be correct; older ones are still written
// to the edit log but skipped here (and the skip is logged).
if (txid > standby.getLastCorrectTxId()) {
return true;
}
LOG.info("Ingest: skip loading txId: " + txid
+ " to namesystem, but writing to edit log, last correct txid: "
+ standby.getLastCorrectTxId());
return false;
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.