_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q160500
DFSClient.delete
train
/**
 * Deletes the path src via the namenode.
 *
 * @param src path to delete
 * @param recursive whether to delete directory contents recursively
 * @return true if the deletion succeeded
 * @throws IOException on RPC failure, unwrapping AccessControlException
 */
public boolean delete(String src, boolean recursive) throws IOException {
  checkOpen();
  clearFileStatusCache();
  try {
    return namenode.delete(src, recursive);
  } catch (RemoteException remote) {
    throw remote.unwrapRemoteException(AccessControlException.class);
  }
}
java
{ "resource": "" }
q160501
DFSClient.listPaths
train
/**
 * Lists the entries of src, dispatching to the version-based or
 * method-based protocol variant depending on proxy availability.
 *
 * @throws IOException on RPC failure, unwrapping AccessControlException
 */
public FileStatus[] listPaths(String src) throws IOException {
  checkOpen();
  metrics.incLsCalls();
  try {
    return (namenodeProtocolProxy == null)
        ? versionBasedListPath(src)
        : methodBasedListPath(src);
  } catch (RemoteException remote) {
    throw remote.unwrapRemoteException(AccessControlException.class);
  }
}
java
{ "resource": "" }
q160502
DFSClient.versionBasedListPathWithLocation
train
/**
 * Chooses a located-listing strategy by namenode protocol version:
 * bulk iterator when supported, per-entry array listing otherwise.
 */
private RemoteIterator<LocatedFileStatus> versionBasedListPathWithLocation(
    final String src) throws IOException {
  return namenodeVersion >= ClientProtocol.BULK_BLOCK_LOCATIONS_VERSION
      ? iteratorListing(src)
      : arrayListing(src);
}
java
{ "resource": "" }
q160503
DFSClient.methodBasedListPathWithLocation
train
/**
 * Chooses a located-listing strategy by probing the proxy for the
 * getLocatedPartialListing RPC; falls back to array listing when absent.
 */
private RemoteIterator<LocatedFileStatus> methodBasedListPathWithLocation(
    final String src) throws IOException {
  boolean hasBulkRpc = namenodeProtocolProxy.isMethodSupported(
      "getLocatedPartialListing", String.class, byte[].class);
  return hasBulkRpc ? iteratorListing(src) : arrayListing(src);
}
java
{ "resource": "" }
q160504
DFSClient.arrayListing
train
/**
 * Lists src as a RemoteIterator of LocatedFileStatus by fetching the whole
 * listing up front (via listPaths) and resolving block locations lazily,
 * one entry at a time, as the iterator is consumed.
 *
 * @throws FileNotFoundException if src does not exist
 */
private RemoteIterator<LocatedFileStatus> arrayListing(final String src)
    throws IOException {
  return new RemoteIterator<LocatedFileStatus>() {
    // Full listing, fetched once in the initializer below.
    private FileStatus[] stats;
    // Index of the next entry to hand out.
    private int i = 0;

    { //initializer
      stats = listPaths(src);
      if (stats == null) {
        throw new FileNotFoundException("File " + src + " does not exist.");
      }
    }

    @Override
    public boolean hasNext() throws IOException {
      return i < stats.length;
    }

    @Override
    public LocatedFileStatus next() throws IOException {
      if (!hasNext()) {
        throw new NoSuchElementException("No more entry in " + src);
      }
      FileStatus result = stats[i++];
      // Directories carry no block locations; files resolve them on demand.
      BlockLocation[] locs = result.isDir() ? null : getBlockLocations(
          result.getPath().toUri().getPath(), 0, result.getLen());
      return new LocatedFileStatus(result, locs);
    }
  };
}
java
{ "resource": "" }
q160505
DFSClient.iteratorListing
train
/**
 * Lists src as a RemoteIterator of LocatedFileStatus, fetching entries
 * together with their block locations from the namenode one batch at a time.
 *
 * @throws FileNotFoundException if src does not exist (or disappears
 *         between batch fetches)
 */
private RemoteIterator<LocatedFileStatus> iteratorListing(final String src)
    throws IOException {
  return new RemoteIterator<LocatedFileStatus>() {
    // Current batch of entries; replaced when exhausted and more remain.
    private LocatedDirectoryListing thisListing;
    // Index of the next entry within the current batch.
    private int i;

    { // initializer
      // fetch the first batch of entries in the directory
      thisListing = namenode.getLocatedPartialListing(
          src, HdfsFileStatus.EMPTY_NAME);
      if (thisListing == null) { // the directory does not exist
        throw new FileNotFoundException("File " + src + " does not exist.");
      }
    }

    @Override
    public boolean hasNext() throws IOException {
      if (i >= thisListing.getPartialListing().length
          && thisListing.hasMore()) {
        // current listing is exhausted & fetch a new listing
        thisListing = namenode.getLocatedPartialListing(
            src, thisListing.getLastName());
        if (thisListing == null) {
          // the directory was deleted between batch fetches
          throw new FileNotFoundException("File " + src + " does not exist.");
        }
        i = 0;
      }
      return i < thisListing.getPartialListing().length;
    }

    @Override
    public LocatedFileStatus next() throws IOException {
      if (!hasNext()) {
        throw new java.util.NoSuchElementException("No more entry in " + src);
      }
      // Pair the entry with its pre-fetched block locations, advancing i.
      return HdfsFileStatus.toLocatedFileStatus(thisListing.getPartialListing()[i],
          thisListing.getBlockLocations()[i++], src);
    }
  };
}
java
{ "resource": "" }
q160506
DFSClient.iterativeListing
train
/**
 * Lists all entries of directory src eagerly, issuing as many
 * partial-listing RPCs as needed and converting each HdfsFileStatus to a
 * FileStatus.
 *
 * @return the full listing, or null if the directory does not exist
 *         (or is deleted while listing)
 */
private FileStatus[] iterativeListing(String src) throws IOException {
  // fetch the first batch of entries in the directory
  DirectoryListing thisListing = namenode.getPartialListing(
      src, HdfsFileStatus.EMPTY_NAME);
  if (thisListing == null) { // the directory does not exist
    return null;
  }
  HdfsFileStatus[] partialListing = thisListing.getPartialListing();
  if (!thisListing.hasMore()) { // got all entries of the directory
    FileStatus[] stats = new FileStatus[partialListing.length];
    for (int i = 0; i < partialListing.length; i++) {
      stats[i] = HdfsFileStatus.toFileStatus(partialListing[i], src);
    }
    return stats;
  }
  // The directory size is too big that it needs to fetch more
  // estimate the total number of entries in the directory
  int totalNumEntries = partialListing.length
      + thisListing.getRemainingEntries();
  ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
  // add the first batch of entries to the array list
  for (HdfsFileStatus fileStatus : partialListing) {
    listing.add(HdfsFileStatus.toFileStatus(fileStatus, src));
  }
  // now fetch more entries
  do {
    thisListing = namenode.getPartialListing(src, thisListing.getLastName());
    if (thisListing == null) {
      return null; // the directory is deleted
    }
    partialListing = thisListing.getPartialListing();
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(HdfsFileStatus.toFileStatus(fileStatus, src));
    }
  } while (thisListing.hasMore());
  return listing.toArray(new FileStatus[listing.size()]);
}
java
{ "resource": "" }
q160507
DFSClient.getFileCrc
train
/**
 * Returns the CRC of the file at src, after verifying this client is open.
 * Delegates to the static overload with this client's connection state.
 */
int getFileCrc(String src) throws IOException {
  checkOpen();
  return getFileCrc(dataTransferVersion, src, namenode,
      namenodeProtocolProxy, socketFactory, socketTimeout);
}
java
{ "resource": "" }
q160508
DFSClient.setPermission
train
/**
 * Sets the permission bits on src via the namenode.
 *
 * @throws IOException on RPC failure, unwrapping access-control and
 *         file-not-found errors
 */
public void setPermission(String src, FsPermission permission)
    throws IOException {
  checkOpen();
  clearFileStatusCache();
  try {
    namenode.setPermission(src, permission);
  } catch (RemoteException remote) {
    throw remote.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class);
  }
}
java
{ "resource": "" }
q160509
DFSClient.setOwner
train
/**
 * Sets the owner and/or group of src via the namenode.
 *
 * @throws IOException on RPC failure, unwrapping access-control and
 *         file-not-found errors
 */
public void setOwner(String src, String username, String groupname)
    throws IOException {
  checkOpen();
  clearFileStatusCache();
  try {
    namenode.setOwner(src, username, groupname);
  } catch (RemoteException remote) {
    throw remote.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class);
  }
}
java
{ "resource": "" }
q160510
DFSClient.versionBasedListCorruptFileBlocks
train
/**
 * Lists corrupt file blocks under path. When the namenode predates the
 * listCorruptFileBlocks RPC, falls back to the legacy getCorruptFiles call,
 * filtering results by the path prefix.
 */
private CorruptFileBlocks versionBasedListCorruptFileBlocks(String path,
    String cookie) throws IOException {
  if (namenodeVersion < ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION) {
    LOG.info("NameNode version is " + namenodeVersion
        + " Using older version of getCorruptFiles.");
    if (cookie != null ) {
      // Legacy RPC is not paginated: a non-null cookie means everything
      // was already returned on the first call.
      return new CorruptFileBlocks(new String[0], "");
    }
    ArrayList<String> str = new ArrayList<String>();
    for (FileStatus stat : namenode.getCorruptFiles()) {
      String filename = stat.getPath().toUri().getPath();
      if (filename.startsWith(path)) {
        str.add(filename);
      }
    }
    return new CorruptFileBlocks(str.toArray(new String[str.size()]), "");
  }
  return namenode.listCorruptFileBlocks(path, cookie);
}
java
{ "resource": "" }
q160511
DFSClient.methodBasedListCorruptFileBlocks
train
/**
 * Lists corrupt file blocks under path, probing the proxy for the
 * listCorruptFileBlocks RPC. The legacy fallback mirrors
 * versionBasedListCorruptFileBlocks: unpaginated, filtered by path prefix.
 */
private CorruptFileBlocks methodBasedListCorruptFileBlocks(String path,
    String cookie) throws IOException {
  if (!namenodeProtocolProxy.isMethodSupported("listCorruptFileBlocks",
      String.class, String.class)) {
    LOG.info("NameNode version is " + namenodeVersion
        + " Using older version of getCorruptFiles.");
    if (cookie != null ) {
      // Legacy RPC is not paginated; a non-null cookie means done.
      return new CorruptFileBlocks(new String[0], "");
    }
    ArrayList<String> str = new ArrayList<String>();
    for (FileStatus stat : namenode.getCorruptFiles()) {
      String filename = stat.getPath().toUri().getPath();
      if (filename.startsWith(path)) {
        str.add(filename);
      }
    }
    return new CorruptFileBlocks(str.toArray(new String[str.size()]), "");
  }
  return namenode.listCorruptFileBlocks(path, cookie);
}
java
{ "resource": "" }
q160512
DFSClient.versionBasedSaveNamespace
train
/**
 * Saves the namespace, passing force/uncompressed only when the namenode
 * protocol version supports them; older namenodes get the no-arg call.
 */
private void versionBasedSaveNamespace(boolean force, boolean uncompressed)
    throws AccessControlException, IOException {
  if (namenodeVersion < ClientProtocol.SAVENAMESPACE_FORCE) {
    namenode.saveNamespace();
  } else {
    namenode.saveNamespace(force, uncompressed);
  }
}
java
{ "resource": "" }
q160513
DFSClient.methodBasedSaveNamespace
train
/**
 * Saves the namespace, probing the proxy for the two-argument RPC and
 * falling back to the no-arg call when it is unsupported.
 */
private void methodBasedSaveNamespace(boolean force, boolean uncompressed)
    throws AccessControlException, IOException {
  boolean supportsArgs = namenodeProtocolProxy.isMethodSupported(
      "saveNamespace", boolean.class, boolean.class);
  if (supportsArgs) {
    namenode.saveNamespace(force, uncompressed);
  } else {
    namenode.saveNamespace();
  }
}
java
{ "resource": "" }
q160514
DFSClient.setQuota
train
/**
 * Sets the namespace and diskspace quotas on src. Each quota must be
 * positive or one of the sentinels QUOTA_DONT_SET / QUOTA_RESET.
 * NOTE(review): unlike the other mutating client calls in this file, this
 * does not invoke checkOpen() -- confirm whether that is intentional.
 *
 * @throws IllegalArgumentException if either quota value is invalid
 * @throws IOException on RPC failure, unwrapping quota and access errors
 */
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
    throws IOException {
  // sanity check
  if ((namespaceQuota <= 0
      && namespaceQuota != FSConstants.QUOTA_DONT_SET
      && namespaceQuota != FSConstants.QUOTA_RESET)
      || (diskspaceQuota <= 0
      && diskspaceQuota != FSConstants.QUOTA_DONT_SET
      && diskspaceQuota != FSConstants.QUOTA_RESET)) {
    throw new IllegalArgumentException("Invalid values for quota : "
        + namespaceQuota + " and " + diskspaceQuota);
  }
  try {
    namenode.setQuota(src, namespaceQuota, diskspaceQuota);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class);
  }
}
java
{ "resource": "" }
q160515
DFSClient.setTimes
train
/**
 * Sets modification and access times on src via the namenode.
 *
 * @param mtime modification time in milliseconds
 * @param atime access time in milliseconds
 */
public void setTimes(String src, long mtime, long atime) throws IOException {
  checkOpen();
  clearFileStatusCache();
  try {
    namenode.setTimes(src, mtime, atime);
  } catch (RemoteException remote) {
    throw remote.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class);
  }
}
java
{ "resource": "" }
q160516
DFSClient.checkBlockRange
train
/**
 * Validates that blockRange exactly covers the segment
 * [offset, offset + length): the first block starts at or before offset,
 * the last block ends at or after the segment end, consecutive blocks are
 * contiguous, and no block is redundant.
 *
 * @throws IOException if the block list does not match the queried segment
 */
static void checkBlockRange(List<LocatedBlock> blockRange,
    long offset, long length) throws IOException {
  boolean isValid = false;
  if (!blockRange.isEmpty()) {
    int numBlocks = blockRange.size();
    LocatedBlock firstBlock = blockRange.get(0);
    LocatedBlock lastBlock = blockRange.get(numBlocks - 1);
    long segmentEnd = offset + length;
    // Check that the queried segment is between the beginning of the first
    // block and the end of the last block in the block range.
    if (firstBlock.getStartOffset() <= offset
        && (segmentEnd <= lastBlock.getStartOffset()
            + lastBlock.getBlockSize())) {
      isValid = true; // There is a chance the block list is valid
      LocatedBlock prevBlock = firstBlock;
      for (int i = 1; i < numBlocks; ++i) {
        // In this loop, prevBlock is always the block #(i - 1) and curBlock
        // is the block #i.
        long prevBlkEnd = prevBlock.getStartOffset()
            + prevBlock.getBlockSize();
        LocatedBlock curBlock = blockRange.get(i);
        long curBlkOffset = curBlock.getStartOffset();
        if (prevBlkEnd != curBlkOffset || // Blocks are not contiguous
            prevBlkEnd <= offset ||       // Previous block is redundant
            segmentEnd <= curBlkOffset) { // Current block is redundant
          isValid = false;
          break;
        }
        prevBlock = curBlock;
      }
    }
  }
  if (!isValid) {
    throw new IOException("Got incorrect block range for "
        + "offset=" + offset + ", length=" + length + ": " + blockRange);
  }
}
java
{ "resource": "" }
q160517
DFSClient.reportChecksumFailure
train
/**
 * Reports the given corrupt blocks for file; a failure to report is logged
 * and swallowed deliberately (best-effort), never propagated to the reader.
 */
void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
  try {
    reportBadBlocks(lblocks);
  } catch (IOException reportFailure) {
    LOG.info("Found corruption while reading " + file
        + ". Error repairing corrupt blocks. Bad blocks remain. "
        + StringUtils.stringifyException(reportFailure));
  }
}
java
{ "resource": "" }
q160518
DFSClient.getDataTransferProtocolVersion
train
public int getDataTransferProtocolVersion() throws IOException { synchronized (dataTransferVersion) { if (dataTransferVersion == -1) { // Get the version number from NN try { int remoteDataTransferVersion = namenode.getDataTransferProtocolVersion(); updateDataTransferProtocolVersionIfNeeded(remoteDataTransferVersion); } catch (RemoteException re) { IOException ioe = re.unwrapRemoteException(IOException.class); if (ioe.getMessage().startsWith(IOException.class.getName() + ": " + NoSuchMethodException.class.getName())) { dataTransferVersion = 14; // last version not supportting this RPC } else { throw ioe; } } if (LOG.isDebugEnabled()) { LOG.debug("Data Transfer Protocal Version is "+ dataTransferVersion); } } return dataTransferVersion; } }
java
{ "resource": "" }
q160519
DFSClient.isInLocalRack
train
/**
 * Returns true if addr maps to the same network location (rack) as the
 * local host, per the configured DNS-to-switch mapping. Returns false when
 * the mapping or local location is unknown, or addr is unresolved.
 */
public boolean isInLocalRack(InetSocketAddress addr) {
  if (dnsToSwitchMapping == null || this.localhostNetworkLocation == null) {
    return false;
  }
  // Fix: an unresolved socket address has no InetAddress; treat it as
  // non-local instead of throwing NPE on getAddress().getHostAddress().
  if (addr.getAddress() == null) {
    return false;
  }
  ArrayList<String> tempList = new ArrayList<String>();
  tempList.add(addr.getAddress().getHostAddress());
  List<String> retList = dnsToSwitchMapping.resolve(tempList);
  if (retList != null && retList.size() > 0) {
    return retList.get(0).equals(this.localhostNetworkLocation);
  }
  return false;
}
java
{ "resource": "" }
q160520
RaidMissingBlocks.getTotalCount
train
/** Sums the missing-block counts across all per-codec queues. */
int getTotalCount() {
  int total = 0;
  for (RaidMissingBlocksPerCodec perCodec : queues.values()) {
    total += perCodec.getTotalCount();
  }
  return total;
}
java
{ "resource": "" }
q160521
RaidMissingBlocks.remove
train
/**
 * Removes blockInfo from the missing-block queue of the given codec.
 *
 * @return true if the block was present and removed
 */
boolean remove(BlockInfo blockInfo, RaidCodec codec) {
  RaidMissingBlocksPerCodec queue = queues.get(codec);
  // Fix: guard against an unknown codec -- the original dereferenced the
  // map value unconditionally and would throw NPE here.
  if (queue == null || !queue.remove(blockInfo)) {
    return false;
  }
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug(
        "BLOCK* NameSystem.RaidMissingBlocks.remove:" + blockInfo
        + " file "+ blockInfo.getINode() + " codec " + codec.id);
  }
  return true;
}
java
{ "resource": "" }
q160522
SimulatorJobInProgress.initTasks
train
/**
 * Initializes the simulated job: logs submission to job history, builds map
 * tasks from the trace's input splits, builds reduce tasks, computes the
 * reduce slow-start threshold, and marks the job as initialized.
 */
@Override
public synchronized void initTasks() throws IOException {
  boolean loggingEnabled = LOG.isDebugEnabled();
  if (loggingEnabled) {
    LOG.debug("(initTasks@SJIP) Starting Initialization for " + jobId);
  }
  numMapTasks = jobStory.getNumberMaps();
  numReduceTasks = jobStory.getNumberReduces();
  JobHistory.JobInfo.logSubmitted(getJobID(), conf, jobFile.toString(),
      this.startTime, hasRestarted());
  if (loggingEnabled) {
    LOG.debug("(initTasks@SJIP) Logged to job history for " + jobId);
  }
  // checkTaskLimits();
  if (loggingEnabled) {
    LOG.debug("(initTasks@SJIP) Checked task limits for " + jobId);
  }
  // Local name shadows the jobFile field used above: simulated tasks are
  // created with a placeholder file name.
  final String jobFile = "default";
  splits = getRawSplits(jobStory.getInputSplits());
  if (loggingEnabled) {
    LOG.debug("(initTasks@SJIP) Created splits for job = " + jobId
        + " number of splits = " + splits.length);
  }
  // createMapTasks(jobFile, splits);
  // numMapTasks is reset to the actual number of splits produced.
  numMapTasks = splits.length;
  maps = new TaskInProgress[numMapTasks];
  for (int i=0; i < numMapTasks; ++i) {
    inputLength += splits[i].getDataLength();
    maps[i] = new TaskInProgress(jobId, jobFile, splits[i], conf, this, i,
        numSlotsPerMap);
  }
  if (numMapTasks > 0) {
    nonRunningMapCache = createCache(splits, maxLevel);
    if (loggingEnabled) {
      LOG.debug("initTasks:numMaps=" + numMapTasks
          + " Size of nonRunningMapCache=" + nonRunningMapCache.size()
          + " for " + jobId);
    }
  }
  // set the launch time
  this.launchTime = JobTracker.getClock().getTime();
  // createReduceTasks(jobFile);
  //
  // Create reduce tasks
  //
  this.reduces = new TaskInProgress[numReduceTasks];
  for (int i = 0; i < numReduceTasks; i++) {
    reduces[i] = new TaskInProgress(jobId, jobFile, numMapTasks, i, conf,
        this, numSlotsPerReduce);
    nonRunningReduces.add(reduces[i]);
  }
  // Calculate the minimum number of maps to be complete before
  // we should start scheduling reduces
  completedMapsForReduceSlowstart = (int) Math.ceil((conf.getFloat(
      "mapred.reduce.slowstart." + "completed.maps",
      DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) * numMapTasks));
  tasksInited.set(true);
  if (loggingEnabled) {
    LOG.debug("Initializing job, nowstatus = "
        + JobStatus.getJobRunState(getStatus().getRunState()));
  }
  setupComplete();
  if (loggingEnabled) {
    LOG.debug("Initializing job, inited-status = "
        + JobStatus.getJobRunState(getStatus().getRunState()));
  }
}
java
{ "resource": "" }
q160523
SimulatorJobInProgress.getMapTaskAttemptInfo
train
@SuppressWarnings("deprecation") private synchronized TaskAttemptInfo getMapTaskAttemptInfo( TaskTracker taskTracker, TaskAttemptID taskAttemptID) { assert (taskAttemptID.isMap()); JobID jobid = (JobID) taskAttemptID.getJobID(); assert (jobid == getJobID()); // Get splits for the TaskAttempt RawSplit split = splits[taskAttemptID.getTaskID().getId()]; int locality = getClosestLocality(taskTracker, split); TaskID taskId = taskAttemptID.getTaskID(); if (!taskId.isMap()) { assert false : "Task " + taskId + " is not MAP :"; } TaskAttemptInfo taskAttemptInfo = jobStory.getMapTaskAttemptInfoAdjusted( taskId.getId(), taskAttemptID.getId(), locality); if (LOG.isDebugEnabled()) { LOG.debug("get an attempt: " + taskAttemptID.toString() + ", state=" + taskAttemptInfo.getRunState() + ", runtime=" + ((taskId.isMap()) ? taskAttemptInfo.getRuntime() : ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime())); } return taskAttemptInfo; }
java
{ "resource": "" }
q160524
SimulatorJobInProgress.getReduceTaskAttemptInfo
train
/**
 * Returns the simulated TaskAttemptInfo for a reduce attempt from the
 * job trace.
 */
private TaskAttemptInfo getReduceTaskAttemptInfo(TaskTracker taskTracker,
    TaskAttemptID taskAttemptID) {
  assert (!taskAttemptID.isMap());
  TaskID taskId = taskAttemptID.getTaskID();
  TaskType taskType =
      taskAttemptID.isMap() ? TaskType.MAP : TaskType.REDUCE;
  TaskAttemptInfo taskAttemptInfo = jobStory.getTaskAttemptInfo(taskType,
      taskId.getId(), taskAttemptID.getId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("get an attempt: " + taskAttemptID.toString()
        + ", state=" + taskAttemptInfo.getRunState()
        + ", runtime=" + ((taskAttemptID.isMap())
            ? taskAttemptInfo.getRuntime()
            : ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime()));
  }
  return taskAttemptInfo;
}
java
{ "resource": "" }
q160525
FastCopy.alignDatanodes
train
/**
 * Reorders the two location arrays in place so that datanodes common to
 * both appear at matching indices; the swap is applied within the array
 * whose matching entry sits at the higher index.
 */
public static void alignDatanodes(DatanodeInfo[] dstLocs,
    DatanodeInfo[] srcLocs) {
  for (int dst = 0; dst < dstLocs.length; dst++) {
    for (int src = 0; src < srcLocs.length; src++) {
      if (dst == src) {
        continue; // already aligned at this position
      }
      if (!dstLocs[dst].equals(srcLocs[src])) {
        continue;
      }
      if (dst < src) {
        swap(dst, src, srcLocs);
      } else {
        swap(dst, src, dstLocs);
      }
      break;
    }
  }
}
java
{ "resource": "" }
q160526
FastCopy.shutdown
train
public void shutdown() throws IOException { // Clean up RPC connections. Iterator <ClientDatanodeProtocol> connections = datanodeMap.values().iterator(); while(connections.hasNext()) { ClientDatanodeProtocol cnxn = connections.next(); RPC.stopProxy(cnxn); } datanodeMap.clear(); executor.shutdownNow(); synchronized (leaseCheckers) { for (LeaseChecker checker : leaseCheckers.values()) { checker.closeRenewal(); } } }
java
{ "resource": "" }
q160527
FastCopy.copy
train
/**
 * Submits every copy request to the executor, then waits for all of them;
 * the first failed copy surfaces as an ExecutionException from get().
 */
public void copy(List<FastFileCopyRequest> requests) throws Exception {
  List<Future<CopyResult>> pending = new ArrayList<Future<CopyResult>>();
  for (FastFileCopyRequest request : requests) {
    pending.add(executor.submit(
        new FastFileCopy(request.getSrc(), request.getDestination(),
            request.srcFs, request.dstFs)));
  }
  for (Future<CopyResult> future : pending) {
    future.get();
  }
}
java
{ "resource": "" }
q160528
FastCopy.getDirectoryListing
train
/**
 * Recursively collects every file under root into result, pairing each
 * with the destination path mirroring its position under dstPath.
 */
private static void getDirectoryListing(FileStatus root, FileSystem fs,
    List<CopyPath> result, Path dstPath) throws IOException {
  if (!root.isDir()) {
    result.add(new CopyPath(root.getPath(), dstPath));
    return;
  }
  for (FileStatus child : fs.listStatus(root.getPath())) {
    Path childDst = new Path(dstPath, child.getPath().getName());
    getDirectoryListing(child, fs, result, childDst);
  }
}
java
{ "resource": "" }
q160529
FastCopy.expandDirectories
train
/**
 * Expands each source path into concrete (source, destination) copy pairs,
 * recursing into directories, and distinguishing an existing-file (or
 * missing) destination from an existing directory destination.
 */
private static List<CopyPath> expandDirectories(FileSystem fs,
    List<Path> paths, Path dstPath) throws IOException {
  List<CopyPath> newList = new ArrayList<CopyPath>();
  FileSystem dstFs = dstPath.getFileSystem(defaultConf);
  // Destination counts as a "file" when it exists as a non-directory, or
  // does not exist at all.
  boolean isDstFile = false;
  try {
    FileStatus dstPathStatus = dstFs.getFileStatus(dstPath);
    if (!dstPathStatus.isDir()) {
      isDstFile = true;
    }
  } catch (FileNotFoundException e) {
    isDstFile = true;
  }
  for (Path path : paths) {
    FileStatus pathStatus = fs.getFileStatus(path);
    if (!pathStatus.isDir()) {
      // This is the case where the destination is a file, in this case, we
      // allow only a single source file. This check has been done below in
      // FastCopy#parseFiles(List, String[])
      if (isDstFile) {
        newList.add(new CopyPath(path, dstPath));
      } else {
        newList.add(new CopyPath(path, new Path(dstPath, path.getName())));
      }
    } else {
      // If we are copying /a/b/c into /x/y/z and 'z' does not exist, we
      // create the structure /x/y/z/f*, where f* represents all files and
      // directories in c/
      Path rootPath = dstPath;
      // This ensures if we copy a directory like /a/b/c to a directory
      // /x/y/z/, we will create the directory structure /x/y/z/c, if 'z'
      // exists.
      if (dstFs.exists(dstPath)) {
        rootPath = new Path(dstPath, pathStatus.getPath().getName());
      }
      getDirectoryListing(pathStatus, fs, newList, rootPath);
    }
  }
  return newList;
}
java
{ "resource": "" }
q160530
FastCopy.expandSingle
train
/**
 * Expands one (possibly glob) source path into concrete copy operations
 * targeting dstPath.
 *
 * @throws IOException if src matches nothing
 */
private static List<CopyPath> expandSingle(Path src, Path dstPath)
    throws IOException {
  FileSystem fs = src.getFileSystem(defaultConf);
  FileStatus[] stats = fs.globStatus(src);
  if (stats == null || stats.length == 0) {
    throw new IOException("Path : " + src + " is invalid");
  }
  List<Path> expandedPaths = new ArrayList<Path>();
  for (FileStatus stat : stats) {
    expandedPaths.add(stat.getPath());
  }
  return expandDirectories(fs, expandedPaths, dstPath);
}
java
{ "resource": "" }
q160531
FastCopy.expandSrcs
train
/** Expands every source path (globs included) into copy operations. */
private static List<CopyPath> expandSrcs(List<Path> srcs, Path dstPath)
    throws IOException {
  List<CopyPath> expanded = new ArrayList<CopyPath>();
  for (Path source : srcs) {
    expanded.addAll(expandSingle(source, dstPath));
  }
  return expanded;
}
java
{ "resource": "" }
q160532
SequenceFileRecordReader.getProgress
train
/**
 * Returns the fraction of the split consumed so far, clamped to at most
 * 1.0; an empty split reports 0.0.
 */
public float getProgress() throws IOException {
  if (end == start) {
    return 0.0f;
  }
  float consumed = (in.getPosition() - start) / (float) (end - start);
  return Math.min(1.0f, consumed);
}
java
{ "resource": "" }
q160533
ShellParser.findPattern
train
/**
 * Returns capture group grp of the first MULTILINE match of strPattern
 * in text, or null when there is no match.
 */
protected String findPattern(String strPattern, String text, int grp) {
  Matcher matcher =
      Pattern.compile(strPattern, Pattern.MULTILINE).matcher(text);
  return matcher.find(0) ? matcher.group(grp) : null;
}
java
{ "resource": "" }
q160534
ShellParser.findAll
train
/**
 * Concatenates capture group grp of every match of strPattern in text,
 * delimited by separator. Returns the empty string when nothing matches.
 */
protected String findAll(String strPattern, String text, int grp,
    String separator) {
  // Fix: use StringBuilder instead of repeated String concatenation in the
  // loop, which was O(n^2) in the number of matches.
  StringBuilder retval = new StringBuilder();
  boolean firstTime = true;
  Pattern pattern = Pattern.compile(strPattern);
  Matcher matcher = pattern.matcher(text);
  while (matcher.find()) {
    if (!firstTime) {
      retval.append(separator);
    }
    retval.append(matcher.group(grp));
    firstTime = false;
  }
  return retval.toString();
}
java
{ "resource": "" }
q160535
JMXJsonServlet.renderMBeans
train
private int renderMBeans(JsonGenerator jg, String[] mBeanNames) throws IOException, MalformedObjectNameException { jg.writeStartObject(); Set<ObjectName> nameQueries, queriedObjects; nameQueries = new HashSet<ObjectName>(); queriedObjects = new HashSet<ObjectName>(); // if no mbean names provided, add one null entry to query everything if (mBeanNames == null) { nameQueries.add(null); } else { for (String mBeanName : mBeanNames) { if (mBeanName != null) { nameQueries.add(new ObjectName(mBeanName)); } } } // perform name queries for (ObjectName nameQuery : nameQueries) { queriedObjects.addAll(mBeanServer.queryNames(nameQuery, null)); } // render each query result for (ObjectName objectName : queriedObjects) { renderMBean(jg, objectName); } jg.writeEndObject(); return HttpServletResponse.SC_OK; }
java
{ "resource": "" }
q160536
JMXJsonServlet.renderMBean
train
/**
 * Renders one MBean into jg as a JSON object field: bean name, the most
 * specific class name available, then each attribute. MBean-server errors
 * are written into the JSON output rather than propagated.
 */
private void renderMBean(JsonGenerator jg, ObjectName objectName)
    throws IOException {
  MBeanInfo beanInfo;
  String className;
  jg.writeObjectFieldStart(objectName.toString());
  jg.writeStringField("beanName", objectName.toString());
  try {
    beanInfo = mBeanServer.getMBeanInfo(objectName);
    className = beanInfo.getClassName();
    // if we have the generic BaseModelMBean for className, attempt to get
    // more specific name
    if ("org.apache.commons.modeler.BaseModelMBean".equals(className)) {
      try {
        className = (String) mBeanServer.getAttribute(objectName,
            "modelerType");
      } catch (Exception e) {
        // it's fine if no more-particular name can be found
      }
    }
    jg.writeStringField("className", className);
    for (MBeanAttributeInfo attr : beanInfo.getAttributes()) {
      writeAttribute(jg, objectName, attr);
    }
  } catch (OperationsException e) {
    // Some general MBean exception occurred.
    writeException(jg, e);
  } catch (ReflectionException e) {
    // This happens when the code inside the JMX bean threw an exception, so
    // log it and don't output the bean.
    writeException(jg, e);
  }
  jg.writeEndObject();
}
java
{ "resource": "" }
q160537
DatanodeProtocols.sendHeartbeat
train
/**
 * Always fails: this aggregate protocol object does not service
 * heartbeats, so every call raises IOException with the stored message.
 */
public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
    long capacity, long dfsUsed, long remaining, long namespaceUsed,
    int xmitsInProgress, int xceiverCount) throws IOException {
  throw new IOException("sendHeartbeat" + errMessage);
}
java
{ "resource": "" }
q160538
FileFixer.setPolicyInfo
train
void setPolicyInfo(Collection<PolicyInfo> all) throws IOException { this.all = all; this.pathToPolicy.clear(); // keep a reverse map from all top-level paths to policies for (PolicyInfo pinfo: all) { pathToPolicy.add(new PathToPolicy(pinfo.getSrcPath(), pinfo)); for (PathInfo d:pinfo.getDestPaths()) { pathToPolicy.add(new PathToPolicy(d.rpath, pinfo)); } } // keep all paths sorted in revere lexicographical order so that // we longest path is first. Comparator<PathToPolicy> comp = new Comparator<PathToPolicy>() { public int compare(PathToPolicy p1, PathToPolicy p2) { return 0 - p1.spath.compareTo(p2.spath); } }; Collections.sort(pathToPolicy, comp); }
java
{ "resource": "" }
q160539
FileFixer.run
train
/**
 * Main loop: repeatedly scans for files to fix, then sleeps for
 * blockFixInterval. Exceptions are logged and the loop continues; Errors
 * trigger shutdown and are rethrown.
 */
public void run() {
  while (running) {
    try {
      LOG.info("FileFixer continuing to run...");
      doFindFiles();
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    } catch (Error err) {
      // Errors are fatal: shut down and propagate.
      LOG.error("Exiting after encountering " +
                StringUtils.stringifyException(err));
      shutdown();
      throw err;
    }
    try {
      // Sleep before proceeding to fix more files.
      Thread.sleep(blockFixInterval);
    } catch (InterruptedException ie) {
      // NOTE(review): the interrupt status is swallowed here; stopping this
      // thread appears to rely on 'running' instead -- confirm intent.
      LOG.error("Encountering InturruptedException " +
                StringUtils.stringifyException(ie));
    }
  }
}
java
{ "resource": "" }
q160540
FileFixer.createClientDatanodeProtocolProxy
train
/**
 * Creates an RPC proxy to a datanode's ClientDatanodeProtocol, tolerating
 * a newer-client/older-datanode version skew when the two protocol
 * versions are declared compatible.
 *
 * @throws RPC.VersionIncompatible when the skew is not compatible
 */
static ClientDatanodeProtocol createClientDatanodeProtocolProxy (
    DatanodeInfo datanodeid, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    // Fix: log at debug level to match the isDebugEnabled() guard; the
    // original called LOG.info inside a debug-only branch.
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  try {
    return (ClientDatanodeProtocol) RPC.getProxy(ClientDatanodeProtocol.class,
        ClientDatanodeProtocol.versionID, addr, conf);
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    long datanodeVersion = e.getServerVersion();
    if (clientVersion > datanodeVersion
        && !ProtocolCompatible.isCompatibleClientDatanodeProtocol(
            clientVersion, datanodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientDatanodeProtocol.class.getName(), clientVersion,
          datanodeVersion);
    }
    // Compatible skew: use the proxy carried by the mismatch exception.
    return (ClientDatanodeProtocol) e.getProxy();
  }
}
java
{ "resource": "" }
q160541
GaloisField.getInstance
train
/**
 * Returns the memoized GaloisField for (fieldSize, primitivePolynomial),
 * constructing and caching it on first request.
 */
public static GaloisField getInstance(int fieldSize,
    int primitivePolynomial) {
  // Pack both parameters into one int key: field size in the high 16 bits,
  // polynomial in the low 16.
  int key = ((fieldSize << 16) & 0xFFFF0000)
      + (primitivePolynomial & 0x0000FFFF);
  synchronized (instances) {
    GaloisField gf = instances.get(key);
    if (gf == null) {
      gf = new GaloisField(fieldSize, primitivePolynomial);
      instances.put(key, gf);
    }
    return gf;
  }
}
java
{ "resource": "" }
q160542
GaloisField.add
train
/** Field addition in GF(2^k): carry-less, so it reduces to XOR. */
public int add(int x, int y) {
  assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
  return x ^ y;
}
java
{ "resource": "" }
q160543
GaloisField.multiply
train
/** Field multiplication via the precomputed multiplication table. */
public int multiply(int x, int y) {
  assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
  return mulTable[x][y];
}
java
{ "resource": "" }
q160544
GaloisField.divide
train
/** Field division via the precomputed division table; y must be nonzero. */
public int divide(int x, int y) {
  assert (x >= 0 && x < getFieldSize() && y > 0 && y < getFieldSize());
  return divTable[x][y];
}
java
{ "resource": "" }
q160545
GaloisField.power
train
/**
 * Computes x raised to the n-th power in the field via the log/antilog
 * tables. By the original's conventions, x^0 == 1 (including 0^0) and
 * 0^n == 0 for n > 0.
 */
public int power(int x, int n) {
  assert (x >= 0 && x < getFieldSize());
  if (n == 0) {
    return 1;
  }
  if (x == 0) {
    return 0;
  }
  // Fix: compute logTable[x] * n in 64-bit arithmetic; the original int
  // product could overflow for large n, producing a wrong (or negative)
  // index into powTable.
  long exp = (long) logTable[x] * n;
  return powTable[(int) (exp % primitivePeriod)];
}
java
{ "resource": "" }
q160546
GaloisField.solveVandermondeSystem
train
/**
 * Solves a Vandermonde-style linear system in place over the field,
 * operating column-wise on y[*][dataStart .. dataStart+dataLen): a forward
 * elimination pass followed by back substitution, using the precomputed
 * mul/div tables. x holds the evaluation points; len rows participate.
 */
public void solveVandermondeSystem(int[] x, byte[][] y, int len,
    int dataStart, int dataLen) {
  assert (x.length <= len && y.length <= len);
  int dataEnd = dataStart + dataLen;
  // Forward elimination.
  for (int i = 0; i < len - 1; i++) {
    for (int j = len - 1; j > i; j--) {
      for (int k = dataStart; k < dataEnd; k++) {
        // Mask to 0..255: bytes are signed in Java.
        y[j][k] = (byte) (y[j][k]
            ^ mulTable[x[i]][y[j - 1][k] & 0x000000FF]);
      }
    }
  }
  // Back substitution.
  for (int i = len - 1; i >= 0; i--) {
    for (int j = i + 1; j < len; j++) {
      for (int k = dataStart; k < dataEnd; k++) {
        y[j][k] = (byte) (divTable[y[j][k] & 0x000000FF][x[j]
            ^ x[j - i - 1]]);
      }
    }
    for (int j = i; j < len - 1; j++) {
      for (int k = dataStart; k < dataEnd; k++) {
        y[j][k] = (byte) (y[j][k] ^ y[j + 1][k]);
      }
    }
  }
}
java
{ "resource": "" }
q160547
GaloisField.substitute
train
/**
 * Substitutes x into the polynomials p over their full row length,
 * delegating to the ranged overload with [0, p[0].length).
 */
public void substitute(byte[][] p, byte[] q, int x) {
  substitute(p, q, x, 0, p[0].length);
}
java
{ "resource": "" }
q160548
PoolSchedulable.snapshotConfig
train
/**
 * Copies this pool's current configuration values into local fields under
 * the config manager's lock, so scheduling sees one consistent snapshot.
 */
private void snapshotConfig() {
  synchronized (configManager) {
    // Per-pool limits and scheduling knobs.
    maximum = configManager.getPoolMaximum(poolInfo, getType());
    minimum = configManager.getPoolMinimum(poolInfo, getType());
    weight = configManager.getWeight(poolInfo);
    priority = configManager.getPriority(poolInfo);
    preemptable = configManager.isPoolPreemptable(poolInfo);
    // Global starvation/preemption parameters.
    shareStarvingRatio = configManager.getShareStarvingRatio();
    minPreemptPeriod = configManager.getMinPreemptPeriod();
    starvingTimeForShare = configManager.getStarvingTimeForShare();
    starvingTimeForMinimum = configManager.getStarvingTimeForMinimum();
  }
}
java
{ "resource": "" }
q160549
PoolSchedulable.addSession
train
/**
 * Wraps session in a SessionSchedulable for this pool's resource type and
 * registers it under id.
 */
public void addSession(String id, Session session) {
  // NOTE(review): this synchronizes on the session argument while mutating
  // idToSession; if the map itself needs protection, a lock shared by all
  // of its mutators would be required -- confirm the locking discipline.
  synchronized (session) {
    SessionSchedulable schedulable =
        new SessionSchedulable(session, getType());
    idToSession.put(id, schedulable);
  }
}
java
{ "resource": "" }
q160550
PoolSchedulable.getScheduleQueue
train
/**
 * Lazily builds and returns the schedule queue, ordered by the pool's
 * configured comparator.
 */
public Queue<SessionSchedulable> getScheduleQueue() {
  if (scheduleQueue != null) {
    return scheduleQueue;
  }
  scheduleQueue =
      createSessionQueue(configManager.getPoolComparator(poolInfo));
  return scheduleQueue;
}
java
{ "resource": "" }
q160551
PoolSchedulable.getPreemptQueue
train
/**
 * Lazily builds and returns the preemption queue, ordered by the
 * preemption variant of the pool's configured comparator.
 *
 * @throws IllegalArgumentException on an unrecognized pool comparator
 */
public Queue<SessionSchedulable> getPreemptQueue() {
  if (preemptQueue == null) {
    ScheduleComparator poolComparator =
        configManager.getPoolComparator(poolInfo);
    ScheduleComparator comparator;
    switch (poolComparator) {
    case FIFO:
      comparator = ScheduleComparator.FIFO_PREEMPT;
      break;
    case FAIR:
      comparator = ScheduleComparator.FAIR_PREEMPT;
      break;
    case DEADLINE:
      comparator = ScheduleComparator.DEADLINE_PREEMPT;
      break;
    default:
      throw new IllegalArgumentException("Unknown comparator");
    }
    preemptQueue = createSessionQueue(comparator);
  }
  return preemptQueue;
}
java
{ "resource": "" }
q160552
PoolSchedulable.createSessionQueue
train
/**
 * Builds a priority queue over the snapshot sessions ordered by the given
 * comparator.
 */
public Queue<SessionSchedulable> createSessionQueue(
    ScheduleComparator comparator) {
  // PriorityQueue rejects an initial capacity of 0, so use at least 1.
  int initCapacity = Math.max(1, snapshotSessions.size());
  Queue<SessionSchedulable> sessionQueue =
      new PriorityQueue<SessionSchedulable>(initCapacity, comparator);
  sessionQueue.addAll(snapshotSessions);
  return sessionQueue;
}
java
{ "resource": "" }
q160553
PoolSchedulable.isStarving
train
// Decide whether this pool is starving at time 'now'.
// First refreshes the "last time above" timestamps (above the starving
// share, and above min(share, minimum)), then suppresses repeated
// preemption within minPreemptPeriod of the last one. After that, the pool
// is declared starving -- and lastPreemptTime is stamped -- if either
// getMinimumStarvingTime(now) or getShareStarvingTime(now) is >= 0,
// i.e. the corresponding grace period has been exceeded.
// NOTE: the timestamp updates must happen before the minPreemptPeriod
// early return, so the ordering in this method is significant.
public boolean isStarving(long now) { double starvingShare = getShare() * shareStarvingRatio; if (getGranted() >= Math.ceil(starvingShare)) { lastTimeAboveStarvingShare = now; } if (getGranted() >= Math.min(getShare(), getMinimum())) { lastTimeAboveMinimum = now; } if (now - lastPreemptTime < getMinPreemptPeriod()) { // Prevent duplicate preemption return false; } if (LOG.isDebugEnabled()) { LOG.debug("Pool:" + getName() + " lastTimeAboveMinimum:" + lastTimeAboveMinimum + " lastTimeAboveStarvingShare:" + lastTimeAboveStarvingShare + " minimumStarvingTime:" + getMinimumStarvingTime(now) + " shareStarvingTime:" + getShareStarvingTime(now) + " starvingTime:" + getStarvingTime(now)); } if (getMinimumStarvingTime(now) >= 0) { LOG.info("Pool:" + getName() + " for type:" + getType() + " is starving min:" + getMinimum() + " granted:" + getGranted()); lastPreemptTime = now; return true; } if (getShareStarvingTime(now) >= 0) { LOG.info("Pool:" + getName() + " for type:" + getType() + " is starving share:" + getShare() + " starvingRatio:" + shareStarvingRatio + " starvingShare:" + starvingShare + " granted:" + getGranted()); lastPreemptTime = now; return true; } return false; }
java
{ "resource": "" }
q160554
PoolSchedulable.getStarvingTime
train
/**
 * Returns how long this pool has been starving at time {@code now}:
 * the larger of the minimum-based and share-based starvation times.
 */
public long getStarvingTime(long now) {
  final long belowMinimumTime = getMinimumStarvingTime(now);
  final long belowShareTime = getShareStarvingTime(now);
  return Math.max(belowMinimumTime, belowShareTime);
}
java
{ "resource": "" }
q160555
DistCh.getMapCount
train
/**
 * Computes the number of map tasks: one per OP_PER_MAP operations,
 * capped at MAX_MAPS_PER_NODE per node, but never fewer than one.
 */
private static int getMapCount(int srcCount, int numNodes) {
  int maps = (int) (srcCount / OP_PER_MAP);
  int clusterCap = numNodes * MAX_MAPS_PER_NODE;
  if (maps > clusterCap) {
    maps = clusterCap;
  }
  return (maps < 1) ? 1 : maps;
}
java
{ "resource": "" }
q160556
AvatarZooKeeperClient.registerPrimarySsId
train
// Persist the primary namenode's session id under the per-address znode,
// creating intermediate znodes as needed.
// NOTE(review): the boolean passed to zkCreateRecursively presumably allows
// overwriting an existing node -- confirm against that method's signature.
public synchronized void registerPrimarySsId(String address, Long ssid) throws IOException { String node = getSsIdNode(address); zkCreateRecursively(node, SerializableUtils.toBytes(ssid), true, ssid.toString()); }
java
{ "resource": "" }
q160557
AvatarZooKeeperClient.registerLastTxId
train
// Persist the primary namenode's last transaction id record under the
// per-address znode, creating intermediate znodes as needed.
// NOTE(review): the boolean passed to zkCreateRecursively presumably allows
// overwriting an existing node -- confirm against that method's signature.
public synchronized void registerLastTxId(String address, ZookeeperTxId lastTxid) throws IOException { String node = getLastTxIdNode(address); zkCreateRecursively(node, lastTxid.toBytes(), true, lastTxid.toString()); }
java
{ "resource": "" }
q160558
AvatarZooKeeperClient.getPrimarySsId
train
/**
 * Reads the primary namenode's session id stored under the per-address
 * znode.
 *
 * @return the deserialized session id, or null if the znode has no data
 */
public Long getPrimarySsId(String address, boolean sync)
    throws IOException, KeeperException, InterruptedException,
    ClassNotFoundException {
  String znode = getSsIdNode(address);
  byte[] raw = getNodeData(znode, new Stat(), false, sync);
  if (raw == null) {
    return null;
  }
  return (Long) SerializableUtils.getFromBytes(raw, Long.class);
}
java
{ "resource": "" }
q160559
AvatarZooKeeperClient.getPrimaryLastTxId
train
/**
 * Reads the primary namenode's last transaction id record stored under the
 * per-address znode.
 *
 * @return the deserialized record, or null if the znode has no data
 */
public ZookeeperTxId getPrimaryLastTxId(String address, boolean sync)
    throws IOException, KeeperException, InterruptedException,
    ClassNotFoundException {
  String znode = getLastTxIdNode(address);
  byte[] raw = getNodeData(znode, new Stat(), false, sync);
  return (raw == null) ? null : ZookeeperTxId.getFromBytes(raw);
}
java
{ "resource": "" }
q160560
AvatarZooKeeperClient.getPrimaryAvatarAddress
train
// Convenience overload: delegates to the four-argument variant with the
// final flag set to false. NOTE(review): by analogy with getNodeData's
// trailing parameter this is presumably a "sync" flag -- confirm at the
// four-argument overload.
public String getPrimaryAvatarAddress(String address, Stat stat, boolean retry) throws IOException, KeeperException, InterruptedException { return getPrimaryAvatarAddress(address, stat, retry, false); }
java
{ "resource": "" }
q160561
TaskRunner.addJobJarToClassPath
train
private void addJobJarToClassPath(String localJarFile, StringBuffer classPath) { File jobCacheDir = new File (new Path(localJarFile).getParent().toString()); File[] libs = new File(jobCacheDir, "lib").listFiles(); String sep = System.getProperty("path.separator"); if (libs != null) { for (int i = 0; i < libs.length; i++) { classPath.append(sep); // add libs from jar to classpath classPath.append(libs[i]); } } classPath.append(sep); classPath.append(new File(jobCacheDir, "classes")); classPath.append(sep); classPath.append(jobCacheDir); }
java
{ "resource": "" }
q160562
TaskRunner.appendSystemClasspath
train
// Appends the system classpath for a task: first an optional debug runtime
// (mapred.task.debug.runtime.classpath, loaded before the TT's jars), then
// either the MAPREDUCE_TASK_SYSTEM_CLASSPATH_PROPERTY environment override
// or, failing that, this JVM's own java.class.path.
private static void appendSystemClasspath(JobConf conf, String pathSeparator, StringBuffer classPath) { // The alternate runtime can be used to debug tasks by putting a // custom version of the mapred libraries. This will get loaded before // the TT's jars. String debugRuntime = conf.get("mapred.task.debug.runtime.classpath"); if (debugRuntime != null) { classPath.append(pathSeparator); classPath.append(debugRuntime); } // Determine system classpath for tasks. Default to tasktracker's // classpath. String systemClasspath = System.getenv( MAPREDUCE_TASK_SYSTEM_CLASSPATH_PROPERTY); if (systemClasspath == null) { systemClasspath = System.getProperty("java.class.path"); } if (LOG.isDebugEnabled()) { LOG.debug("System classpath " + systemClasspath); } classPath.append(pathSeparator); classPath.append(systemClasspath); }
java
{ "resource": "" }
q160563
RaidHistogram.setNewWindows
train
// Replaces the histogram's windows with new values. The new list must have
// the same size, and (after sorting) each new window must be <= the
// corresponding current window, otherwise an IOException is thrown.
// NOTE(review): Collections.sort mutates the caller's list in place --
// side effect on the argument; confirm callers expect this.
public synchronized void setNewWindows(ArrayList<Long> newWindows) throws IOException { if (newWindows.size() != windows.size()) { throw new IOException( "Number of new windows need to be the same as that of old ones"); } Collections.sort(newWindows); for (int i = 0; i < newWindows.size(); i++) { if (newWindows.get(i) > windows.get(i)) { throw new IOException ("New window " + newWindows.get(i) + " should be smaller than the old one " + windows.get(i)); } windows.set(i, newWindows.get(i)); } }
java
{ "resource": "" }
q160564
JobQueuesManager.reorderJobs
train
/**
 * Re-inserts the job into whichever queues it currently occupies so its
 * ordering is refreshed after a scheduling-info change; a queue the job
 * was not in (remove returned null) is left alone.
 */
private void reorderJobs(JobInProgress job, JobSchedulingInfo oldInfo,
    QueueInfo qi) {
  boolean wasWaiting = qi.removeWaitingJob(oldInfo) != null;
  if (wasWaiting) {
    qi.addWaitingJob(job);
  }
  boolean wasRunning = qi.removeRunningJob(oldInfo) != null;
  if (wasRunning) {
    qi.addRunningJob(job);
  }
}
java
{ "resource": "" }
q160565
JobQueuesManager.makeJobRunning
train
// Transition the job to the running queue. Removing it from the waiting
// job list is deliberately left to the initialization poller (see the
// inline comment), so only the add is performed here.
private void makeJobRunning(JobInProgress job, JobSchedulingInfo oldInfo, QueueInfo qi) { // Removing of the job from job list is responsibility of the //initialization poller. // Add the job to the running queue qi.addRunningJob(job); }
java
{ "resource": "" }
q160566
JobQueuesManager.jobStateChanged
train
// Dispatches a job status-change event for the given queue:
// - PRIORITY_CHANGED / START_TIME_CHANGED alter job ordering -> reorder;
// - RUN_STATE_CHANGED to SUCCEEDED/FAILED/KILLED -> complete the job;
// - RUN_STATE_CHANGED to RUNNING -> move it to the running queue.
// The old JobSchedulingInfo is rebuilt from the event's old status so the
// job can be located under its previous ordering key.
private void jobStateChanged(JobStatusChangeEvent event, QueueInfo qi) { JobInProgress job = event.getJobInProgress(); JobSchedulingInfo oldJobStateInfo = new JobSchedulingInfo(event.getOldStatus()); // Check if the ordering of the job has changed // For now priority and start-time can change the job ordering if (event.getEventType() == EventType.PRIORITY_CHANGED || event.getEventType() == EventType.START_TIME_CHANGED) { // Make a priority change reorderJobs(job, oldJobStateInfo, qi); } else if (event.getEventType() == EventType.RUN_STATE_CHANGED) { // Check if the job is complete int runState = job.getStatus().getRunState(); if (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED || runState == JobStatus.KILLED) { jobCompleted(job, oldJobStateInfo, qi); } else if (runState == JobStatus.RUNNING) { makeJobRunning(job, oldJobStateInfo, qi); } } }
java
{ "resource": "" }
q160567
PipeMapRed.splitKeyVal
train
// Splits a streaming record into key and value at the numKeyFields-th
// occurrence of the field separator. If the line contains fewer separators
// than key fields (pos == -1), the whole line becomes the key and the value
// is empty. Character-coding problems are logged and swallowed rather than
// propagated, leaving key/val as set so far.
void splitKeyVal(byte[] line, int length, Text key, Text val) throws IOException { int numKeyFields = getNumOfKeyFields(); byte[] separator = getFieldSeparator(); // Need to find numKeyFields separators int pos = UTF8ByteArrayUtils.findBytes(line, 0, length, separator); for(int k=1; k<numKeyFields && pos!=-1; k++) { pos = UTF8ByteArrayUtils.findBytes(line, pos + separator.length, length, separator); } try { if (pos == -1) { key.set(line, 0, length); val.set(""); } else { StreamKeyValUtil.splitKeyVal(line, 0, length, key, val, pos, separator.length); } } catch (CharacterCodingException e) { LOG.warn(StringUtils.stringifyException(e)); } }
java
{ "resource": "" }
q160568
PipeMapRed.write
train
/**
 * Writes one value to the client process. When a client-side serializer is
 * configured it takes over entirely; otherwise the raw bytes of
 * BytesWritable/Text are written directly, and any other type is written
 * as the UTF-8 encoding of its toString().
 */
void write(Object value) throws IOException {
  if (clientInputSerializer != null) {
    clientInputSerializer.serialize(value);
    return;
  }
  final byte[] raw;
  final int rawLength;
  if (value instanceof BytesWritable) {
    BytesWritable bytes = (BytesWritable) value;
    raw = bytes.getBytes();
    rawLength = bytes.getLength();
  } else if (value instanceof Text) {
    Text text = (Text) value;
    raw = text.getBytes();
    rawLength = text.getLength();
  } else {
    raw = value.toString().getBytes("UTF-8");
    rawLength = raw.length;
  }
  clientOut_.write(raw, 0, rawLength);
}
java
{ "resource": "" }
q160569
XmlEditsVisitor.writeTag
train
/**
 * Writes an indented XML element: {@code <tag>value</tag>} when the value
 * is non-empty, otherwise the self-closing form {@code <tag/>}.
 */
private void writeTag(String tag, String value) throws IOException {
  printIndents();
  if (value.isEmpty()) {
    write("<" + tag + "/>\n");
  } else {
    write("<" + tag + ">" + value + "</" + tag + ">\n");
  }
}
java
{ "resource": "" }
q160570
SnapshotNode.updateLeasedFiles
train
// Brings the lengths of all leased (open) files up to date for the
// snapshot. A bounded thread pool (maxLeaseUpdateThreads, core threads may
// time out) runs one LeaseUpdateWorker per leased path; workers that cannot
// determine a length record their block into blocksForNN. After the pool
// drains (bounded by a 1200s await), the remaining block lengths are
// batch-fetched from the namenode; a returned length of -1 means the length
// could not be resolved and the block keeps its preferred length.
void updateLeasedFiles(SnapshotStorage ssStore) throws IOException { FSNamesystem fsNamesys = ssStore.getFSNamesystem(); List<Block> blocksForNN = new ArrayList<Block>(); leaseUpdateThreadPool = new ThreadPoolExecutor(1, maxLeaseUpdateThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()); ((ThreadPoolExecutor)leaseUpdateThreadPool).allowCoreThreadTimeOut(true); // Try to update lengths for leases from DN LightWeightLinkedSet<Lease> sortedLeases = fsNamesys.leaseManager.getSortedLeases(); Iterator<Lease> itr = sortedLeases.iterator(); while (itr.hasNext()) { Lease lease = itr.next(); for (String path : lease.getPaths()) { // Update file lengths using worker threads to increase throughput leaseUpdateThreadPool.execute( new LeaseUpdateWorker(conf, path, fsNamesys, blocksForNN)); } } try { leaseUpdateThreadPool.shutdown(); // Wait till update tasks finish successfully (max 20 mins?) if (!leaseUpdateThreadPool.awaitTermination(1200, TimeUnit.SECONDS)) { throw new IOException("Updating lease files failed"); } } catch (InterruptedException e) { throw new IOException("Snapshot creation interrupted while updating leased files"); } // Fetch block lengths for renamed/deleted leases from NN long[] blockIds = new long[blocksForNN.size()]; for (int i = 0; i < blocksForNN.size(); ++i) { blockIds[i] = blocksForNN.get(i).getBlockId(); } long[] lengths = namenode.getBlockLengths(blockIds); for (int i = 0; i < blocksForNN.size(); ++i) { if (lengths[i] == -1) { // Couldn't update block length, keep preferred length LOG.error("Couldn't update length for block " + blocksForNN.get(i)); } else { blocksForNN.get(i).setNumBytes(lengths[i]); } } }
java
{ "resource": "" }
q160571
SnapshotNode.downloadSnapshotFiles
train
// Downloads fsimage, edits, and (if a checkpoint is in progress) edits.new
// from the namenode into the snapshot's temporary storage. Retries the
// whole set until it is consistent: the checkpoint signature taken before
// and after the download must have the same checkpointTime (nothing rolled
// mid-download). A missing edits.new is expected (FileNotFoundException is
// swallowed) when no checkpoint is underway.
// NOTE(review): the success test also requires checkpointState !=
// UPLOAD_DONE -- the intent of that clause is not evident here; confirm
// against CheckpointStates semantics.
void downloadSnapshotFiles(SnapshotStorage ssStore) throws IOException { CheckpointSignature start = namenode.getCheckpointSignature(); ssStore.storage.setStorageInfo(start); CheckpointSignature end = null; boolean success; do { // Clear temp files prepareDownloadDirs(); // get fsimage File[] srcNames = ssStore.getImageFiles(); assert srcNames.length == 1 : "No snapshot temporary dir."; TransferFsImage.downloadImageToStorage(fileServer, HdfsConstants.INVALID_TXID, ssStore, true, srcNames); LOG.info("Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes."); // get edits file srcNames = ssStore.getEditsFiles(); assert srcNames.length == 1 : "No snapshot temporary dir."; TransferFsImage.downloadEditsToStorage(fileServer, new RemoteEditLog(), ssStore, false); LOG.info("Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes."); // get edits.new file (only if in the middle of ckpt) try { srcNames = ssStore.getEditsNewFiles(); assert srcNames.length == 1 : "No snapshot temporary dir."; TransferFsImage.downloadEditsToStorage(fileServer, new RemoteEditLog(), ssStore, true); LOG.info("Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes."); } catch (FileNotFoundException e) { // do nothing } end = namenode.getCheckpointSignature(); // Are the downloaded files consistent? success = end.checkpointTime == start.checkpointTime && end.checkpointState != CheckpointStates.UPLOAD_DONE; start = end; } while (!success); }
java
{ "resource": "" }
q160572
LeaseManager.reassignLease
train
/**
 * Moves the lease on {@code src} to {@code newHolder}, preserving the
 * path's original open time when one exists; otherwise the new lease is
 * stamped with the current time.
 */
synchronized Lease reassignLease(Lease lease, String src, String newHolder) {
  assert newHolder != null : "new lease holder is null";
  LeaseOpenTime openTime = null;
  if (lease != null) {
    openTime = removeLease(lease, src);
  }
  final long time = (openTime != null)
      ? openTime.openTime
      : System.currentTimeMillis();
  return addLease(newHolder, src, time);
}
java
{ "resource": "" }
q160573
LeaseManager.findPath
train
/**
 * Resolves the path of an under-construction file through its client's
 * lease.
 *
 * @throws IOException if the holder has no lease or the lease does not
 *         cover the file
 */
synchronized String findPath(INodeFileUnderConstruction pendingFile)
    throws IOException {
  Lease lease = getLease(pendingFile.getClientName());
  if (lease != null) {
    String src = lease.findPath(pendingFile);
    if (src != null) {
      return src;
    }
  }
  throw new IOException("pendingFile (=" + pendingFile + ") not found."
      + "(lease=" + lease + ")");
}
java
{ "resource": "" }
q160574
LeaseManager.removeLease
train
// Removes src from both the by-path index and the lease itself, logging
// (not throwing) on inconsistencies. When the lease loses its last path it
// is dropped from the holder map and the sorted set as well. Returns the
// open time removed from sortedLeasesByPath (may be null if absent).
synchronized LeaseOpenTime removeLease(Lease lease, String src) { LeaseOpenTime leaseOpenTime = sortedLeasesByPath.remove(src); if (!lease.removePath(src)) { LOG.error(src + " not found in lease.paths (=" + lease.paths + ")"); } if (!lease.hasPath()) { leases.remove(lease.holder); if (!sortedLeases.remove(lease)) { LOG.error(lease + " not found in sortedLeases"); } } return leaseOpenTime; }
java
{ "resource": "" }
q160575
LeaseManager.removeLease
train
/**
 * Removes {@code src} from the lease held by {@code holder}; a holder with
 * no lease is silently ignored.
 */
synchronized void removeLease(String holder, String src) {
  final Lease lease = getLease(holder);
  if (lease == null) {
    return;
  }
  removeLease(lease, src);
}
java
{ "resource": "" }
q160576
LeaseManager.checkLeases
train
// Scans leases oldest-first and recovers those whose hard limit expired,
// bounded by maxPathsPerCheck paths per invocation. The lease's path set is
// copied to an array first because internalReleaseLeaseOne may remove paths
// (empty files) from the collection being iterated, which would otherwise
// raise ConcurrentModificationException. On a recovery failure the path is
// dropped from the lease and an exception metric is bumped so the monitor
// cannot spin on the same path forever.
synchronized void checkLeases() { int numPathsChecked = 0; for(; sortedLeases.size() > 0; ) { final Lease oldest = sortedLeases.first(); if (!oldest.expiredHardLimit()) { return; } // internalReleaseLease() removes paths corresponding to empty files, // i.e. it needs to modify the collection being iterated over // causing ConcurrentModificationException String[] leasePaths = new String[oldest.getPaths().size()]; oldest.getPaths().toArray(leasePaths); LOG.info("Lease " + oldest + " has expired hard limit. Recovering lease for paths: " + Arrays.toString(leasePaths)); for(String p : leasePaths) { if (++numPathsChecked > this.maxPathsPerCheck) { return; } try { fsnamesystem.getFSNamesystemMetrics().numLeaseRecoveries.inc(); fsnamesystem.internalReleaseLeaseOne( oldest, p, this.discardLastBlockIfNoSync); } catch (IOException e) { LOG.error("Cannot release the path "+p+" in the lease "+oldest, e); removeLease(oldest, p); fsnamesystem.getFSNamesystemMetrics().numLeaseManagerMonitorExceptions.inc(); } } } }
java
{ "resource": "" }
q160577
Progress.addPhase
train
/**
 * Adds a new child phase and labels it with the given status message.
 *
 * @return the newly created phase
 */
public Progress addPhase(String status) {
  Progress child = addPhase();
  child.setStatus(status);
  return child;
}
java
{ "resource": "" }
q160578
Progress.complete
train
// Marks this node fully complete (progress = 1.0) and advances the parent
// to its next phase. The parent reference is captured inside this node's
// lock but startNextPhase() is called outside it, deliberately avoiding
// holding a child lock while acquiring the parent's (see inline comments
// on traversal direction).
public void complete() { // we have to traverse up to our parent, so be careful about locking. Progress myParent; synchronized(this) { progress = 1.0f; myParent = parent; } if (myParent != null) { // this will synchronize on the parent, so we make sure we release // our lock before getting the parent's, since we're traversing // against the normal traversal direction used by get() or toString(). // We don't need transactional semantics, so we're OK doing this. myParent.startNextPhase(); } }
java
{ "resource": "" }
q160579
Progress.get
train
public synchronized float get() { Progress node = this; while (node.parent != null) { // find the root node = parent; } return node.getInternal(); }
java
{ "resource": "" }
q160580
Progress.getInternal
train
// Computes this node's progress. With child phases, each phase contributes
// progressPerPhase, so the result is (completed phases + current phase's
// fractional progress) * progressPerPhase; currentPhase may equal the
// phase count when all phases are done, in which case the sub-progress is
// 0. A leaf node just reports its own progress field.
private synchronized float getInternal() { int phaseCount = phases.size(); if (phaseCount != 0) { float subProgress = currentPhase < phaseCount ? phase().getInternal() : 0.0f; return progressPerPhase*(currentPhase + subProgress); } else { return progress; } }
java
{ "resource": "" }
q160581
DFSLocationsRoot.reloadLocations
train
/**
 * Rebuilds the location map from scratch using the servers currently
 * known to the registry.
 */
private synchronized void reloadLocations() {
  map.clear();
  for (HadoopServer server : ServerRegistry.getInstance().getServers()) {
    map.put(server, new DFSLocation(provider, server));
  }
}
java
{ "resource": "" }
q160582
LogUtils.incrLogMetrics
train
/**
 * Folds the given counter deltas into the raid node's log metrics,
 * lazily creating a counter the first time a key is seen. A null or
 * empty map is a no-op.
 */
public static void incrLogMetrics(Map<String, Long> incrMetrics) {
  if (incrMetrics == null || incrMetrics.isEmpty()) {
    return;
  }
  MetricsRegistry registry = RaidNodeMetrics.getInstance(
      RaidNodeMetrics.DEFAULT_NAMESPACE_ID).getMetricsRegistry();
  Map<String, MetricsTimeVaryingLong> logMetrics = RaidNodeMetrics.getInstance(
      RaidNodeMetrics.DEFAULT_NAMESPACE_ID).logMetrics;
  synchronized (logMetrics) {
    // Iterate entries instead of keySet()+get(): the original did two hash
    // probes per metric and cast a value from an already-typed map.
    for (Map.Entry<String, Long> entry : incrMetrics.entrySet()) {
      MetricsTimeVaryingLong counter = logMetrics.get(entry.getKey());
      if (counter == null) {
        counter = new MetricsTimeVaryingLong(entry.getKey(), registry);
        logMetrics.put(entry.getKey(), counter);
      }
      counter.inc(entry.getValue());
    }
  }
}
java
{ "resource": "" }
q160583
SessionDriver.getLocalAddress
train
/**
 * Returns the local host address, normalizing UnknownHostException into
 * IOException so callers only deal with one exception type.
 */
static java.net.InetAddress getLocalAddress() throws IOException {
  try {
    return java.net.InetAddress.getLocalHost();
  } catch (java.net.UnknownHostException unknownHost) {
    throw new IOException(unknownHost);
  }
}
java
{ "resource": "" }
q160584
SessionDriver.initializeServer
train
// Binds a server socket on an ephemeral port at the local address and
// stands up a Thrift thread-pool server (daemon threads, binary protocol)
// for SessionDriverService, processing calls off the 'incoming' queue.
// Returns the raw socket so callers can discover the chosen port.
private ServerSocket initializeServer(CoronaConf conf) throws IOException { // Choose any free port. ServerSocket sessionServerSocket = new ServerSocket(0, 0, getLocalAddress()); TServerSocket tServerSocket = new TServerSocket(sessionServerSocket, conf.getCMSoTimeout()); TFactoryBasedThreadPoolServer.Args args = new TFactoryBasedThreadPoolServer.Args(tServerSocket); args.processor(new SessionDriverServiceProcessor(incoming)); args.transportFactory(new TTransportFactory()); args.protocolFactory(new TBinaryProtocol.Factory(true, true)); args.stopTimeoutVal = 0; server = new TFactoryBasedThreadPoolServer( args, new TFactoryBasedThreadPoolServer.DaemonThreadFactory()); return sessionServerSocket; }
java
{ "resource": "" }
q160585
SessionDriver.setName
train
/**
 * Updates the session's display name and notifies the cluster manager
 * with a snapshot of the new info. Null or empty names are ignored.
 *
 * @throws IOException if the driver has already failed
 */
public void setName(String name) throws IOException {
  if (failException != null) {
    throw failException;
  }
  if (name == null || name.length() == 0) {
    return;
  }
  sessionInfo.name = name;
  cmNotifier.addCall(new ClusterManagerService.sessionUpdateInfo_args(
      sessionId, new SessionInfo(sessionInfo)));
}
java
{ "resource": "" }
q160586
SessionDriver.setPriority
train
/**
 * Updates the session's priority and notifies the cluster manager with a
 * snapshot of the new info.
 *
 * @throws IOException if the driver has already failed
 */
public void setPriority(SessionPriority prio) throws IOException {
  if (failException != null) {
    throw failException;
  }
  sessionInfo.priority = prio;
  cmNotifier.addCall(new ClusterManagerService.sessionUpdateInfo_args(
      sessionId, new SessionInfo(sessionInfo)));
}
java
{ "resource": "" }
q160587
SessionDriver.setDeadline
train
/**
 * Updates the session's deadline and notifies the cluster manager with a
 * snapshot of the new info.
 *
 * @throws IOException if the driver has already failed
 */
public void setDeadline(long sessionDeadline) throws IOException {
  if (failException != null) {
    throw failException;
  }
  sessionInfo.deadline = sessionDeadline;
  cmNotifier.addCall(new ClusterManagerService.sessionUpdateInfo_args(
      sessionId, new SessionInfo(sessionInfo)));
}
java
{ "resource": "" }
q160588
SessionDriver.setUrl
train
/**
 * Updates the session's URL and notifies the cluster manager with a
 * snapshot of the new info.
 *
 * @throws IOException if the driver has already failed
 */
public void setUrl(String url) throws IOException {
  if (failException != null) {
    throw failException;
  }
  sessionInfo.url = url;
  cmNotifier.addCall(new ClusterManagerService.sessionUpdateInfo_args(
      sessionId, new SessionInfo(sessionInfo)));
}
java
{ "resource": "" }
q160589
SessionDriver.stopRemoteSession
train
/**
 * Asks the cluster manager to end the given remote session, marking it as
 * timed out.
 */
public void stopRemoteSession(String remoteId) {
  cmNotifier.addCall(new ClusterManagerService.sessionEnd_args(
      remoteId, SessionStatus.TIMED_OUT));
}
java
{ "resource": "" }
q160590
SessionDriver.stop
train
// Shuts the session driver down: stops accepting work, discards any
// pending notifier calls, queues optional node-usage feedback followed by
// the session-end notification, then tears down the notifier, the Thrift
// server, and the incoming-call executor.
public void stop(SessionStatus status, List<ResourceType> resourceTypes, List<NodeUsageReport> reportList) { LOG.info("Stopping session driver"); running = false; // clear all calls from the notifier and append the feedback and session // end. cmNotifier.clearCalls(); if (reportList != null && !reportList.isEmpty()) { cmNotifier.addCall( new ClusterManagerService.nodeFeedback_args( sessionId, resourceTypes, reportList)); } cmNotifier.addCall( new ClusterManagerService.sessionEnd_args(sessionId, status)); cmNotifier.doShutdown(); server.stop(); incomingCallExecutor.interrupt(); }
java
{ "resource": "" }
q160591
SessionDriver.join
train
/**
 * Waits for the driver's threads to finish: the server thread, then the
 * notifier (bounded, with a warning if it overruns the interval), then
 * the incoming-call executor.
 */
public void join() throws InterruptedException {
  serverThread.join();
  long waitStart = System.currentTimeMillis();
  cmNotifier.join(SESSION_DRIVER_WAIT_INTERVAL);
  long waited = System.currentTimeMillis() - waitStart;
  if (waited >= SESSION_DRIVER_WAIT_INTERVAL) {
    LOG.warn("Taking more than " + SESSION_DRIVER_WAIT_INTERVAL
        + " for cmNotifier to die");
  }
  incomingCallExecutor.join();
}
java
{ "resource": "" }
q160592
SessionDriver.requestResources
train
/**
 * Forwards a resource-grant request for this session to the cluster
 * manager.
 *
 * @throws IOException if the driver has already failed
 */
public void requestResources(List<ResourceRequest> wanted)
    throws IOException {
  if (failException != null) {
    throw failException;
  }
  cmNotifier.addCall(
      new ClusterManagerService.requestResource_args(sessionId, wanted));
}
java
{ "resource": "" }
q160593
SessionDriver.releaseResources
train
/**
 * Releases granted resources back to the cluster manager. The manager
 * identifies grants by id, so the requests are reduced to their ids first.
 *
 * @throws IOException if the driver has already failed
 */
public void releaseResources(List<ResourceRequest> released)
    throws IOException {
  if (failException != null) {
    throw failException;
  }
  List<Integer> ids = new ArrayList<Integer>(released.size());
  for (ResourceRequest request : released) {
    ids.add(request.getId());
  }
  cmNotifier.addCall(
      new ClusterManagerService.releaseResource_args(sessionId, ids));
}
java
{ "resource": "" }
q160594
UserDefinedValueAggregatorDescriptor.createInstance
train
/**
 * Reflectively instantiates {@code className} using the context class
 * loader and the (possibly non-public) constructor matching argArray.
 * Any reflection failure is rethrown as a RuntimeException with the
 * original cause preserved.
 */
public static Object createInstance(String className) {
  try {
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    Class<?> theFilterClass = Class.forName(className, true, classLoader);
    // FIX: parameterized Constructor<?> instead of the raw Constructor
    // type, which produced an unchecked/raw-type warning.
    Constructor<?> ctor = theFilterClass.getDeclaredConstructor(argArray);
    ctor.setAccessible(true);
    return ctor.newInstance();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
java
{ "resource": "" }
q160595
LightWeightBitSet.set
train
/**
 * Sets the bit at {@code pos}, throwing IndexOutOfBoundsException when the
 * position lies beyond the backing array.
 */
public static void set(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT; // each long holds 64 bits
  if (word >= bits.length) {
    throw new IndexOutOfBoundsException();
  }
  // Java masks long shift distances to pos & 63, so no explicit mod needed.
  bits[word] |= 1L << pos;
}
java
{ "resource": "" }
q160596
LightWeightBitSet.clear
train
/**
 * Clears the bit at {@code pos}, throwing IndexOutOfBoundsException when
 * the position lies beyond the backing array.
 */
public static void clear(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT; // each long holds 64 bits
  if (word >= bits.length) {
    throw new IndexOutOfBoundsException();
  }
  // Java masks long shift distances to pos & 63, so no explicit mod needed.
  bits[word] &= ~(1L << pos);
}
java
{ "resource": "" }
q160597
LightWeightBitSet.get
train
/**
 * Tests the bit at {@code pos}. Positions beyond the backing array read
 * as unset (false) rather than throwing.
 */
public static boolean get(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT; // each long holds 64 bits
  if (word >= bits.length) {
    return false;
  }
  return (bits[word] & (1L << pos)) != 0;
}
java
{ "resource": "" }
q160598
LightWeightBitSet.cardinality
train
public static int cardinality(long[] bits) { int card = 0; for (int i = bits.length - 1; i >= 0; i--) { long a = bits[i]; if (a == 0) continue; if (a == -1) { card += 64; continue; } // Successively collapse alternating bit groups into a sum. a = ((a >> 1) & 0x5555555555555555L) + (a & 0x5555555555555555L); a = ((a >> 2) & 0x3333333333333333L) + (a & 0x3333333333333333L); int b = (int) ((a >>> 32) + a); b = ((b >> 4) & 0x0f0f0f0f) + (b & 0x0f0f0f0f); b = ((b >> 8) & 0x00ff00ff) + (b & 0x00ff00ff); card += ((b >> 16) & 0x0000ffff) + (b & 0x0000ffff); } return card; }
java
{ "resource": "" }
q160599
HttpImageUploadStream.checkState
train
// Verifies a quorum of upload channels is still healthy. If the number of
// disabled channels reaches the majority size, the per-channel outcomes are
// partitioned into successes and failures (with each disabled channel's
// error status) and a QuorumException aggregating them is thrown.
void checkState() throws IOException { int majority = getMajoritySize(); int numDisabled = 0; for (HttpImageUploadChannel ch : uploadChannels) { numDisabled += ch.isDisabled() ? 1 : 0; } if (numDisabled >= majority) { Map<HttpImageUploadChannel, Void> successes = new HashMap<HttpImageUploadChannel, Void>(); Map<HttpImageUploadChannel, Throwable> exceptions = new HashMap<HttpImageUploadChannel, Throwable>(); for (HttpImageUploadChannel ch : uploadChannels) { if (ch.isDisabled()) { exceptions.put(ch, ch.getErrorStatus()); } else { successes.put(ch, null); } } throw QuorumException.create("Failed when uploading", successes, exceptions); } }
java
{ "resource": "" }