| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q161000 | ChecksumUtil.updateChunkChecksum | train | public static void updateChunkChecksum(
byte[] buf,
int checksumOff,
int dataOff,
int dataLen,
DataChecksum checksum
) throws IOException {
int bytesPerChecksum = checksum.getBytesPerChecksum();
int checksumSize = checksum.getChecksumSize();
int curChecksumOff = checksumOff;
int curDataOff = dataOff;
int numChunks = (dataLen + bytesPerChecksum - 1) / bytesPerChecksum;
int dataLeft = dataLen;
for (int i = 0; i < numChunks; i++) {
int len = Math.min(dataLeft, bytesPerChecksum);
checksum.reset();
checksum.update(buf, curDataOff, len);
checksum.writeValue(buf, curChecksumOff, false);
curDataOff += len;
curChecksumOff += checksumSize;
dataLeft -= len;
}
} | java | {
"resource": ""
} |
q161001 | IOUtils.copyBytesAndGenerateCRC | train | public static long copyBytesAndGenerateCRC(InputStream in, OutputStream out,
int buffSize, boolean close, IOThrottler throttler)
throws IOException {
PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
byte buf[] = new byte[buffSize];
Checksum sum = new NativeCrc32();
sum.reset();
try {
if (throttler != null) {
throttler.throttle((long) buffSize);
}
int bytesRead = in.read(buf);
while (bytesRead >= 0) {
sum.update(buf, 0, bytesRead);
out.write(buf, 0, bytesRead);
if ((ps != null) && ps.checkError()) {
throw new IOException("Unable to write to output stream.");
}
if (throttler != null) {
throttler.throttle((long) buffSize);
}
bytesRead = in.read(buf);
}
} finally {
if(close) {
out.close();
in.close();
}
}
return sum.getValue();
} | java | {
"resource": ""
} |
q161002 | IOUtils.writeFully | train | public static void writeFully(FileChannel fc, ByteBuffer buf,
long offset) throws IOException {
do {
offset += fc.write(buf, offset);
} while (buf.remaining() > 0);
} | java | {
"resource": ""
} |
/**
 * Overwrites {@code dst} with the state of {@code src} by delegating to
 * {@link ReflectionUtils#cloneWritableInto}. Thin pass-through kept for
 * backward compatibility.
 *
 * @param dst destination writable, overwritten in place
 * @param src source writable to copy from
 * @throws IOException if the underlying copy fails
 * @deprecated call ReflectionUtils.cloneWritableInto directly
 */
@Deprecated
public static void cloneInto(Writable dst, Writable src) throws IOException {
  ReflectionUtils.cloneWritableInto(dst, src);
}
"resource": ""
} |
q161004 | WritableUtils.readStringSafely | train | public static String readStringSafely(
DataInput in, int maxLength)
throws IOException, IllegalArgumentException {
int length = readVInt(in);
if (length < 0 || length > maxLength) {
throw new IllegalArgumentException(
"Encoded byte size for String was " + length +
", which is outside of 0.." +
maxLength + " range.");
}
byte [] bytes = new byte[length];
in.readFully(bytes, 0, length);
return Text.decode(bytes);
} | java | {
"resource": ""
} |
/**
 * Flushes buffered edits through to the BookKeeper-backed output stream.
 * The {@code durable} flag is not consulted here; the underlying flush
 * already blocks until the data is persisted (see inline comment).
 *
 * @throws IOException if this stream was previously aborted
 */
@Override
protected void flushAndSync(boolean durable) throws IOException {
  if (outputStream == null) {
    // Stream was aborted; refuse to write through it.
    throw new IOException("Trying to use aborted output stream!");
  }
  if (doubleBuf.isFlushed()) {
    // Nothing buffered -- skip the round trip.
    return;
  }
  // Under the covers, this invokes a method that does not return until
  // we have successfully persisted the buffer to a quorum of bookies.
  doubleBuf.flushTo(outputStream);
}
"resource": ""
} |
/**
 * Blocks until this node can exit: waits for the standby thread (if any) to
 * finish, waits out any in-flight failover, and either shuts the node down
 * (still standby => restart from scratch) or waits for the encapsulated
 * namenode to terminate.
 */
public void waitForRestart() {
  if (standbyThread != null) {
    try {
      // if this is the standby avatarnode, then wait for the Standby to exit
      standbyThread.join();
    } catch (InterruptedException ie) {
      //eat it up
    }
    standbyThread = null;
    LOG.info("waitForRestart: Standby thread exited.");
    InjectionHandler.processEvent(InjectionEvent.AVATARNODE_WAIT_FOR_RESTART);
    // Poll until any failover in progress reaches a terminal state.
    while (failoverState == FailoverState.START_FAILOVER
        || failoverState == FailoverState.AWAIT_FAILOVER) {
      LOG.info("Current state : " + failoverState
          + ". Waiting for failover ....");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        throw new RuntimeException("waitForRestart() interrupted");
      }
    }
    // if we are still in standbymode, that means we need to restart from
    // scratch.
    if (getAvatar() == Avatar.STANDBY) {
      runInfo.isRunning = false;
      LOG.info("waitForRestart Stopping encapsulated namenode.");
      super.stop(); // terminate encapsulated namenode
      super.join(); // wait for encapsulated namenode to exit
      shutdownStandby();
      LOG.info("waitForRestart exiting");
      return;
    }
  }
  super.join(); // wait for encapsulated namenode
}
"resource": ""
} |
q161007 | AvatarNode.getProtocolVersion | train | public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(AvatarProtocol.class.getName())) {
return AvatarProtocol.versionID;
} else {
return super.getProtocolVersion(protocol, clientVersion);
}
} | java | {
"resource": ""
} |
/**
 * Sanity-checks that edit-log journals are healthy enough to fail over:
 * passes if the shared journal is available, or if the number of live
 * journals matches the configured count. Otherwise logs and throws.
 * (InjectionHandler conditions exist so tests can force the failure path.)
 *
 * @throws IOException when neither availability condition holds
 */
private void verifyEditStreams() throws IOException {
  // we check if the shared stream is still available
  if (getFSImage().getEditLog().isSharedJournalAvailable()
      && InjectionHandler
      .trueCondition(InjectionEvent.AVATARNODE_CHECKEDITSTREAMS)) {
    return;
  }
  // for sanity check if the number of available journals
  // is equal to the number of configured ones
  int expectedEditStreams = NNStorageConfiguration.getNamespaceEditsDirs(
      confg).size();
  int actualEditStreams = this.namesystem.getFSImage().getEditLog()
      .getNumberOfAvailableJournals();
  if (expectedEditStreams == actualEditStreams
      && InjectionHandler
      .trueCondition(InjectionEvent.AVATARNODE_CHECKEDITSTREAMS)) {
    return;
  }
  String msg = "Failover: Cannot proceed - shared journal is not available. "
      + "Number of required edit streams: " + expectedEditStreams
      + " current number: " + actualEditStreams;
  LOG.fatal(msg);
  throw new IOException(msg);
}
"resource": ""
} |
/**
 * Initiates shutdown of this avatar node on a background thread, after
 * verifying superuser privilege and journal health. Idempotent: a second
 * call while already shutting down returns immediately.
 *
 * @param synchronous if true, block until the shutdown thread completes
 * @throws IOException on permission failure, unhealthy edit streams, or
 *                     interruption while waiting for the shutdown thread
 */
public synchronized void shutdown(boolean synchronous) throws IOException {
  LOG.info("Failover: Asynchronous shutdown for: " + currentAvatar);
  // check permissions before any other actions
  super.namesystem.checkSuperuserPrivilege();
  if (runInfo.shutdown) {
    LOG.info("Failover: Node already shut down");
    return;
  }
  // check edit streams
  // if this fails, we still have a chance to fix it
  // and shutdown again
  verifyEditStreams();
  runInfo.shutdown = true;
  Thread shutdownThread = new ShutdownAvatarThread(this);
  shutdownThread.setName("ShutDown thread for : " + serverAddress);
  // Non-daemon so the JVM stays alive until shutdown work finishes.
  shutdownThread.setDaemon(false);
  shutdownThread.start();
  if (synchronous) {
    LOG.info("Failover: Waiting for shutdown to complete");
    try {
      shutdownThread.join();
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }
}
"resource": ""
} |
/**
 * Stops the avatardatanode RPC server and the inherited namenode RPC
 * servers, then waits for the avatardatanode server to fully terminate.
 *
 * @param interruptClientHandlers whether in-flight handler threads should
 *                                be interrupted rather than drained
 * @throws IOException if interrupted while waiting for servers to stop
 */
protected void stopRPC(boolean interruptClientHandlers) throws IOException {
  try {
    // stop avatardatanode server
    stopRPCInternal(server, "avatardatanode", interruptClientHandlers);
    // stop namenode rpc (client, datanode)
    super.stopRPC(interruptClientHandlers);
    // wait for avatardatanode rpc
    stopWaitRPCInternal(server, "avatardatanode");
  } catch (InterruptedException ex) {
    throw new IOException("stopRPC() interrupted", ex);
  }
}
"resource": ""
} |
q161011 | AvatarNode.printUsage | train | private static void printUsage() {
System.err.println(
"Usage: java AvatarNode [" +
StartupOption.STANDBY.getName() + "] | [" +
StartupOption.NODEZERO.getName() + "] | [" +
StartupOption.NODEONE.getName() + "] | [" +
StartupOption.FORMAT.getName() + "] | [" +
StartupOption.UPGRADE.getName() + "] | [" +
StartupOption.ROLLBACK.getName() + "] | [" +
StartupOption.FINALIZE.getName() + "] | [" +
StartupOption.IMPORT.getName() + "]");
} | java | {
"resource": ""
} |
q161012 | AvatarNode.validateStartupOptions | train | static void validateStartupOptions(StartupInfo startInfo) throws IOException {
// sync cannot be specified along with format or finalize
if (startInfo.isStandby) {
if (startInfo.startOpt == StartupOption.FORMAT ||
startInfo.startOpt == StartupOption.FINALIZE ||
startInfo.startOpt == StartupOption.ROLLBACK ||
startInfo.startOpt == StartupOption.UPGRADE) {
throw new IOException("Standby avatar node cannot be started with " +
startInfo.startOpt + " option.");
}
}
} | java | {
"resource": ""
} |
/**
 * Parses AvatarNode command-line arguments into a StartupInfo.
 * Returns null on any unrecognized argument, or when -service is given
 * without a value (callers treat null as "print usage").
 * Defaults: REGULAR start, instance NODEZERO, not standby, no service name.
 */
private static StartupInfo parseArguments(String args[]) {
  InstanceId instance = InstanceId.NODEZERO;
  StartupOption startOpt = StartupOption.REGULAR;
  boolean isStandby= false;
  String serviceName = null;
  boolean force = false;
  int argsLen = (args == null) ? 0 : args.length;
  for (int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.SERVICE.getName().equalsIgnoreCase(cmd)) {
      // -service consumes the following argument as its value.
      if (++i < argsLen) {
        serviceName = args[i];
      } else {
        return null;
      }
    } else if (StartupOption.STANDBY.getName().equalsIgnoreCase(cmd)) {
      isStandby = true;
    } else if (StartupOption.NODEZERO.getName().equalsIgnoreCase(cmd)) {
      instance = InstanceId.NODEZERO;
    } else if (StartupOption.NODEONE.getName().equalsIgnoreCase(cmd)) {
      instance = InstanceId.NODEONE;
    } else if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.FORMATFORCE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMATFORCE;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else if (StartupOption.FORCE.getName().equalsIgnoreCase(cmd)) {
      force = true;
    } else {
      // Unknown flag: signal bad usage to the caller.
      return null;
    }
  }
  return new StartupInfo(startOpt, instance, isStandby, serviceName, force);
}
"resource": ""
} |
q161014 | AvatarNode.initializeGenericKeys | train | public static void initializeGenericKeys(Configuration conf, String serviceKey) {
if ((serviceKey == null) || serviceKey.isEmpty()) {
return;
}
NameNode.initializeGenericKeys(conf, serviceKey);
DFSUtil.setGenericConf(conf, serviceKey, AVATARSERVICE_SPECIFIC_KEYS);
// adjust meta directory names for this service
adjustMetaDirectoryNames(conf, serviceKey);
} | java | {
"resource": ""
} |
q161015 | AvatarNode.adjustMetaDirectoryNames | train | public static void adjustMetaDirectoryNames(Configuration conf, String serviceKey) {
adjustMetaDirectoryName(conf, DFS_SHARED_NAME_DIR0_KEY, serviceKey);
adjustMetaDirectoryName(conf, DFS_SHARED_NAME_DIR1_KEY, serviceKey);
adjustMetaDirectoryName(conf, DFS_SHARED_EDITS_DIR0_KEY, serviceKey);
adjustMetaDirectoryName(conf, DFS_SHARED_EDITS_DIR1_KEY, serviceKey);
} | java | {
"resource": ""
} |
q161016 | AvatarNode.isPrimaryAlive | train | private static void isPrimaryAlive(String zkRegistry) throws IOException {
String parts[] = zkRegistry.split(":");
if (parts.length != 2) {
throw new IllegalArgumentException("Invalid Address : " + zkRegistry);
}
String host = parts[0];
int port = Integer.parseInt(parts[1]);
InetSocketAddress clientSocket = new InetSocketAddress(host, port);
ServerSocket socket = new ServerSocket();
socket.bind(clientSocket);
socket.close();
} | java | {
"resource": ""
} |
/**
 * Returns the client RPC address of the *other* avatar instance: when this
 * node is NODEZERO the node-one address keys are consulted, and vice versa.
 * Falls back from the rpc-address key to the legacy fs.default.nameN key.
 *
 * @throws IOException for an unrecognized instance id
 */
static InetSocketAddress getRemoteNamenodeAddress(Configuration conf,
                                                  InstanceId instance)
  throws IOException {
  String fs = null;
  if (instance == InstanceId.NODEZERO) {
    // Remote peer of node zero is node one ("...1" keys).
    fs = conf.get(DFS_NAMENODE_RPC_ADDRESS1_KEY);
    if (fs == null)
      fs = conf.get("fs.default.name1");
  } else if (instance == InstanceId.NODEONE) {
    // Remote peer of node one is node zero ("...0" keys).
    fs = conf.get(DFS_NAMENODE_RPC_ADDRESS0_KEY);
    if (fs == null)
      fs = conf.get("fs.default.name0");
  } else {
    throw new IOException("Unknown instance " + instance);
  }
  if(fs != null) {
    // Copy the conf so the caller's configuration is not mutated.
    Configuration newConf = new Configuration(conf);
    newConf.set(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY, fs);
    conf = newConf;
  }
  return NameNode.getClientProtocolAddress(conf);
}
"resource": ""
} |
q161018 | AvatarNode.getRemoteNamenodeHttpName | train | static String getRemoteNamenodeHttpName(Configuration conf,
InstanceId instance)
throws IOException {
if (instance == InstanceId.NODEZERO) {
return conf.get("dfs.http.address1");
} else if (instance == InstanceId.NODEONE) {
return conf.get("dfs.http.address0");
} else {
throw new IOException("Unknown instance " + instance);
}
} | java | {
"resource": ""
} |
q161019 | SocksSocketFactory.setProxy | train | private void setProxy(String proxyStr) {
String[] strs = proxyStr.split(":", 2);
if (strs.length != 2)
throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
String host = strs[0];
int port = Integer.parseInt(strs[1]);
this.proxy =
new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host,
port));
} | java | {
"resource": ""
} |
q161020 | PoolMetadata.addResourceMetadata | train | public void addResourceMetadata(String resourceName,
ResourceMetadata resourceMetadata) {
if (resourceMetadataMap.put(resourceName, resourceMetadata) != null) {
throw new RuntimeException("Resource name " + resourceName +
" already exists!");
}
} | java | {
"resource": ""
} |
q161021 | PoolMetadata.getResourceMetadata | train | public ResourceMetadata getResourceMetadata(String resourceName) {
if (!resourceMetadataMap.containsKey(resourceName)) {
throw new RuntimeException("No resource metadata for " + resourceName);
}
return resourceMetadataMap.get(resourceName);
} | java | {
"resource": ""
} |
/**
 * Deserializes the "clusterNodeInfo" JSON object (name, address, total,
 * free, resourceInfos) into a fresh ClusterNodeInfo. Field order must
 * match what {@code write(JsonGenerator)} emits.
 *
 * @throws IOException on malformed or out-of-order input
 */
private void readClusterNodeInfo(CoronaSerializer coronaSerializer)
  throws IOException {
  coronaSerializer.readField("clusterNodeInfo");
  clusterNodeInfo = new ClusterNodeInfo();
  // Expecting the START_OBJECT token for clusterNodeInfo
  coronaSerializer.readStartObjectToken("clusterNodeInfo");
  coronaSerializer.readField("name");
  clusterNodeInfo.name = coronaSerializer.readValueAs(String.class);
  coronaSerializer.readField("address");
  clusterNodeInfo.address = coronaSerializer.readValueAs(InetAddress.class);
  coronaSerializer.readField("total");
  clusterNodeInfo.total = coronaSerializer.readValueAs(ComputeSpecs.class);
  coronaSerializer.readField("free");
  clusterNodeInfo.free = coronaSerializer.readValueAs(ComputeSpecs.class);
  coronaSerializer.readField("resourceInfos");
  clusterNodeInfo.resourceInfos = coronaSerializer.readValueAs(Map.class);
  // Expecting the END_OBJECT token for clusterNodeInfo
  coronaSerializer.readEndObjectToken("clusterNodeInfo");
}
/**
 * Deserializes the "grants" JSON object: one nested object per grant,
 * each holding a GrantId and a ResourceRequestInfo. Every parsed grant is
 * re-registered through addGrant(), which also rebuilds the per-resource
 * stats maps as a side effect.
 *
 * @throws IOException on malformed input
 */
private void readGrants(CoronaSerializer coronaSerializer)
  throws IOException {
  // Expecting the START_OBJECT token for grants
  coronaSerializer.readStartObjectToken("grants");
  JsonToken current = coronaSerializer.nextToken();
  // Iterate over one field per grant until the enclosing object closes.
  while (current != JsonToken.END_OBJECT) {
    // We can access the key for the grant, but it is not required
    // Expecting the START_OBJECT token for the grant
    coronaSerializer.readStartObjectToken("grant");
    coronaSerializer.readField("grantId");
    GrantId grantId = new GrantId(coronaSerializer);
    coronaSerializer.readField("grant");
    ResourceRequestInfo resourceRequestInfo =
        new ResourceRequestInfo(coronaSerializer);
    // Expecting the END_OBJECT token for the grant
    coronaSerializer.readEndObjectToken("grant");
    // This will update the grants map and the resourceTypeToStatsMap map
    addGrant(grantId.getSessionId(), resourceRequestInfo);
    current = coronaSerializer.nextToken();
  }
}
/**
 * Serializes this node as JSON: a "clusterNodeInfo" object followed by a
 * "grants" object keyed by each grant's unique id. Must stay in sync with
 * readClusterNodeInfo()/readGrants(), which parse fields in this order.
 *
 * @throws IOException propagated from the generator
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  // clusterNodeInfo begins
  jsonGenerator.writeFieldName("clusterNodeInfo");
  jsonGenerator.writeStartObject();
  jsonGenerator.writeStringField("name", clusterNodeInfo.name);
  jsonGenerator.writeObjectField("address", clusterNodeInfo.address);
  jsonGenerator.writeObjectField("total", clusterNodeInfo.total);
  jsonGenerator.writeObjectField("free", clusterNodeInfo.free);
  jsonGenerator.writeObjectField("resourceInfos",
      clusterNodeInfo.resourceInfos);
  jsonGenerator.writeEndObject();
  // clusterNodeInfo ends
  // grants begins
  jsonGenerator.writeFieldName("grants");
  jsonGenerator.writeStartObject();
  for (Map.Entry<GrantId, ResourceRequestInfo> entry : grants.entrySet()) {
    jsonGenerator.writeFieldName(entry.getKey().unique);
    jsonGenerator.writeStartObject();
    jsonGenerator.writeFieldName("grantId");
    entry.getKey().write(jsonGenerator);
    jsonGenerator.writeFieldName("grant");
    entry.getValue().write(jsonGenerator);
    jsonGenerator.writeEndObject();
  }
  jsonGenerator.writeEndObject();
  // grants ends
  jsonGenerator.writeEndObject();
  // We skip the hostNode and lastHeartbeatTime as they need not be persisted.
  // resourceTypeToMaxCpu and resourceTypeToStatsMap can be rebuilt using the
  // conf and the grants respectively.
}
/**
 * Initializes the per-resource-type CPU limits for this node from its
 * total CPU count, using the configured cpu-to-resource partitioning
 * (see getResourceTypeToCountMap for the fallback rules).
 */
public void initResourceTypeToMaxCpuMap(Map<Integer, Map<ResourceType,
    Integer>> cpuToResourcePartitioning) {
  resourceTypeToMaxCpu =
      getResourceTypeToCountMap((int) clusterNodeInfo.total.numCpus,
          cpuToResourcePartitioning);
}
q161026 | ClusterNode.getResourceTypeToCountMap | train | public static Map<ResourceType, Integer> getResourceTypeToCountMap(
int numCpus,
Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning) {
Map<ResourceType, Integer> ret =
cpuToResourcePartitioning.get(numCpus);
if (ret == null) {
Map<ResourceType, Integer> oneCpuMap = cpuToResourcePartitioning.get(1);
if (oneCpuMap == null) {
throw new RuntimeException(
"No matching entry for cpu count: " + numCpus +
" in node and no 1 cpu map");
}
ret = new EnumMap<ResourceType, Integer>(ResourceType.class);
for (ResourceType key: oneCpuMap.keySet()) {
ret.put(key, oneCpuMap.get(key).intValue() * numCpus);
}
}
return ret;
} | java | {
"resource": ""
} |
/**
 * Walks the INodesUnderConstruction section of the fsimage, reporting each
 * inode's fields to the visitor. Datanode descriptor records at the end of
 * each entry are read and discarded (see inline comment).
 *
 * @param in         image stream positioned at the section's entry count
 * @param v          visitor receiving the parsed elements
 * @param skipBlocks whether block lists should be skipped rather than visited
 * @throws IOException on read failure or interruption
 */
private void processINodesUC(DataInputStream in, ImageVisitor v,
    boolean skipBlocks) throws IOException {
  int numINUC = in.readInt();
  v.visitEnclosingElement(ImageElement.INODES_UNDER_CONSTRUCTION,
      ImageElement.NUM_INODES_UNDER_CONSTRUCTION, numINUC);
  for(int i = 0; i < numINUC; i++) {
    checkInterruption();
    v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION);
    byte [] name = FSImageSerialization.readBytes(in);
    String n = new String(name, "UTF8");
    v.visit(ImageElement.INODE_PATH, n);
    // Inode ids only exist in image layouts that support ADD_INODE_ID.
    if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
      v.visit(ImageElement.INODE_ID, in.readLong());
    }
    v.visit(ImageElement.REPLICATION, in.readShort());
    v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
    v.visit(ImageElement.PREFERRED_BLOCK_SIZE, in.readLong());
    int numBlocks = in.readInt();
    processBlocks(in, v, numBlocks, skipBlocks);
    processPermission(in, v);
    v.visit(ImageElement.CLIENT_NAME, FSImageSerialization.readString(in));
    v.visit(ImageElement.CLIENT_MACHINE, FSImageSerialization.readString(in));
    // Skip over the datanode descriptors, which are still stored in the
    // file but are not used by the datanode or loaded into memory
    int numLocs = in.readInt();
    for(int j = 0; j < numLocs; j++) {
      in.readShort();
      in.readLong();
      in.readLong();
      in.readLong();
      in.readInt();
      FSImageSerialization.readString(in);
      FSImageSerialization.readString(in);
      WritableUtils.readEnum(in, AdminStates.class);
    }
    v.leaveEnclosingElement(); // INodeUnderConstruction
  }
  v.leaveEnclosingElement(); // INodesUnderConstruction
}
/**
 * Reports (or skips) an inode's block list. A numBlocks of -1 marks a
 * directory and -2 a symlink -- neither has blocks. When skipping, the
 * fixed per-block record size (3 longs, plus an int checksum on layouts
 * supporting BLOCK_CHECKSUM) is computed in bits and skipped wholesale.
 *
 * @throws IOException on read failure or a short skip
 */
private void processBlocks(DataInputStream in, ImageVisitor v,
    int numBlocks, boolean skipBlocks) throws IOException {
  v.visitEnclosingElement(ImageElement.BLOCKS,
      ImageElement.NUM_BLOCKS, numBlocks);
  // directory or symlink, no blocks to process
  if(numBlocks == -1 || numBlocks == -2) {
    v.leaveEnclosingElement(); // Blocks
    return;
  }
  if(skipBlocks) {
    // Sizes below are in BITS (Long.SIZE/Integer.SIZE); converted to
    // bytes with the /8 just before skipping.
    int fieldsBytes = Long.SIZE * 3;
    if (LayoutVersion.supports(Feature.BLOCK_CHECKSUM, imageVersion)) {
      // For block checksum
      fieldsBytes += Integer.SIZE;
    }
    int bytesToSkip = ((fieldsBytes /* fields */) / 8 /*bits*/) * numBlocks;
    if(in.skipBytes(bytesToSkip) != bytesToSkip)
      throw new IOException("Error skipping over blocks");
  } else {
    for(int j = 0; j < numBlocks; j++) {
      v.visitEnclosingElement(ImageElement.BLOCK);
      v.visit(ImageElement.BLOCK_ID, in.readLong());
      v.visit(ImageElement.NUM_BYTES, in.readLong());
      v.visit(ImageElement.GENERATION_STAMP, in.readLong());
      if (LayoutVersion.supports(Feature.BLOCK_CHECKSUM, imageVersion)) {
        v.visit(ImageElement.BLOCK_CHECKSUM, in.readInt());
      }
      v.leaveEnclosingElement(); // Block
    }
  }
  v.leaveEnclosingElement(); // Blocks
}
/**
 * Reads one permission record (user, group, mode short) from the image
 * and reports it to the visitor as an enclosing PERMISSIONS element.
 *
 * @throws IOException on read failure
 */
private void processPermission(DataInputStream in, ImageVisitor v)
  throws IOException {
  v.visitEnclosingElement(ImageElement.PERMISSIONS);
  v.visit(ImageElement.USER_NAME, Text.readStringOpt(in));
  v.visit(ImageElement.GROUP_NAME, Text.readStringOpt(in));
  FsPermission fsp = new FsPermission(in.readShort());
  v.visit(ImageElement.PERMISSION_STRING, fsp.toString());
  v.leaveEnclosingElement(); // Permissions
}
/**
 * Dispatches inode-section parsing based on the image layout: newer
 * layouts store local names in tree order, older layouts store full
 * path names per inode.
 *
 * @throws IOException on read failure
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks) throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);
  if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
    processLocalNameINodes(in, v, numInodes, skipBlocks);
  } else { // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  }
  v.leaveEnclosingElement(); // INodes
}
/**
 * Parses a single inode record and reports its fields to the visitor.
 * When parentName is non-null the stored name is a local name and is
 * joined onto the parent path; otherwise it is already a full path.
 * Field presence is gated on the image layout version (inode ids,
 * hardlink/raid types, access time, diskspace quota). The numBlocks
 * sentinel encodes the inode kind: >=0 file, -1 directory (quotas
 * follow), -2 symlink (target string follows).
 *
 * @throws IOException on read failure or interruption
 */
private void processINode(DataInputStream in, ImageVisitor v,
    boolean skipBlocks, String parentName) throws IOException {
  checkInterruption();
  v.visitEnclosingElement(ImageElement.INODE);
  String pathName = FSImageSerialization.readString(in);
  if (parentName != null) { // local name
    pathName = "/" + pathName;
    if (!"/".equals(parentName)) { // children of non-root directory
      pathName = parentName + pathName;
    }
  }
  v.visit(ImageElement.INODE_PATH, pathName);
  if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
    v.visit(ImageElement.INODE_ID, in.readLong());
  }
  if (LayoutVersion.supports(Feature.HARDLINK, imageVersion)) {
    // Inode type byte distinguishes hardlinked / raided / regular inodes,
    // each with its own trailing payload.
    byte inodeType = in.readByte();
    if (inodeType == INode.INodeType.HARDLINKED_INODE.type) {
      v.visit(ImageElement.INODE_TYPE, INode.INodeType.HARDLINKED_INODE.toString());
      long hardlinkID = WritableUtils.readVLong(in);
      v.visit(ImageElement.INODE_HARDLINK_ID, hardlinkID);
    } else if (inodeType == INode.INodeType.RAIDED_INODE.type) {
      v.visit(ImageElement.INODE_TYPE, INode.INodeType.RAIDED_INODE.toString());
      String codecId = WritableUtils.readString(in);
      v.visit(ImageElement.RAID_CODEC_ID, codecId);
    } else {
      v.visit(ImageElement.INODE_TYPE, INode.INodeType.REGULAR_INODE.toString());
    }
  }
  v.visit(ImageElement.REPLICATION, in.readShort());
  v.visit(ImageElement.MODIFICATION_TIME, in.readLong());
  if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
    v.visit(ImageElement.ACCESS_TIME, in.readLong());
  v.visit(ImageElement.BLOCK_SIZE, in.readLong());
  int numBlocks = in.readInt();
  processBlocks(in, v, numBlocks, skipBlocks);
  // File or directory
  if (numBlocks > 0 || numBlocks == -1) {
    // Directories (-1) carry quotas; files report -1 placeholders.
    v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
    if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
      v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
  }
  if (numBlocks == -2) {
    // Symlink: the link target follows.
    v.visit(ImageElement.SYMLINK, Text.readString(in));
  }
  processPermission(in, v);
  v.leaveEnclosingElement(); // INode
}
/**
 * Establishes a new, cluster-unique epoch with a quorum of journal nodes:
 * queries every logger for its last promised epoch, proposes max+1, and
 * records the accepted epoch on the logger set.
 *
 * @return each logger's newEpoch response (used by recovery)
 * @throws IOException if a write quorum cannot be reached
 */
Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch()
  throws IOException {
  Preconditions.checkState(!loggers.isEpochEstablished(),
      "epoch already created");
  Map<AsyncLogger, GetJournalStateResponseProto> lastPromises =
      loggers.waitForWriteQuorum(loggers.getJournalState(),
          getJournalStateTimeoutMs, "getJournalState()");
  long maxPromised = Long.MIN_VALUE;
  for (GetJournalStateResponseProto resp : lastPromises.values()) {
    maxPromised = Math.max(maxPromised, resp.getLastPromisedEpoch());
  }
  assert maxPromised >= 0;
  // Propose one greater than anything any JN has promised so far.
  long myEpoch = maxPromised + 1;
  Map<AsyncLogger, NewEpochResponseProto> resps =
      loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, myEpoch),
          newEpochTimeoutMs, "newEpoch(" + myEpoch + ")");
  loggers.setEpoch(myEpoch);
  return resps;
}
q161033 | QuorumJournalManager.hasSomeDataInternal | train | private boolean hasSomeDataInternal(boolean image) throws IOException {
QuorumCall<AsyncLogger, Boolean> call = image ? loggers.isImageFormatted() :
loggers.isJournalFormatted();
try {
call.waitFor(loggers.size(), 0, 0, hasDataTimeoutMs, "hasSomeData");
} catch (InterruptedException e) {
throw new IOException("Interrupted while determining if JNs have data");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for response from loggers");
}
if (call.countExceptions() > 0) {
call.throwQuorumException(
"Unable to check if JNs are ready for formatting");
}
// If any of the loggers returned with a non-empty manifest, then
// we should prompt for format.
for (Boolean hasData : call.getResults().values()) {
if (hasData) {
return true;
}
}
// Otherwise, none were formatted, we can safely format.
return false;
} | java | {
"resource": ""
} |
/**
 * Collects edit-log input streams covering transactions from
 * {@code fromTxnId} onward: fetches every journal node's manifest
 * (requiring all responses), builds a URL-backed stream per remote log
 * segment, optionally filters in-progress segments, and chains the
 * results into redundant streams.
 *
 * @throws IOException if manifests cannot be gathered from all loggers
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean validateInProgressSegments)
    throws IOException {
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId);
  // we insist on getting all responses, even if they are to be exceptions
  // this will fail if we cannot get majority of successes
  Map<AsyncLogger, RemoteEditLogManifest> resps = loggers
      .waitForReadQuorumWithAllResponses(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  if(LOG.isDebugEnabled()) {
    LOG.debug("selectInputStream manifests:\n" +
        Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  }
  // Priority queue orders segments so chaining can pick the best per range.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      EditLogInputStream elis = new URLLogInputStream(logger,
          remoteLog.getStartTxId(), httpConnectReadTimeoutMs);
      if (elis.isInProgress() && !inProgressOk) {
        continue;
      }
      allStreams.add(elis);
    }
  }
  // we pass 0 as min redundance as we do not care about this here
  JournalSet.chainAndMakeRedundantStreams(
      streams, allStreams, fromTxnId, inProgressOk, 0);
}
/**
 * Returns a stream that uploads a checkpoint image for {@code txid} to the
 * journal nodes over HTTP, tagged with the current writer epoch.
 *
 * @throws IOException propagated from stream construction
 */
@Override
public OutputStream getCheckpointOutputStream(long txid) throws IOException {
  return new HttpImageUploadStream(httpAddresses, journalId, nsInfo, txid,
      loggers.getEpoch(), imageUploadBufferSize, imageUploadMaxBufferedChunks);
}
/**
 * Asks a write quorum of journal nodes to persist the image's md5 digest
 * and promote the uploaded checkpoint for {@code txid}.
 *
 * @return true on quorum success; false (after logging) on any IOException
 */
@Override
public boolean saveDigestAndRenameCheckpointImage(long txid, MD5Hash digest) {
  try {
    LOG.info("Saving md5: " + digest + " for txid: " + txid);
    QuorumCall<AsyncLogger, Void> q = loggers
        .saveDigestAndRenameCheckpointImage(txid, digest);
    loggers.waitForWriteQuorum(q, writeTxnsTimeoutMs,
        "saveDigestAndRenameCheckpointImage(" + txid + ")");
    return true;
  } catch (IOException e) {
    // Failure is reported to the caller via the return value, not thrown.
    LOG.error("Exception when rolling the image:", e);
    return false;
  }
}
/**
 * Gathers the image manifests from all journal nodes (all responses
 * required) and merges them into a single de-duplicated manifest.
 *
 * @throws IOException if responses cannot be gathered or conflict
 */
@Override
public RemoteImageManifest getImageManifest(long fromTxnId)
  throws IOException {
  QuorumCall<AsyncLogger, RemoteImageManifest> q = loggers
      .getImageManifest(fromTxnId);
  Map<AsyncLogger, RemoteImageManifest> resps = loggers
      .waitForReadQuorumWithAllResponses(q, getImageManifestTimeoutMs,
          "getImageManifest");
  return createImageManifest(resps.values());
}
q161038 | QuorumJournalManager.createImageManifest | train | static RemoteImageManifest createImageManifest(
Collection<RemoteImageManifest> resps) throws IOException {
// found valid images (with md5 hash)
Map<Long, RemoteImage> images = Maps.newHashMap();
for (RemoteImageManifest rm : resps) {
for (RemoteImage ri : rm.getImages()) {
if (ri.getDigest() == null) {
LOG.info("Skipping: " + ri + " as it does not have md5 digest");
continue;
}
if (images.containsKey(ri.getTxId())) {
// we already have seen this image
// two images from different nodes should be the same
if (!images.get(ri.getTxId()).equals(ri)) {
throw new IOException(
"Images received from different nodes do not match: "
+ images.get(ri.getTxId()) + " vs: " + ri);
}
} else {
// store image
images.put(ri.getTxId(), ri);
}
}
}
List<RemoteImage> result = Lists.newArrayList();
for (RemoteImage ri : images.values()) {
result.add(ri);
}
// we need to sort the images
Collections.sort(result);
return new RemoteImageManifest(result);
} | java | {
"resource": ""
} |
/**
 * Opens a URL-backed input stream for the checkpoint image at {@code txid},
 * wrapping it together with its digest and size metadata.
 *
 * @throws IOException when no stream can be obtained for the txid
 */
@Override
public ImageInputStream getImageInputStream(long txid) throws IOException {
  URLImageInputStream stream = loggers.getImageInputStream(txid,
      httpConnectReadTimeoutMs);
  if (stream == null) {
    throw new IOException("Cannot obtain input stream for image: " + txid);
  }
  return new ImageInputStream(txid, stream, stream.getImageDigest(),
      stream.toString(), stream.getSize());
}
q161040 | MD5Hash.read | train | public static MD5Hash read(DataInput in) throws IOException {
MD5Hash result = new MD5Hash();
result.readFields(in);
return result;
} | java | {
"resource": ""
} |
q161041 | MD5Hash.digest | train | public static MD5Hash digest(InputStream in) throws IOException {
final byte[] buffer = new byte[4*1024];
int fileLength = 0;
final MessageDigest digester = DIGESTER_FACTORY.get();
for(int n; (n = in.read(buffer)) != -1; ) {
digester.update(buffer, 0, n);
fileLength += n;
}
return new MD5Hash(digester.digest(), fileLength);
} | java | {
"resource": ""
} |
q161042 | MD5Hash.digest | train | public static MD5Hash digest(byte[] data, int start, int len) {
byte[] digest;
MessageDigest digester = DIGESTER_FACTORY.get();
digester.update(data, start, len);
digest = digester.digest();
return new MD5Hash(digest);
} | java | {
"resource": ""
} |
q161043 | CleanupQueue.forceClean | train | public void forceClean() {
while(true) {
PathDeletionContext context = null;
try {
context = cleanupThread.queue.poll(50L, TimeUnit.MILLISECONDS);
if (context == null) {
return;
}
if (!deletePath(context)) {
LOG.warn("forceClean:Unable to delete path " + context.fullPath);
}
else {
LOG.info("foceClean DELETED " + context.fullPath);
}
} catch (InterruptedException e) {
return;
} catch (Exception e) {
LOG.warn("Error deleting path " + context.fullPath + ": " + e);
}
}
} | java | {
"resource": ""
} |
q161044 | UtilizationCollector.stop | train | public void stop() {
running = false;
if (server != null) server.stop();
if (aggregateDaemon != null) aggregateDaemon.interrupt();
} | java | {
"resource": ""
} |
/**
 * Returns the latest utilization for the given tracker host, or null when
 * no report has been received for it.
 */
@Override
public TaskTrackerUtilization getTaskTrackerUtilization(String hostName)
  throws IOException {
  // NOTE(review): two separate map lookups -- if taskTrackerReports can be
  // mutated concurrently, the second get() could return null and NPE.
  // Consider a single get() into a local; verify the map's value type.
  if (taskTrackerReports.get(hostName) == null) {
    return null;
  }
  return taskTrackerReports.get(hostName).getTaskTrackerUtilization();
}
q161046 | UtilizationCollector.main | train | public static void main(String argv[]) throws Exception {
// Entry point: log startup banner, run the collector until it exits,
// and exit(-1) on any unexpected throwable.
StringUtils.startupShutdownMessage(UtilizationCollector.class, argv, LOG);
try {
Configuration conf = new Configuration();
UtilizationCollector collector = new UtilizationCollector(conf);
// NOTE(review): collector can never be null here; the check is redundant.
if (collector != null) {
collector.join();
}
} catch (Throwable e) {
LOG.error(StringUtils.stringifyException(e));
System.exit(-1);
}
} | java | {
"resource": ""
} |
q161047 | HadoopArchives.checkPaths | train | private static void checkPaths(Configuration conf, List<Path> paths) throws
IOException {
// Fail fast with FileNotFoundException if any source path is missing.
for (Path p : paths) {
FileSystem fs = p.getFileSystem(conf);
if (!fs.exists(p)) {
throw new FileNotFoundException("Source " + p + " does not exist.");
}
}
} | java | {
"resource": ""
} |
q161048 | HadoopArchives.relPathToRoot | train | private Path relPathToRoot(Path fullPath, Path root) {
// Compute fullPath relative to root by walking parent components rather
// than substring math, so path normalization quirks cannot break it.
// Returns "/" when the paths have equal depth, the relative path when
// fullPath is deeper, and null otherwise (fullPath not under root).
// just take some effort to do it
// rather than just using substring
// so that we do not break sometime later
Path justRoot = new Path(Path.SEPARATOR);
if (fullPath.depth() == root.depth()) {
return justRoot;
}
else if (fullPath.depth() > root.depth()) {
// Rebuild the relative path from the leaf upward.
Path retPath = new Path(fullPath.getName());
Path parent = fullPath.getParent();
for (int i=0; i < (fullPath.depth() - root.depth() -1); i++) {
retPath = new Path(parent.getName(), retPath);
parent = parent.getParent();
}
return new Path(justRoot, retPath);
}
return null;
} | java | {
"resource": ""
} |
q161049 | HadoopArchives.cleanJobDirectory | train | private void cleanJobDirectory() {
// Best-effort recursive delete of the temporary job directory; failures
// are only logged since the archive itself has already been produced.
try {
FileSystem jobfs = jobDirectory.getFileSystem(conf);
jobfs.delete(jobDirectory, true);
} catch(IOException ioe) {
LOG.warn("Unable to clean tmp directory " + jobDirectory, ioe);
}
} | java | {
"resource": ""
} |
q161050 | HadoopArchives.archive | train | private void archive(Path parentPath, List<Path> srcPaths, Path outputPath, boolean append)
throws IOException {
// Configure and run the MapReduce job that builds (or appends to) a HAR
// archive at outputPath from the given source paths under parentPath.
parentPath = parentPath.makeQualified(parentPath.getFileSystem(conf));
checkPaths(conf, srcPaths);
Path destinationDir = outputPath.getParent();
FileOutputFormat.setOutputPath(conf, outputPath);
FileSystem outFs = outputPath.getFileSystem(conf);
if (append) {
// Appending requires an existing HAR directory.
if (!outFs.exists(outputPath)) {
// Fixed missing space in the message (was "...Filedoesn't exist").
throw new IOException("Invalid Output. HAR File " + outputPath + " doesn't exist");
}
if (outFs.isFile(outputPath)) {
// Fixed missing space in the message (was "...Filemust be...").
throw new IOException("Invalid Output. HAR File " + outputPath
+ " must be represented as directory");
}
} else {
// Fresh archive: target must not exist and its parent must be a directory.
if (outFs.exists(outputPath)) {
throw new IOException("Invalid Output: " + outputPath + ". File already exists");
}
if (outFs.isFile(destinationDir)) {
throw new IOException("Invalid Output. " + outputPath + " is not a directory");
}
}
long totalSize = writeFilesToProcess(parentPath, srcPaths);
FileSystem fs = parentPath.getFileSystem(conf);
conf.set(SRC_LIST_LABEL, srcFiles.toString());
conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString());
conf.setLong(TOTAL_SIZE_LABEL, totalSize);
long partSize = conf.getLong(HAR_PARTSIZE_LABEL, HAR_PARTSIZE_DEFAULT);
int numMaps = (int) (totalSize / partSize);
//run atleast one map.
conf.setNumMapTasks(numMaps == 0 ? 1 : numMaps);
conf.setNumReduceTasks(1);
conf.setOutputFormat(NullOutputFormat.class);
conf.setMapOutputKeyClass(IntWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.set("hadoop.job.history.user.location", "none");
//make sure no speculative execution is done
conf.setSpeculativeExecution(false);
if (append) {
// set starting offset for mapper
int partId = findFirstAvailablePartId(outputPath);
conf.setInt(PART_ID_OFFSET, partId);
// Move the existing index aside so the converting mapper can re-read it
// and the reducer can merge old and new entries.
Path index = new Path(outputPath, HarFileSystem.INDEX_NAME);
Path indexDirectory = new Path(outputPath, HarFileSystem.INDEX_NAME + ".copy");
outFs.mkdirs(indexDirectory);
Path indexCopy = new Path(indexDirectory, "data");
outFs.rename(index, indexCopy);
MultipleInputs.addInputPath(conf, jobDirectory, HArchiveInputFormat.class,
HArchivesMapper.class);
MultipleInputs.addInputPath(conf, indexDirectory, TextInputFormat.class,
HArchivesConvertingMapper.class);
conf.setReducerClass(HArchivesMergingReducer.class);
} else {
conf.setMapperClass(HArchivesMapper.class);
conf.setInputFormat(HArchiveInputFormat.class);
FileInputFormat.addInputPath(conf, jobDirectory);
conf.setReducerClass(HArchivesReducer.class);
}
JobClient.runJob(conf);
cleanJobDirectory();
} | java | {
"resource": ""
} |
q161051 | HadoopArchives.writeLineToMasterIndex | train | private static void writeLineToMasterIndex(FSDataOutputStream stream, long startHash,
long endHash, long indexStartPos, long indexEndPos) throws IOException {
// Append one space-separated master-index line:
// "<startHash> <endHash> <indexStartPos> <indexEndPos>\n".
String toWrite = startHash + " " + endHash + " " + indexStartPos + " " + indexEndPos + "\n";
stream.write(toWrite.getBytes());
} | java | {
"resource": ""
} |
q161052 | HadoopArchives.createNewPartStream | train | private FSDataOutputStream createNewPartStream(Path dst, int partId) throws IOException {
// Open a new "part-<id>" file under dst using the configured HAR block
// size and the destination filesystem's default replication.
String partName = PART_PREFIX + partId;
Path output = new Path(dst, partName);
FileSystem destFs = output.getFileSystem(conf);
FSDataOutputStream partStream = destFs.create(output, false,
conf.getInt("io.file.buffer.size", 4096), destFs.getDefaultReplication(),
conf.getLong(HAR_BLOCKSIZE_LABEL, HAR_BLOCKSIZE_DEFAULT));
return partStream;
} | java | {
"resource": ""
} |
q161053 | MetaRecoveryContext.ask | train | public static String ask(String prompt, String firstChoice, String... choices)
throws IOException {
// Interactively prompt on stdin until the operator types one of the
// accepted choices (case-insensitive); returns the matched choice.
while (true) {
LOG.info(prompt);
StringBuilder responseBuilder = new StringBuilder();
// Read one line byte-by-byte; stop at EOF, CR, or LF.
while (true) {
int c = System.in.read();
if (c == -1 || c == '\r' || c == '\n') {
break;
}
responseBuilder.append((char)c);
}
String response = responseBuilder.toString();
if (response.equalsIgnoreCase(firstChoice))
return firstChoice;
for (String c : choices) {
if (response.equalsIgnoreCase(c)) {
return c;
}
}
LOG.error("I'm sorry, I cannot understand your response.\n");
}
} | java | {
"resource": ""
} |
q161054 | SimulatedTaskRunner.launchTask | train | public void launchTask(TaskInProgress tip) throws IOException {
// Simulate launching a task: map/setup/cleanup tasks are scheduled to
// "finish" after a delay; reduce tasks first wait on their mappers via a
// dedicated MapperWaitThread.
LOG.info("Launching simulated task " + tip.getTask().getTaskID() +
" for job " + tip.getTask().getJobID());
TaskUmbilicalProtocol umbilicalProtocol = taskTracker.getUmbilical(tip);
// For map tasks, we can just finish the task after some time. Same thing
// with cleanup tasks, as we don't need to be waiting for mappers to finish
if (tip.getTask().isMapTask() || tip.getTask().isTaskCleanupTask() ||
tip.getTask().isJobCleanupTask() || tip.getTask().isJobSetupTask() ) {
addTipToFinish(tip, umbilicalProtocol);
} else {
MapperWaitThread mwt =
new MapperWaitThread(tip, this, umbilicalProtocol);
// Save a reference to the mapper wait thread so that we can stop them if
// the task gets killed
mapperWaitThreadMap.put(tip, mwt);
mwt.start();
}
} | java | {
"resource": ""
} |
q161055 | SimulatedTaskRunner.addTipToFinish | train | protected void addTipToFinish(TaskInProgress tip,
TaskUmbilicalProtocol umbilicalProtocol) {
// Queue the task with a random completion time in
// [now, now + timeToFinishTask), then wake the finisher thread.
long currentTime = System.currentTimeMillis();
long finishTime = currentTime + Math.abs(rand.nextLong()) %
timeToFinishTask;
LOG.info("Adding TIP " + tip.getTask().getTaskID() +
" to finishing queue with start time " +
currentTime + " and finish time " + finishTime +
" (" + ((finishTime - currentTime) / 1000.0) + " sec) to thread " +
getName());
TipToFinish ttf = new TipToFinish(tip, finishTime, umbilicalProtocol);
tipQueue.put(ttf);
// Interrupt the waiting thread. We could put in additional logic to only
// interrupt when necessary, but probably not worth the complexity.
this.interrupt();
} | java | {
"resource": ""
} |
q161056 | SimulatedTaskRunner.run | train | @Override
public void run() {
while (true) {
// Wait to get a TIP
TipToFinish ttf = null;
try {
LOG.debug("Waiting for a TIP");
ttf = tipQueue.take();
} catch (InterruptedException e) {
LOG.info("Got interrupted exception while waiting to take()");
continue;
}
LOG.debug(" Got a TIP " + ttf.getTip().getTask().getTaskID() +
" at time " + System.currentTimeMillis() + " with finish time " +
ttf.getTimeToFinish());
// Wait until it's time to finish the task. Since the TIP was pulled from
// the priority queue, this should be the first task in the queue that
// needs to be finished. If we get interrupted, that means that it's
// possible that we added a TIP that should finish earlier
boolean interrupted = false;
while (true) {
long currentTime = System.currentTimeMillis();
if (currentTime < ttf.getTimeToFinish()) {
try {
long sleepTime = ttf.getTimeToFinish() - currentTime;
LOG.debug("Sleeping for " + sleepTime + " ms");
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
LOG.debug("Finisher thread was interrupted", e);
interrupted = true;
break;
}
} else {
break;
}
}
// Wait was interrupted, then it could mean that we added a task
// that needs to finish sooner. Put that task back and start again
if (interrupted) {
LOG.info("Putting back TIP " + ttf.getTip().getTask().getTaskID() +
" for job " + ttf.getTip().getTask().getJobID());
tipQueue.put(ttf);
continue;
}
// Finish the task
TaskInProgress tip = ttf.getTip();
ttf.finishTip();
// Also clean up the mapper wait thread map for reducers. It should exist
// for reduce tasks that are not cleanup tasks
if (!tip.getTask().isMapTask() &&
!tip.getTask().isTaskCleanupTask() &&
!tip.getTask().isJobCleanupTask() &&
!tip.getTask().isJobSetupTask()) {
if (!mapperWaitThreadMap.containsKey(tip)) {
throw new RuntimeException("Unable to find mapper wait thread for " +
tip.getTask().getTaskID() + " job " + tip.getTask().getJobID());
}
LOG.debug("Removing mapper wait thread for " +
tip.getTask().getTaskID() + " job " + tip.getTask().getJobID());
mapperWaitThreadMap.remove(tip);
} else if (mapperWaitThreadMap.containsKey(tip)) {
throw new RuntimeException("Mapper wait thread exists for" +
tip.getTask().getTaskID() + " job " + tip.getTask().getJobID() +
" when it shouldn't!");
}
}
} | java | {
"resource": ""
} |
q161057 | SimulatedTaskRunner.cancel | train | public void cancel(TaskInProgress tip) {
// Cancel a simulated task: reduce tasks must first have their
// mapper-wait thread interrupted and deregistered, then the task is
// removed from the finishing queue.
LOG.info("Canceling task " + tip.getTask().getTaskID() + " of job " +
tip.getTask().getJobID());
// Cancel & remove the map completion finish thread for reduce tasks.
if (!tip.getTask().isMapTask() && !tip.getTask().isTaskCleanupTask()) {
if (!mapperWaitThreadMap.containsKey(tip)) {
throw new RuntimeException("Mapper wait thread doesn't exist " +
"for " + tip.getTask().getTaskID());
}
LOG.debug("Interrupting mapper wait thread for " +
tip.getTask().getTaskID() + " job " +
tip.getTask().getJobID());
mapperWaitThreadMap.get(tip).interrupt();
LOG.debug("Removing mapper wait thread for " +
tip.getTask().getTaskID() + " job " + tip.getTask().getJobID());
mapperWaitThreadMap.remove(tip);
} else {
LOG.debug(tip.getTask().getTaskID() + " is not a reduce task, so " +
"not canceling mapper wait thread");
}
removeFromFinishingQueue(tip);
} | java | {
"resource": ""
} |
q161058 | HadoopLogParser.parseLine | train | public EventRecord parseLine(String line) throws IOException {
// Parse one Hadoop log line of the form
// "<date> <time> <LEVEL> <source>: <message>" into an EventRecord.
// Non-matching lines yield an empty EventRecord; a null line yields null.
// NOTE(review): the Pattern is recompiled on every call; could be cached
// as a static final field.
EventRecord retval = null;
if (line != null) {
// process line
String patternStr = "(" + dateformat + ")";
patternStr += "\\s+";
patternStr += "(" + timeformat + ")";
patternStr += ".{4}\\s(\\w*)\\s"; // for logLevel
patternStr += "\\s*([\\w+\\.?]+)"; // for source
patternStr += ":\\s+(.+)"; // for the message
Pattern pattern = Pattern.compile(patternStr);
Matcher matcher = pattern.matcher(line);
if (matcher.find(0) && matcher.groupCount() >= 5) {
retval = new EventRecord(hostname, ips, parseDate(matcher.group(1),
matcher.group(2)),
"HadoopLog",
matcher.group(3), // loglevel
matcher.group(4), // source
matcher.group(5)); // message
} else {
retval = new EventRecord();
}
}
return retval;
} | java | {
"resource": ""
} |
q161059 | HadoopLogParser.parseDate | train | protected Calendar parseDate(String strDate, String strTime) {
// Combine a "yyyy-MM-dd" date string and an "HH:mm:ss" time string into a
// Calendar in the default time zone.
Calendar retval = Calendar.getInstance();
// set date
String[] fields = strDate.split("-");
retval.set(Calendar.YEAR, Integer.parseInt(fields[0]));
// Calendar.MONTH is zero-based (JANUARY == 0) while log timestamps are
// one-based, so subtract 1; the original code was off by one month.
retval.set(Calendar.MONTH, Integer.parseInt(fields[1]) - 1);
retval.set(Calendar.DATE, Integer.parseInt(fields[2]));
// set time
fields = strTime.split(":");
retval.set(Calendar.HOUR_OF_DAY, Integer.parseInt(fields[0]));
retval.set(Calendar.MINUTE, Integer.parseInt(fields[1]));
retval.set(Calendar.SECOND, Integer.parseInt(fields[2]));
return retval;
} | java | {
"resource": ""
} |
q161060 | HadoopLogParser.findHostname | train | private void findHostname() {
// Derive hostname and IP from the first "STARTUP_MSG: host = name/ip"
// line in the log, by shelling out to grep and splitting on '/'.
// NOTE(review): relies on an external grep binary being available.
String startupInfo = Environment.runCommand(
"grep --max-count=1 STARTUP_MSG:\\s*host " + file.getName()).toString();
Pattern pattern = Pattern.compile("\\s+(\\w+/.+)\\s+");
Matcher matcher = pattern.matcher(startupInfo);
if (matcher.find(0)) {
hostname = matcher.group(1).split("/")[0];
ips = new String[1];
ips[0] = matcher.group(1).split("/")[1];
}
} | java | {
"resource": ""
} |
q161061 | CorruptReplicasMap.addToCorruptReplicasMap | train | public boolean addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn) {
// Record that datanode dn holds a corrupt replica of blk; returns true if
// this is a newly recorded (non-duplicate) corrupt replica.
Collection<DatanodeDescriptor> nodes = getNodes(blk);
if (nodes == null) {
// First corrupt replica seen for this block.
nodes = new TreeSet<DatanodeDescriptor>();
corruptReplicasMap.put(blk, nodes);
}
boolean added = false;
if (!nodes.contains(dn)) {
added = nodes.add(dn);
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
blk.getBlockName() +
" added as corrupt on " + dn.getName() +
" by " + Server.getRemoteIp());
} else {
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
"duplicate requested for " +
blk.getBlockName() + " to add as corrupt " +
"on " + dn.getName() +
" by " + Server.getRemoteIp());
}
return added;
} | java | {
"resource": ""
} |
q161062 | CorruptReplicasMap.removeFromCorruptReplicasMap | train | boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
// Drop datanode from blk's corrupt-replica set; prune the map entry when
// the set becomes empty. Returns true iff the datanode was present.
Collection<DatanodeDescriptor> datanodes = corruptReplicasMap.get(blk);
if (datanodes==null)
return false;
if (datanodes.remove(datanode)) { // remove the replicas
if (datanodes.isEmpty()) {
// remove the block if there is no more corrupted replicas
corruptReplicasMap.remove(blk);
}
return true;
}
return false;
} | java | {
"resource": ""
} |
q161063 | CorruptReplicasMap.getNodes | train | Collection<DatanodeDescriptor> getNodes(Block blk) {
// Return the set of datanodes with a corrupt replica of blk, or null.
// NOTE(review): the size()==0 short-circuit is redundant -- get() on an
// empty map already returns null.
if (corruptReplicasMap.size() == 0)
return null;
return corruptReplicasMap.get(blk);
} | java | {
"resource": ""
} |
q161064 | CorruptReplicasMap.isReplicaCorrupt | train | boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
// True iff this datanode is recorded as holding a corrupt replica of blk.
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return ((nodes != null) && (nodes.contains(node)));
} | java | {
q161065 | ProtocolSignature.getFingerprint | train | static int getFingerprint(Method method) {
// Fingerprint a method from its name, return type, and parameter types.
// The exact mixing formula is part of the wire-compatibility contract
// between client and server versions -- do not change it.
int hashcode = method.getName().hashCode();
hashcode = hashcode + 31*method.getReturnType().getName().hashCode();
for (Class<?> type : method.getParameterTypes()) {
hashcode = 31*hashcode ^ type.getName().hashCode();
}
return hashcode;
} | java | {
"resource": ""
} |
q161066 | ProtocolSignature.getFingerprints | train | private static int[] getFingerprints(Method[] methods) {
// Map each method to its fingerprint; null-in, null-out.
if (methods == null) {
return null;
}
int[] hashCodes = new int[methods.length];
for (int i = 0; i<methods.length; i++) {
hashCodes[i] = getFingerprint(methods[i]);
}
return hashCodes;
} | java | {
"resource": ""
} |
q161067 | ProtocolSignature.getSigFingerprint | train | private static ProtocolSigFingerprint getSigFingerprint(
Class <? extends VersionedProtocol> protocol, long serverVersion) {
// Look up (or lazily compute and cache) the signature fingerprint for a
// protocol class; the cache map itself serves as the lock.
String protocolName = protocol.getName();
synchronized (PROTOCOL_FINGERPRINT_CACHE) {
ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName);
if (sig == null) {
int[] serverMethodHashcodes = getFingerprints(protocol.getMethods());
sig = new ProtocolSigFingerprint(
new ProtocolSignature(serverVersion, serverMethodHashcodes),
getFingerprint(serverMethodHashcodes));
PROTOCOL_FINGERPRINT_CACHE.put(protocolName, sig);
}
return sig;
}
} | java | {
"resource": ""
} |
q161068 | Utilities.incrComputeSpecs | train | public static void incrComputeSpecs(ComputeSpecs target, ComputeSpecs incr) {
// Add incr's cpu/memory/disk resources into target, in place.
target.numCpus += incr.numCpus;
target.memoryMB += incr.memoryMB;
target.diskGB += incr.diskGB;
} | java | {
"resource": ""
} |
q161069 | Utilities.decrComputeSpecs | train | public static void decrComputeSpecs(ComputeSpecs target, ComputeSpecs decr) {
// Subtract decr's cpu/memory/disk resources from target, in place.
target.numCpus -= decr.numCpus;
target.memoryMB -= decr.memoryMB;
target.diskGB -= decr.diskGB;
} | java | {
"resource": ""
} |
q161070 | Utilities.waitThreadTermination | train | public static void waitThreadTermination(Thread thread) {
// Keep interrupting and joining until the thread dies; the swallowed
// InterruptedException is deliberate -- the loop retries the join.
while (thread != null && thread.isAlive()) {
thread.interrupt();
try {
thread.join();
} catch (InterruptedException e) {
}
}
} | java | {
"resource": ""
} |
q161071 | Utilities.appInfoToAddress | train | public static InetAddress appInfoToAddress(String info) {
// Parse "host:port" (per INET_ADDRESS_PATTERN) out of an app-info string;
// returns null when the pattern does not match.
Matcher m = INET_ADDRESS_PATTERN.matcher(info);
if (m.find()) {
int port = Integer.parseInt(m.group(2));
return new InetAddress(m.group(1), port);
}
return null;
} | java | {
"resource": ""
} |
q161072 | Utilities.makeProcessExitOnUncaughtException | train | public static void makeProcessExitOnUncaughtException(final Log log) {
// Install a JVM-wide handler that logs any uncaught exception and kills
// the process with exit code 1 (fail-fast instead of limping along).
Thread.setDefaultUncaughtExceptionHandler(
new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
log.error("UNCAUGHT: Thread " + t.getName() +
" got an uncaught exception", e);
System.exit(1);
}
});
} | java | {
"resource": ""
} |
q161073 | ZkUtil.interruptedException | train | public static void interruptedException(String msg, InterruptedException e)
throws IOException {
// Restore the interrupt flag (good practice), log, and rethrow as an
// IOException that preserves the original cause.
Thread.currentThread().interrupt();
LOG.error(msg, e);
throw new IOException(msg, e);
} | java | {
"resource": ""
} |
q161074 | DistributedRaidFileSystem.getBlockSize | train | private static long getBlockSize(LocatedBlocks lbs) throws IOException {
// Return the largest block size among the located blocks, or -1 when the
// list is empty.
List<LocatedBlock> locatedBlocks = lbs.getLocatedBlocks();
long bs = -1;
for (LocatedBlock lb: locatedBlocks) {
if (lb.getBlockSize() > bs) {
bs = lb.getBlockSize();
}
}
return bs;
} | java | {
"resource": ""
} |
q161075 | DistributedRaidFileSystem.undelete | train | @Override
public boolean undelete(Path f, String userName) throws IOException {
List<Codec> codecList = Codec.getCodecs();
Path[] parityPathList = new Path[codecList.size()];
for (int i=0; i<parityPathList.length; i++) {
parityPathList[i] = new Path(codecList.get(i).parityDirectory,
makeRelative(f));
}
// undelete the src file.
if (!fs.undelete(f, userName)) {
return false;
}
// try to undelete the parity file
for (Path parityPath : parityPathList) {
fs.undelete(parityPath, userName);
}
return true;
} | java | {
"resource": ""
} |
q161076 | DistributedRaidFileSystem.searchHarDir | train | private boolean searchHarDir(FileStatus stat)
throws IOException {
// Depth-first search: does this directory (or any subdirectory) contain
// an entry whose name includes the HAR suffix?
if (!stat.isDir()) {
return false;
}
String pattern = stat.getPath().toString() + "/*" + RaidNode.HAR_SUFFIX
+ "*";
FileStatus[] stats = globStatus(new Path(pattern));
if (stats != null && stats.length > 0) {
return true;
}
stats = fs.listStatus(stat.getPath());
// search deeper.
for (FileStatus status : stats) {
if (searchHarDir(status)) {
return true;
}
}
return false;
} | java | {
"resource": ""
} |
q161077 | HardLinkFileInfo.getHardLinkedFile | train | public INodeHardLinkFile getHardLinkedFile(int i) {
// Return the i-th linked file, or null when the index is out of range.
// Guard the lower bound too: the original only checked the upper bound,
// so a negative index would throw IndexOutOfBoundsException from List.get.
if (i >= 0 && i < this.linkedFiles.size()) {
return this.linkedFiles.get(i);
}
return null;
} | java | {
"resource": ""
} |
q161078 | HardLinkFileInfo.removeLinkedFile | train | public void removeLinkedFile(INodeHardLinkFile file) {
// Unlink one file from this hard-link group. If only one link remains it
// is demoted back to a plain INodeFile; block ownership is transferred
// off the removed file where it was the current owner. The statement
// order here is deliberate -- demote first, then reassign block INodes.
// Remove the target file from the linkedFiles
// Since the equal function in the INode only compares the name field, it would be safer to
// iterate the INodeHardLinkFile and compare the reference directly.
for (int i = 0 ; i < linkedFiles.size(); i++) {
// compare the reference !
if (linkedFiles.get(i) == file) {
// remove the file by index
linkedFiles.remove(i);
break;
}
}
INodeFile newOwner = null;
if (linkedFiles.size() == 1) {
// revert the last INodeHardLinkFile to the regular INodeFile
INodeHardLinkFile lastReferencedFile= linkedFiles.get(0);
INodeFile inodeFile = new INodeFile(lastReferencedFile);
lastReferencedFile.parent.replaceChild(inodeFile);
// clear the linkedFiles
linkedFiles.clear();
// set the new owner
newOwner = inodeFile;
} else {
if (file.getBlocks() != null &&
file.getBlocks().length >0 &&
(file.getBlocks()[0].getINode() == file)) {
// need to find a new owner for all the block infos
newOwner = linkedFiles.get(0);
}
}
if (newOwner != null) {
// Re-point every block's back-reference at the surviving owner.
for (BlockInfo blkInfo : file.getBlocks()) {
blkInfo.setINode(newOwner);
}
}
} | java | {
"resource": ""
} |
q161079 | HardLinkFileInfo.setPermissionStatus | train | protected void setPermissionStatus(PermissionStatus ps) {
// Fan the permission-status change out to every file in the link group;
// the false flag presumably suppresses re-entrant group updates -- TODO confirm.
for (INodeHardLinkFile linkedFile : linkedFiles) {
linkedFile.setPermissionStatus(ps, false);
}
} | java | {
"resource": ""
} |
q161080 | HardLinkFileInfo.setUser | train | protected void setUser(String user) {
// Propagate the owner change to every linked file.
for (INodeHardLinkFile linkedFile : linkedFiles) {
linkedFile.setUser(user, false);
}
} | java | {
"resource": ""
} |
q161081 | HardLinkFileInfo.setGroup | train | protected void setGroup(String group) {
// Propagate the group change to every linked file.
for (INodeHardLinkFile linkedFile : linkedFiles) {
linkedFile.setGroup(group, false);
}
} | java | {
"resource": ""
} |
q161082 | HardLinkFileInfo.setPermission | train | protected void setPermission(FsPermission permission) {
// Propagate the permission-bits change to every linked file.
for (INodeHardLinkFile linkedFile : linkedFiles) {
linkedFile.setPermission(permission, false);
}
} | java | {
"resource": ""
} |
q161083 | RAMDirectoryUtil.writeRAMFiles | train | public static void writeRAMFiles(DataOutput out, RAMDirectory dir,
String[] names) throws IOException {
// Serialize the named Lucene RAMDirectory files: count, then for each
// file its name, length, and raw bytes copied in BUFFER_SIZE chunks.
out.writeInt(names.length);
for (int i = 0; i < names.length; i++) {
Text.writeString(out, names[i]);
long length = dir.fileLength(names[i]);
out.writeLong(length);
if (length > 0) {
// can we avoid the extra copy?
IndexInput input = null;
try {
input = dir.openInput(names[i], BUFFER_SIZE);
int position = 0;
byte[] buffer = new byte[BUFFER_SIZE];
while (position < length) {
// Last chunk may be shorter than BUFFER_SIZE.
int len =
position + BUFFER_SIZE <= length ? BUFFER_SIZE
: (int) (length - position);
input.readBytes(buffer, 0, len);
out.write(buffer, 0, len);
position += len;
}
} finally {
if (input != null) {
input.close();
}
}
}
}
} | java | {
"resource": ""
} |
q161084 | RAMDirectoryUtil.readRAMFiles | train | public static void readRAMFiles(DataInput in, RAMDirectory dir)
throws IOException {
// Inverse of writeRAMFiles: read count, then recreate each named file in
// the RAMDirectory by streaming its bytes in BUFFER_SIZE chunks.
int numFiles = in.readInt();
for (int i = 0; i < numFiles; i++) {
String name = Text.readString(in);
long length = in.readLong();
if (length > 0) {
// can we avoid the extra copy?
IndexOutput output = null;
try {
output = dir.createOutput(name);
int position = 0;
byte[] buffer = new byte[BUFFER_SIZE];
while (position < length) {
int len =
position + BUFFER_SIZE <= length ? BUFFER_SIZE
: (int) (length - position);
in.readFully(buffer, 0, len);
output.writeBytes(buffer, 0, len);
position += len;
}
} finally {
if (output != null) {
output.close();
}
}
}
}
} | java | {
"resource": ""
} |
q161085 | LocalityStats.record | train | public void record(
TaskInProgress tip, String host, long inputBytes) {
// Enqueue a locality record and notify the consumer thread waiting on
// the localityRecords monitor.
synchronized (localityRecords) {
localityRecords.add(new Record(tip, host, inputBytes));
localityRecords.notify();
}
} | java | {
"resource": ""
} |
q161086 | LocalityStats.computeStatistics | train | private void computeStatistics(Record record) {
// Unpack the queued record and delegate to the three-argument overload.
computeStatistics(record.tip, record.host, record.inputBytes);
} | java | {
"resource": ""
} |
q161087 | LocalityStats.computeStatistics | train | private void computeStatistics(
TaskInProgress tip, String host, long inputBytes) {
// Classify a task as data-local (level 0), rack-local (level 1) or
// non-local by comparing the tracker host against every split location,
// then bump the matching job counters. A negative inputBytes means
// "update the task-count counters only".
int level = this.maxLevel;
String[] splitLocations = tip.getSplitLocations();
if (splitLocations.length > 0) {
Node tracker = topologyCache.getNode(host);
// find the right level across split locations
for (String local : splitLocations) {
Node datanode = topologyCache.getNode(local);
int newLevel = this.maxLevel;
if (tracker != null && datanode != null) {
newLevel = getMatchingLevelForNodes(tracker, datanode, maxLevel);
}
if (newLevel < level) {
level = newLevel;
if (level == 0) {
// Best possible locality found; stop searching.
break;
}
}
}
}
boolean updateTaskCountOnly = inputBytes < 0;
switch (level) {
case 0:
if (updateTaskCountOnly) {
LOG.info("Chose data-local task " + tip.getTIPId());
jobCounters.incrCounter(Counter.DATA_LOCAL_MAPS, 1);
jobStats.incNumDataLocalMaps();
} else {
jobCounters.incrCounter(Counter.LOCAL_MAP_INPUT_BYTES, inputBytes);
jobStats.incLocalMapInputBytes(inputBytes);
}
break;
case 1:
if (updateTaskCountOnly) {
LOG.info("Chose rack-local task " + tip.getTIPId());
jobCounters.incrCounter(Counter.RACK_LOCAL_MAPS, 1);
jobStats.incNumRackLocalMaps();
} else {
jobCounters.incrCounter(Counter.RACK_MAP_INPUT_BYTES, inputBytes);
jobStats.incRackMapInputBytes(inputBytes);
}
break;
default:
LOG.info("Chose non-local task " + tip.getTIPId() + " at level " + level);
// check if there is any locality
if (updateTaskCountOnly && level != this.maxLevel) {
jobCounters.incrCounter(Counter.OTHER_LOCAL_MAPS, 1);
}
break;
}
} | java | {
"resource": ""
} |
q161088 | EditsLoaderCurrent.visitTxId | train | private void visitTxId() throws IOException {
// Only edit-log layouts that store transaction ids carry this field.
if (LayoutVersion.supports(Feature.STORED_TXIDS, editsVersion)) {
v.visitLong(EditsElement.TRANSACTION_ID);
}
} | java | {
"resource": ""
} |
q161089 | EditsLoaderCurrent.visit_OP_ADD_or_OP_CLOSE | train | private void visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes editsOpCode)
throws IOException {
// Walk the serialized fields of an OP_ADD / OP_CLOSE record; field
// encodings vary by layout version, so each branch below mirrors the
// on-disk format for that feature level. Field order is load-bearing.
visitTxId();
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, editsVersion)) {
IntToken opAddLength = v.visitInt(EditsElement.LENGTH);
// this happens if the edits is not properly ended (-1 op code),
// it is padded at the end with all zeros, OP_ADD is zero so
// without this check we would treat all zeros as empty OP_ADD)
if (opAddLength.value == 0) {
throw new IOException("OpCode " + editsOpCode
+ " has zero length (corrupted edits)");
}
}
v.visitStringUTF8(EditsElement.PATH);
if (LayoutVersion.supports(Feature.ADD_INODE_ID, editsVersion)) {
v.visitLong(EditsElement.INODE_ID);
}
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, editsVersion)) {
v.visitShort(EditsElement.REPLICATION);
v.visitLong(EditsElement.MTIME);
v.visitLong(EditsElement.ATIME);
v.visitLong(EditsElement.BLOCKSIZE);
} else {
// Older layouts serialized these numeric fields as UTF-8 strings.
v.visitStringUTF8(EditsElement.REPLICATION);
v.visitStringUTF8(EditsElement.MTIME);
v.visitStringUTF8(EditsElement.ATIME);
v.visitStringUTF8(EditsElement.BLOCKSIZE);
}
// now read blocks
visit_Blocks();
// PERMISSION_STATUS
v.visitEnclosingElement(EditsElement.PERMISSION_STATUS);
v.visitStringText( EditsElement.USERNAME);
v.visitStringText( EditsElement.GROUPNAME);
v.visitShort( EditsElement.FS_PERMISSIONS);
v.leaveEnclosingElement();
// Only OP_ADD records carry the client lease info.
if(editsOpCode == FSEditLogOpCodes.OP_ADD) {
v.visitStringUTF8(EditsElement.CLIENT_NAME);
v.visitStringUTF8(EditsElement.CLIENT_MACHINE);
}
} | java | {
"resource": ""
} |
q161090 | EditsLoaderCurrent.loadEdits | train | @Override
public void loadEdits() throws IOException {
try {
v.start();
v.visitEnclosingElement(EditsElement.EDITS);
IntToken editsVersionToken = v.visitInt(EditsElement.EDITS_VERSION);
editsVersion = editsVersionToken.value;
if(!canLoadVersion(editsVersion)) {
throw new IOException("Cannot process editLog version " +
editsVersionToken.value);
}
FSEditLogOpCodes editsOpCode;
do {
v.visitEnclosingElement(EditsElement.RECORD);
ByteToken opCodeToken;
try {
opCodeToken = v.visitByte(EditsElement.OPCODE);
} catch (EOFException eof) {
// Getting EOF when reading the opcode is fine --
// it's just a finalized edits file
// Just fake the OP_INVALID here.
opCodeToken = new ByteToken(EditsElement.OPCODE);
opCodeToken.fromByte(FSEditLogOpCodes.OP_INVALID.getOpCode());
v.visit(opCodeToken);
}
editsOpCode = FSEditLogOpCodes.fromByte(opCodeToken.value);
v.visitEnclosingElement(EditsElement.DATA);
visitOpCode(editsOpCode);
v.leaveEnclosingElement(); // DATA
if (editsOpCode != FSEditLogOpCodes.OP_INVALID &&
LayoutVersion.supports(Feature.EDITS_CHESKUM, editsVersion)) {
v.visitInt(EditsElement.CHECKSUM);
}
v.leaveEnclosingElement(); // RECORD
} while(editsOpCode != FSEditLogOpCodes.OP_INVALID);
v.leaveEnclosingElement(); // EDITS
v.finish();
} catch(IOException e) {
// Tell the visitor to clean up, then re-throw the exception
v.finishAbnormally();
throw e;
}
} | java | {
"resource": ""
} |
q161091 | CounterGroup.getResourceBundle | train | private static ResourceBundle getResourceBundle(String enumClassName) {
// Bundle names use '_' where nested-class names use '$'.
String bundleName = enumClassName.replace('$','_');
return ResourceBundle.getBundle(bundleName);
} | java | {
"resource": ""
} |
q161092 | CounterGroup.findCounter | train | protected Counter findCounter(String counterName, String displayName) {
// Get-or-create: return the existing counter, creating and registering a
// new one with the given display name on first use.
Counter result = counters.get(counterName);
if (result == null) {
result = new Counter(counterName, displayName);
counters.put(counterName, result);
}
return result;
} | java | {
"resource": ""
} |
q161093 | CounterGroup.localize | train | private String localize(String key, String defaultValue) {
// Look up a localized string from the group's bundle, falling back to
// defaultValue when there is no bundle or the key is missing.
String result = defaultValue;
if (bundle != null) {
try {
result = bundle.getString(key);
}
catch (MissingResourceException mre) {
// Missing key is expected; keep the default.
}
}
return result;
} | java | {
"resource": ""
} |
q161094 | NameNode.getDefaultAddress | train | public static String getDefaultAddress(Configuration conf) {
// Extract the "host:port" authority of the default filesystem URI;
// reject URIs with no authority with an actionable message.
URI uri = FileSystem.getDefaultUri(conf);
String authority = uri.getAuthority();
if (authority == null) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s has no authority.",
FileSystem.FS_DEFAULT_NAME_KEY, uri.toString()));
}
return authority;
} | java | {
"resource": ""
} |
q161095 | NameNode.initialize | train | protected void initialize() throws IOException {
// NameNode startup sequence: service authorization policy, fail-fast
// port-bind checks, metrics, FSNamesystem load, then RPC + HTTP servers.
// set service-level authorization security policy
if (serviceAuthEnabled =
getConf().getBoolean(
ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
PolicyProvider policyProvider =
(PolicyProvider)(ReflectionUtils.newInstance(
getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class),
getConf()));
SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
}
// This is a check that the port is free
// create a socket and bind to it, throw exception if port is busy
// This has to be done before we are reading Namesystem not to waste time and fail fast
NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
NetUtils.isSocketBindable(getHttpServerAddress(getConf()));
long serverVersion = ClientProtocol.versionID;
this.clientProtocolMethodsFingerprint = ProtocolSignature
.getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);
myMetrics = new NameNodeMetrics(getConf(), this);
this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
this.namesystem = new FSNamesystem(this, getConf());
// HACK: from removal of FSNamesystem.getFSNamesystem().
JspHelper.fsn = this.namesystem;
this.startDNServer();
startHttpServer(getConf());
} | java | {
"resource": ""
} |
q161096 | NameNode.adjustMetaDirectoryNames | train | protected static void adjustMetaDirectoryNames(Configuration conf, String serviceKey) {
// Rewrite each metadata directory config key for the given nameservice.
adjustMetaDirectoryName(conf, DFS_NAMENODE_NAME_DIR_KEY, serviceKey);
adjustMetaDirectoryName(conf, DFS_NAMENODE_EDITS_DIR_KEY, serviceKey);
adjustMetaDirectoryName(conf, DFS_NAMENODE_CHECKPOINT_DIR_KEY, serviceKey);
adjustMetaDirectoryName(
conf, DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, serviceKey);
} | java | {
"resource": ""
} |
q161097 | NameNode.stopRPC | train | protected void stopRPC(boolean interruptClientHandlers)
throws IOException, InterruptedException {
// Two-phase shutdown: ask both RPC servers to stop (optionally
// interrupting in-flight handlers), then wait for each to drain.
// stop client handlers
stopRPCInternal(server, "client", interruptClientHandlers);
// stop datanode handlers
stopRPCInternal(dnProtocolServer, "datanode", interruptClientHandlers);
// waiting for the ongoing requests to complete
stopWaitRPCInternal(server, "client");
stopWaitRPCInternal(dnProtocolServer, "datanode");
} | java | {
"resource": ""
} |
q161098 | NameNode.stop | train | public void stop() {
// Idempotent shutdown: guard with stopRequested, then tear down the HTTP
// server, namesystem, trash emptier, RPC servers, metrics, and mbeans in
// that order. Each step is null-safe so partial startups can stop too.
if (stopRequested)
return;
stopRequested = true;
LOG.info("Stopping http server");
try {
if (httpServer != null) httpServer.stop();
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
LOG.info("Stopping namesystem");
if(namesystem != null) namesystem.close();
LOG.info("Stopping emptier");
if(emptier != null) emptier.interrupt();
LOG.info("Stopping rpc servers");
if(server != null) server.stop();
if (dnProtocolServer != null) dnProtocolServer.stop();
LOG.info("Stopping metrics");
if (myMetrics != null) {
myMetrics.shutdown();
}
LOG.info("Stopping namesystem mbeans");
if (namesystem != null) {
namesystem.shutdown();
}
} | java | {
"resource": ""
} |
q161099 | NameNode.addBlock | train | public LocatedBlock addBlock(String src,
String clientName) throws IOException {
// Convenience overload: delegate with no excluded-nodes list.
return addBlock(src, clientName, null);
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.