_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q161600 | AvatarShell.waitForLastTxIdNode | train | private void waitForLastTxIdNode(AvatarZooKeeperClient zk, Configuration conf)
throws Exception {
// Block until the primary's session-id znode and last-txid znode agree,
// retrying every retrySleep ms; fail with an IOException once the
// configured maximum wait time has elapsed.
// Gather session id and transaction id data.
String address = conf.get(NameNode.DFS_NAMENODE_RPC_ADDRESS_KEY);
long maxWaitTime = this.getMaxWaitTimeForWaitTxid();
long start = System.currentTimeMillis();
while (true) {
// Overall deadline check; bounds the total retry time.
if (System.currentTimeMillis() - start > maxWaitTime) {
throw new IOException("No valid last txid znode found");
}
try {
long sessionId = zk.getPrimarySsId(address, false);
ZookeeperTxId zkTxId = zk.getPrimaryLastTxId(address, false);
// The txid node is only trustworthy if it was written by the same
// ZK session that wrote the ssid node; otherwise retry.
if (sessionId != zkTxId.getSessionId()) {
LOG.warn("Session Id in the ssid node : " + sessionId
+ " does not match the session Id in the txid node : "
+ zkTxId.getSessionId() + " retrying...");
Thread.sleep(retrySleep);
continue;
}
} catch (Throwable e) {
// NOTE(review): deliberately broad catch -- any ZK hiccup is treated
// as transient and retried until the deadline above fires.
LOG.warn("Caught exception : " + e + " retrying ...");
Thread.sleep(retrySleep);
continue;
}
break;
}
} | java | {
"resource": ""
} |
q161601 | AvatarShell.setAvatar | train | public int setAvatar(String role, boolean noverification, String serviceName, String instance)
throws IOException {
// Resolve the requested role; only a switch to ACTIVE (primary) is legal.
Avatar target;
if (Avatar.ACTIVE.toString().equalsIgnoreCase(role)) {
target = Avatar.ACTIVE;
} else if (Avatar.STANDBY.toString().equalsIgnoreCase(role)) {
throw new IOException("setAvatar Command only works to switch avatar" +
" from Standby to Primary");
} else {
throw new IOException("Unknown avatar type " + role);
}
Avatar current = avatarnode.getAvatar();
if (current == target) {
// Nothing to do -- already running in the requested avatar.
System.out.println("This instance is already in " + current + " avatar.");
return 0;
}
// Quiesce, fail over, then record the new role in ZooKeeper.
try {
avatarnode.quiesceForFailover(noverification);
} catch (RemoteException re) {
handleRemoteException(re);
}
avatarnode.performFailover();
updateZooKeeper(serviceName, instance);
return 0;
} | java | {
"resource": ""
} |
q161602 | AvatarShell.isServiceSpecified | train | public static boolean isServiceSpecified(String command, Configuration conf,
String[] argv) {
// Non-federated clusters never need an explicit -service argument.
if (conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES) == null) {
return true;
}
// Federated cluster: scan the arguments for a "-service" flag.
for (String arg : argv) {
if (arg.equals("-service")) {
return true;
}
}
// no service specs
printServiceErrorMessage(command, conf);
return false;
} | java | {
"resource": ""
} |
q161603 | MapTaskStatus.calculateRate | train | private double calculateRate(long cumulative, long currentTime) {
// Returns the average rate ('cumulative' units per millisecond) since the
// map started, or 0 if the clock looks inconsistent.
assert getPhase() == Phase.MAP : "MapTaskStatus not in map phase!";
long startTime = getStartTime();
long timeSinceMapStart = currentTime - startTime;
if (timeSinceMapStart <= 0) {
LOG.error("Current time is " + currentTime +
" but start time is " + startTime);
return 0;
}
// BUG FIX: the original divided two longs, truncating the quotient to a
// whole number before widening to double (any rate below 1/ms became 0).
// Cast first so the division happens in floating point.
return (double) cumulative / timeSinceMapStart;
} | java | {
"resource": ""
} |
q161604 | ResourceLimit.hasEnoughMemory | train | public boolean hasEnoughMemory(ClusterNode node) {
// A node is usable only while its free memory stays at or above the
// configured reserved floor (values in MB).
int totalMB = node.getTotal().memoryMB;
int freeMB = node.getFree().memoryMB;
boolean enough = freeMB >= nodeReservedMemoryMB;
if (!enough && LOG.isDebugEnabled()) {
LOG.debug(node.getHost() + " not enough memory." +
" totalMB:" + totalMB +
" free:" + freeMB +
" limit:" + nodeReservedMemoryMB);
}
return enough;
} | java | {
"resource": ""
} |
q161605 | ResourceLimit.hasEnoughDiskSpace | train | private boolean hasEnoughDiskSpace(ClusterNode node) {
// A node is usable only while its free disk stays at or above the
// configured reserved floor (values are in GB).
int total = node.getTotal().diskGB;
int free = node.getFree().diskGB;
if (free < nodeReservedDiskGB) {
if (LOG.isDebugEnabled()) {
// BUG FIX: the debug line mislabelled the disk figure as "totalMB";
// these values come from diskGB, so label them as gigabytes.
LOG.debug(node.getHost() + " not enough disk space." +
" totalGB:" + total +
" free:" + free +
" limit:" + nodeReservedDiskGB);
}
return false;
}
return true;
} | java | {
"resource": ""
} |
q161606 | RetouchedBloomFilter.selectiveClearing | train | public void selectiveClearing(Key k, short scheme) {
// Clears one bit associated with key k, chosen according to the given
// selective-clearing scheme (RANDOM, MINIMUM_FN, MAXIMUM_FP or RATIO).
// Throws NullPointerException for a null key and IllegalArgumentException
// when k is not currently a member of the filter.
if (k == null) {
throw new NullPointerException("Key can not be null");
}
if (!membershipTest(k)) {
throw new IllegalArgumentException("Key is not a member");
}
int index = 0;
int[] h = hash.hash(k);
// Pick which of the key's hash positions to clear.
switch(scheme) {
case RANDOM:
index = randomRemove();
break;
case MINIMUM_FN:
index = minimumFnRemove(h);
break;
case MAXIMUM_FP:
index = maximumFpRemove(h);
break;
case RATIO:
index = ratioRemove(h);
break;
default:
throw new AssertionError("Undefined selective clearing scheme");
}
clearBit(index);
} | java | {
"resource": ""
} |
q161607 | RetouchedBloomFilter.minimumFnRemove | train | private int minimumFnRemove(int[] h) {
// Among the key's hash positions, choose the one whose key list carries
// the smallest weight (clearing it loses the fewest true members).
int bestIndex = Integer.MAX_VALUE;
double bestWeight = Double.MAX_VALUE;
for (int i = 0; i < nbHash; i++) {
int pos = h[i];
double keyWeight = getWeight(keyVector[pos]);
if (keyWeight < bestWeight) {
bestWeight = keyWeight;
bestIndex = pos;
}
}
return bestIndex;
} | java | {
"resource": ""
} |
q161608 | RetouchedBloomFilter.maximumFpRemove | train | private int maximumFpRemove(int[] h) {
// Among the key's hash positions, choose the one whose false-positive
// list carries the greatest weight (clearing it removes the most FPs).
// BUG FIX: the scan was seeded with Double.MIN_VALUE, which is the
// smallest *positive* double, not the most negative -- positions whose
// weight was 0.0 (empty fp lists) could never be selected, letting the
// bogus sentinel index Integer.MIN_VALUE escape to clearBit(). Seed the
// scan with the first candidate instead so a valid index is always
// returned.
int maxIndex = h[0];
double maxValue = getWeight(fpVector[h[0]]);
for (int i = 1; i < nbHash; i++) {
double fpWeight = getWeight(fpVector[h[i]]);
if (fpWeight > maxValue) {
maxValue = fpWeight;
maxIndex = h[i];
}
}
return maxIndex;
} | java | {
"resource": ""
} |
q161609 | RetouchedBloomFilter.ratioRemove | train | private int ratioRemove(int[] h) {
// Refresh the per-bit ratio table, then pick the hash position with the
// smallest ratio.
computeRatio();
int bestIndex = Integer.MAX_VALUE;
double bestRatio = Double.MAX_VALUE;
for (int i = 0; i < nbHash; i++) {
int pos = h[i];
if (ratio[pos] < bestRatio) {
bestRatio = ratio[pos];
bestIndex = pos;
}
}
return bestIndex;
} | java | {
"resource": ""
} |
q161610 | RetouchedBloomFilter.clearBit | train | private void clearBit(int index) {
// Clears bit 'index' of the filter and purges every key / false positive
// that hashed to it from ALL the per-bit bookkeeping lists.
if (index < 0 || index >= vectorSize) {
throw new ArrayIndexOutOfBoundsException(index);
}
List<Key> kl = keyVector[index];
List<Key> fpl = fpVector[index];
// update key list
// removeKey() deletes the key from every list it hashes to, including
// this one, so always take element 0; the isEmpty() guard protects
// against the list shrinking faster than 'listSize' anticipates.
int listSize = kl.size();
for (int i = 0; i < listSize && !kl.isEmpty(); i++) {
removeKey(kl.get(0), keyVector);
}
kl.clear();
keyVector[index].clear();
//update false positive list
listSize = fpl.size();
for (int i = 0; i < listSize && !fpl.isEmpty(); i++) {
removeKey(fpl.get(0), fpVector);
}
fpl.clear();
fpVector[index].clear();
//update ratio
ratio[index] = 0.0;
//update bit vector
bits.clear(index);
} | java | {
"resource": ""
} |
q161611 | RetouchedBloomFilter.createVector | train | @SuppressWarnings("unchecked")
private void createVector() {
// Allocate the per-bit bookkeeping: one synchronized key list, one
// synchronized false-positive list and one ratio slot per bit.
fpVector = new List[vectorSize];
keyVector = new List[vectorSize];
ratio = new double[vectorSize];
for (int idx = 0; idx < vectorSize; idx++) {
fpVector[idx] = Collections.synchronizedList(new ArrayList<Key>());
keyVector[idx] = Collections.synchronizedList(new ArrayList<Key>());
ratio[idx] = 0.0;
}
} | java | {
"resource": ""
} |
q161612 | AsyncLoggerSet.setEpoch | train | void setEpoch(long e) {
// Record the newly-won epoch and propagate it to every underlying logger.
// May only be called once; re-establishing an epoch is a programming error.
Preconditions.checkState(!isEpochEstablished(),
"Epoch already established: epoch=%s", myEpoch);
myEpoch = e;
for (AsyncLogger logger : loggers) {
logger.setEpoch(e);
}
} | java | {
"resource": ""
} |
q161613 | AsyncLoggerSet.setCommittedTxId | train | public void setCommittedTxId(long txid, boolean force) {
// Fan the committed transaction id out to every logger in the set.
for (AsyncLogger l : loggers) {
l.setCommittedTxId(txid, force);
}
} | java | {
"resource": ""
} |
q161614 | AsyncLoggerSet.waitForWriteQuorum | train | <V> Map<AsyncLogger, V> waitForWriteQuorum(QuorumCall<AsyncLogger, V> q,
int timeoutMs, String operationName) throws IOException {
// Wait until a write quorum (a majority) of loggers has responded, or
// until more than (n - majority) have failed, or the timeout elapses.
int majority = getMajoritySize();
int numLoggers = loggers.size();
checkMajoritySize(majority, numLoggers);
int maxExceptions = numLoggers - majority + 1;
return waitForQuorumInternal(q, numLoggers, majority, maxExceptions,
majority, timeoutMs, operationName);
} | java | {
"resource": ""
} |
q161615 | AsyncLoggerSet.appendHtmlReport | train | void appendHtmlReport(StringBuilder sb) {
// Render a two-column HTML table (journal node, status), one row per
// logger; each logger fills in its own status cell.
sb.append("<table class=\"storage\">");
sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
for (AsyncLogger logger : loggers) {
sb.append("<tr>");
sb.append("<td>").append(JspUtil.escapeXml(logger.toString())).append("</td>");
sb.append("<td>");
logger.appendHtmlReport(sb);
sb.append("</td></tr>\n");
}
sb.append("</table>");
} | java | {
"resource": ""
} |
q161616 | HtmlQuoting.quoteHtmlChars | train | public static void quoteHtmlChars(OutputStream output, byte[] buffer,
int off, int len) throws IOException {
// Copy buffer[off, off+len) to the stream, replacing the five HTML
// metacharacters with pre-encoded entity byte sequences (ampBytes etc. --
// presumably "&amp;", "&lt;", ..., defined elsewhere in the class); all
// other bytes pass through unchanged, one at a time.
for(int i=off; i < off+len; i++) {
switch (buffer[i]) {
case '&': output.write(ampBytes); break;
case '<': output.write(ltBytes); break;
case '>': output.write(gtBytes); break;
case '\'': output.write(aposBytes); break;
case '"': output.write(quotBytes); break;
default: output.write(buffer, i, 1);
}
}
} | java | {
"resource": ""
} |
q161617 | HtmlQuoting.quoteHtmlChars | train | public static String quoteHtmlChars(String item) {
// Returns item with HTML metacharacters quoted, the same instance when no
// quoting is needed, and null for null input.
if (item == null) {
return null;
}
// NOTE(review): getBytes()/toString() use the platform default charset;
// on platforms with a non-ASCII-compatible default this round trip could
// corrupt text -- confirm whether an explicit charset is intended.
byte[] bytes = item.getBytes();
if (needsQuoting(bytes, 0, bytes.length)) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
quoteHtmlChars(buffer, bytes, 0, bytes.length);
} catch (IOException ioe) {
// Won't happen, since it is a bytearrayoutputstream
}
return buffer.toString();
} else {
return item;
}
} | java | {
"resource": ""
} |
q161618 | HtmlQuoting.quoteOutputStream | train | public static OutputStream quoteOutputStream(final OutputStream out
) throws IOException {
// Wrap 'out' so that everything written through the returned stream is
// HTML-quoted on the fly. flush/close are forwarded to the wrapped stream.
return new OutputStream() {
// Scratch one-byte buffer reused by the single-byte write() overload.
private byte[] oneByte = new byte[1];
@Override
public void write(byte[] buf, int off, int len) throws IOException {
quoteHtmlChars(out, buf, off, len);
}
@Override
public void write(int b) throws IOException {
oneByte[0] = (byte) b;
quoteHtmlChars(out, oneByte, 0, 1);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
out.close();
}
};
} | java | {
"resource": ""
} |
q161619 | HtmlQuoting.unquoteHtmlChars | train | public static String unquoteHtmlChars(String item) {
if (item == null) {
return null;
}
int next = item.indexOf('&');
// nothing was quoted
if (next == -1) {
return item;
}
int len = item.length();
int posn = 0;
StringBuilder buffer = new StringBuilder();
while (next != -1) {
buffer.append(item.substring(posn, next));
if (item.startsWith("&", next)) {
buffer.append('&');
next += 5;
} else if (item.startsWith("'", next)) {
buffer.append('\'');
next += 6;
} else if (item.startsWith(">", next)) {
buffer.append('>');
next += 4;
} else if (item.startsWith("<", next)) {
buffer.append('<');
next += 4;
} else if (item.startsWith(""", next)) {
buffer.append('"');
next += 6;
} else {
int end = item.indexOf(';', next)+1;
if (end == 0) {
end = len;
}
throw new IllegalArgumentException("Bad HTML quoting for " +
item.substring(next,end));
}
posn = next;
next = item.indexOf('&', posn);
}
buffer.append(item.substring(posn, len));
return buffer.toString();
} | java | {
"resource": ""
} |
q161620 | IndexCache.getIndexInformation | train | public IndexRecord getIndexInformation(String mapId, int reduce,
Path fileName) throws IOException {
// Returns the index record for the given reducer of the given map,
// loading the map's index file into the cache on a miss and waiting for
// any concurrent load to finish on a hit.
IndexInformation info = cache.get(mapId);
if (info == null) {
info = readIndexFileToCache(fileName, mapId);
} else {
synchronized (info) {
// Another thread may still be populating this entry; wait until its
// spill record is published.
while (null == info.mapSpillRecord) {
try {
info.wait();
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for construction", e);
}
}
}
LOG.debug("IndexCache HIT: MapId " + mapId + " found");
}
// BUG FIX: 'reduce' is a zero-based partition index, so a record of size
// N only covers reducers 0..N-1. The original '< reduce' check let
// reduce == size() through, causing an out-of-bounds access below.
if (info.mapSpillRecord.size() == 0 ||
info.mapSpillRecord.size() <= reduce) {
throw new IOException("Invalid request " +
" Map Id = " + mapId + " Reducer = " + reduce +
" Index Info Length = " + info.mapSpillRecord.size());
}
return info.mapSpillRecord.getIndex(reduce);
} | java | {
"resource": ""
} |
q161621 | IndexCache.removeMap | train | public void removeMap(String mapId) {
// Evict a map's index entry from the cache, releasing its accounted
// memory and removing it from the eviction queue.
IndexInformation info = cache.remove(mapId);
if (info != null) {
totalMemoryUsed.addAndGet(-info.getSize());
if (!queue.remove(mapId)) {
LOG.warn("Map ID" + mapId + " not found in queue!!");
}
} else {
LOG.info("Map ID " + mapId + " not found in cache");
}
} | java | {
"resource": ""
} |
q161622 | IndexCache.freeIndexInformation | train | private synchronized void freeIndexInformation() {
// Evict entries in queue order until memory accounting drops back under
// the configured budget.
while (totalMemoryUsed.get() > totalMemoryAllowed) {
// NOTE(review): queue.remove() throws NoSuchElementException if the
// queue empties while usage is still over budget -- presumably the
// accounting guarantees this cannot happen; worth confirming.
String s = queue.remove();
IndexInformation info = cache.remove(s);
if (info != null) {
totalMemoryUsed.addAndGet(-info.getSize());
}
}
} | java | {
"resource": ""
} |
q161623 | JobTracker.getStatusesOnHost | train | private List<TaskTrackerStatus> getStatusesOnHost(String hostName) {
// Collect the status of every task tracker currently running on the
// given host.
List<TaskTrackerStatus> result = new ArrayList<TaskTrackerStatus>();
synchronized (taskTrackers) {
for (TaskTracker tracker : taskTrackers.values()) {
TaskTrackerStatus trackerStatus = tracker.getStatus();
if (hostName.equals(trackerStatus.getHost())) {
result.add(trackerStatus);
}
}
}
return result;
} | java | {
"resource": ""
} |
q161624 | JobTracker.markCompletedTaskAttempt | train | void markCompletedTaskAttempt(String taskTracker, TaskAttemptID taskid) {
// Record the attempt as 'marked' for its tracker (tracker --> taskid),
// creating the per-tracker set on first use.
Set<TaskAttemptID> marked = trackerToMarkedTasksMap.get(taskTracker);
if (marked == null) {
marked = new TreeSet<TaskAttemptID>();
trackerToMarkedTasksMap.put(taskTracker, marked);
}
marked.add(taskid);
LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'");
} | java | {
"resource": ""
} |
q161625 | JobTracker.markCompletedJob | train | void markCompletedJob(JobInProgress job) {
for (TaskInProgress tip : job.getTasks(TaskType.JOB_SETUP)) {
for (TaskStatus taskStatus : tip.getTaskStatuses()) {
if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) {
markCompletedTaskAttempt(taskStatus.getTaskTracker(),
taskStatus.getTaskID());
}
}
}
for (TaskInProgress tip : job.getTasks(TaskType.MAP)) {
for (TaskStatus taskStatus : tip.getTaskStatuses()) {
if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN &&
taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN &&
taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) {
markCompletedTaskAttempt(taskStatus.getTaskTracker(),
taskStatus.getTaskID());
}
}
}
for (TaskInProgress tip : job.getTasks(TaskType.REDUCE)) {
for (TaskStatus taskStatus : tip.getTaskStatuses()) {
if (taskStatus.getRunState() != TaskStatus.State.RUNNING &&
taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING &&
taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN &&
taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN &&
taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) {
markCompletedTaskAttempt(taskStatus.getTaskTracker(),
taskStatus.getTaskID());
}
}
}
} | java | {
"resource": ""
} |
q161626 | JobTracker.isBlacklisted | train | synchronized public boolean isBlacklisted(String trackerID) {
// A tracker is blacklisted iff it is currently known and its host appears
// on the faulty-trackers blacklist.
TaskTrackerStatus status = getTaskTrackerStatus(trackerID);
return status != null && faultyTrackers.isBlacklisted(status.getHost());
} | java | {
"resource": ""
} |
q161627 | JobTracker.addNewTracker | train | void addNewTracker(TaskTracker taskTracker) {
// Register a freshly-contacted tracker: schedule it for expiry tracking,
// resolve its network location, index it by host and update metrics.
TaskTrackerStatus status = taskTracker.getStatus();
trackerExpiryQueue.add(status);
// Register the tracker if its not registered
String hostname = status.getHost();
if (getNode(status.getTrackerName()) == null) {
// Making the network location resolution inline ..
resolveAndAddToTopology(hostname);
}
// add it to the set of tracker per host
Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname);
if (trackers == null) {
trackers = Collections.synchronizedSet(new HashSet<TaskTracker>());
hostnameToTaskTracker.put(hostname, trackers);
}
statistics.taskTrackerAdded(status.getTrackerName());
getInstrumentation().addTrackers(1);
LOG.info("Adding tracker " + status.getTrackerName() + " to host "
+ hostname);
trackers.add(taskTracker);
} | java | {
"resource": ""
} |
q161628 | JobTracker.getNextHeartbeatInterval | train | public int getNextHeartbeatInterval() {
// Scale the heartbeat interval with cluster size so the JobTracker sees
// roughly NUM_HEARTBEATS_IN_SECOND heartbeats per second overall, never
// dropping below the configured minimum.
int clusterSize = getClusterStatus().getTaskTrackers();
double beats = Math.ceil((double) clusterSize / NUM_HEARTBEATS_IN_SECOND);
int scaled = (int) (1000 * HEARTBEATS_SCALING_FACTOR * beats);
return Math.max(scaled, HEARTBEAT_INTERVAL_MIN);
} | java | {
"resource": ""
} |
q161629 | JobTracker.inHostsList | train | private boolean inHostsList(TaskTrackerStatus status) {
// An empty include list admits every host; otherwise the host must be
// explicitly listed.
Set<String> includedHosts = hostsReader.getHosts();
if (includedHosts.isEmpty()) {
return true;
}
return includedHosts.contains(status.getHost());
} | java | {
"resource": ""
} |
q161630 | JobTracker.inExcludedHostsList | train | private boolean inExcludedHostsList(TaskTrackerStatus status) {
// True iff the tracker's host appears on the exclude (decommission) list.
return hostsReader.getExcludedHosts().contains(status.getHost());
} | java | {
"resource": ""
} |
q161631 | JobTracker.incrementReservations | train | void incrementReservations(TaskType type, int reservedSlots) {
// Bump the cluster-wide reserved-slot counter for the given task type;
// other task types are ignored.
switch (type) {
case MAP:
reservedMapSlots += reservedSlots;
break;
case REDUCE:
reservedReduceSlots += reservedSlots;
break;
default:
break;
}
} | java | {
"resource": ""
} |
q161632 | JobTracker.processHeartbeat | train | synchronized boolean processHeartbeat(
TaskTrackerStatus trackerStatus,
boolean initialContact) {
// Process one tracker heartbeat: reconcile the tracker's registration
// state with whether we've seen it before, then fold its task and health
// reports into the JobTracker's view. Returns false when a non-initial
// heartbeat arrives from a tracker we have no record of (the caller
// should ask it to reinitialize).
String trackerName = trackerStatus.getTrackerName();
synchronized (taskTrackers) {
synchronized (trackerExpiryQueue) {
boolean seenBefore = updateTaskTrackerStatus(trackerName,
trackerStatus);
TaskTracker taskTracker = getTaskTracker(trackerName);
if (initialContact) {
// If it's first contact, then clear out
// any state hanging around
if (seenBefore) {
LOG.warn("initialContact but seenBefore from Tracker : " + trackerName);
lostTaskTracker(taskTracker);
}
} else {
// If not first contact, there should be some record of the tracker
if (!seenBefore) {
LOG.warn("Status from unknown Tracker : " + trackerName);
updateTaskTrackerStatus(trackerName, null);
return false;
}
}
if (initialContact) {
// if this is lost tracker that came back now, and if it blacklisted
// increment the count of blacklisted trackers in the cluster
if (isBlacklisted(trackerName)) {
faultyTrackers.incrBlackListedTrackers(1);
}
addNewTracker(taskTracker);
}
}
}
updateTaskStatuses(trackerStatus);
updateNodeHealthStatus(trackerStatus);
return true;
} | java | {
"resource": ""
} |
q161633 | JobTracker.addJobForCleanup | train | private void addJobForCleanup(JobID id) {
// Ask every known tracker to clean up the given job's local state by
// queueing the job id in each tracker's to-cleanup set.
// NOTE(review): taskTrackers.keySet() is iterated here without the
// taskTrackers lock used elsewhere -- confirm this is safe against
// concurrent tracker add/remove.
for (String taskTracker : taskTrackers.keySet()) {
LOG.debug("Marking job " + id + " for cleanup by tracker " + taskTracker);
synchronized (trackerToJobsToCleanup) {
Set<JobID> jobsToKill = trackerToJobsToCleanup.get(taskTracker);
if (jobsToKill == null) {
jobsToKill = new HashSet<JobID>();
trackerToJobsToCleanup.put(taskTracker, jobsToKill);
}
jobsToKill.add(id);
}
}
} | java | {
"resource": ""
} |
q161634 | JobTracker.getJobsForCleanup | train | private List<TaskTrackerAction> getJobsForCleanup(String taskTracker) {
// Atomically drain the set of jobs the given tracker must clean up and
// turn each into a KillJobAction; returns null when nothing is pending.
Set<JobID> pending;
synchronized (trackerToJobsToCleanup) {
pending = trackerToJobsToCleanup.remove(taskTracker);
}
if (pending == null) {
return null;
}
// prepare the actions list
List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
for (JobID killJobId : pending) {
actions.add(new KillJobAction(killJobId));
LOG.debug(taskTracker + " -> KillJobAction: " + killJobId);
}
return actions;
} | java | {
"resource": ""
} |
q161635 | JobTracker.getSetupAndCleanupTasks | train | List<Task> getSetupAndCleanupTasks(
TaskTrackerStatus taskTracker) throws IOException {
// Offer the tracker at most one auxiliary task, preferring (in order)
// job cleanup, task-attempt cleanup, then job setup; map-slot work is
// considered before reduce-slot work. Returns null when nothing fits.
// (Decomposed: the map and reduce branches were identical except for the
// boolean slot-type flag.)
int maxMapTasks = taskScheduler.getMaxSlots(taskTracker, TaskType.MAP);
int maxReduceTasks =
taskScheduler.getMaxSlots(taskTracker, TaskType.REDUCE);
int numMaps = taskTracker.countOccupiedMapSlots();
int numReduces = taskTracker.countOccupiedReduceSlots();
int numTaskTrackers = getClusterStatus().getTaskTrackers();
int numUniqueHosts = getNumberOfUniqueHosts();
List<JobInProgress> cachedJobs = new ArrayList<JobInProgress> ();
// get a snapshot of all the jobs in the system
synchronized (jobs) {
cachedJobs.addAll(jobs.values());
}
if (numMaps < maxMapTasks) {
List<Task> t = obtainAuxiliaryTask(cachedJobs, taskTracker,
numTaskTrackers, numUniqueHosts, true);
if (t != null) {
return t;
}
}
if (numReduces < maxReduceTasks) {
List<Task> t = obtainAuxiliaryTask(cachedJobs, taskTracker,
numTaskTrackers, numUniqueHosts, false);
if (t != null) {
return t;
}
}
return null;
}

// Scans the job snapshot for one job-cleanup, task-attempt-cleanup or
// job-setup task (in that priority order) for the given slot type (isMap);
// returns a singleton list or null.
private List<Task> obtainAuxiliaryTask(List<JobInProgress> cachedJobs,
TaskTrackerStatus taskTracker, int numTaskTrackers, int numUniqueHosts,
boolean isMap) throws IOException {
for (JobInProgress job: cachedJobs) {
Task t = job.obtainJobCleanupTask(taskTracker, numTaskTrackers,
numUniqueHosts, isMap);
if (t != null) {
return Collections.singletonList(t);
}
}
for (JobInProgress job: cachedJobs) {
Task t = job.obtainTaskCleanupTask(taskTracker, isMap);
if (t != null) {
return Collections.singletonList(t);
}
}
for (JobInProgress job: cachedJobs) {
Task t = job.obtainJobSetupTask(taskTracker, numTaskTrackers,
numUniqueHosts, isMap);
if (t != null) {
return Collections.singletonList(t);
}
}
return null;
} | java | {
"resource": ""
} |
q161636 | JobTracker.getNewJobId | train | public JobID getNewJobId() throws IOException {
// Allocate the next unique job id for this JobTracker instance and
// remember which user requested it.
JobID id = new JobID(getTrackerIdentifier(), nextJobId.getAndIncrement());
// get the user group info
UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
// mark the user for this id
jobToUserMap.put(id, ugi.getUserName());
LOG.info("Job id " + id + " assigned to user " + ugi.getUserName());
return id;
} | java | {
"resource": ""
} |
q161637 | JobTracker.addJob | train | protected synchronized JobStatus addJob(JobID jobId, JobInProgress job) {
// Register a newly-submitted job: publish it to the jobs map and all
// JobInProgress listeners (under the scheduler lock), update metrics,
// and return the job's current status.
totalSubmissions++;
synchronized (jobs) {
synchronized (taskScheduler) {
jobs.put(job.getProfile().getJobID(), job);
for (JobInProgressListener listener : jobInProgressListeners) {
try {
listener.jobAdded(job);
} catch (IOException ioe) {
LOG.warn("Failed to add and so skipping the job : "
+ job.getJobID() + ". Exception : " + ioe);
}
}
}
}
myInstrumentation.submitJob(job.getJobConf(), jobId);
String jobName = job.getJobConf().getJobName();
// Keep the log line bounded for jobs with very long names.
int jobNameLen = 64;
if (jobName.length() > jobNameLen) {
jobName = jobName.substring(0, jobNameLen); // Truncate for logging.
}
LOG.info("Job " + jobId + "(" + jobName +
") added successfully for user '" + job.getJobConf().getUser() +
"' to queue '" + job.getJobConf().getQueueName() + "'" +
", source " + job.getJobConf().getJobSource());
return job.getStatus();
} | java | {
"resource": ""
} |
q161638 | JobTracker.checkAccess | train | private void checkAccess(JobInProgress job,
QueueManager.QueueOperation oper)
throws IOException {
// Convenience overload: authorize the current caller's UGI.
checkAccess(job, oper, UserGroupInformation.getCurrentUGI());
} | java | {
"resource": ""
} |
q161639 | JobTracker.checkAccess | train | private void checkAccess(JobInProgress job, QueueManager.QueueOperation oper,
UserGroupInformation ugi) throws IOException {
// Verify that 'ugi' may perform 'oper' on the job's queue; throws
// AccessControlException (an IOException subtype) on denial.
// get the queue
String queue = job.getProfile().getQueueName();
if (!queueManager.hasAccess(queue, job, oper, ugi)) {
throw new AccessControlException("User "
+ ugi.getUserName()
+ " cannot perform "
+ "operation " + oper + " on queue " + queue +
".\n Please run \"hadoop queue -showacls\" " +
"command to find the queues you have access" +
" to .");
}
} | java | {
"resource": ""
} |
q161640 | JobTracker.failJob | train | public synchronized void failJob(JobInProgress job) {
// Force the job into the FAILED state and, if the run state actually
// changed, notify JobInProgress listeners with a state-change event.
if (null == job) {
LOG.info("Fail on null job is not valid");
return;
}
JobStatus prevStatus = (JobStatus)job.getStatus().clone();
LOG.info("Failing job " + job.getJobID());
job.fail();
// Inform the listeners if the job state has changed
JobStatus newStatus = (JobStatus)job.getStatus().clone();
if (prevStatus.getRunState() != newStatus.getRunState()) {
JobStatusChangeEvent event =
new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus,
newStatus);
updateJobInProgressListeners(event);
}
} | java | {
"resource": ""
} |
q161641 | JobTracker.setJobPriority | train | public synchronized void setJobPriority(JobID jobid,
String priority)
throws IOException {
// Authorize the caller, parse the priority name and delegate to the
// JobPriority overload; unknown job ids are logged and ignored.
JobInProgress job = jobs.get(jobid);
if (null == job) {
LOG.info("setJobPriority(): JobId " + jobid.toString()
+ " is not a valid job");
return;
}
checkAccess(job, QueueManager.QueueOperation.ADMINISTER_JOBS);
setJobPriority(jobid, JobPriority.valueOf(priority));
} | java | {
"resource": ""
} |
q161642 | JobTracker.getTaskStatuses | train | TaskStatus[] getTaskStatuses(TaskID tipid) {
// All attempt statuses for the tip, or an empty array for unknown tips.
TaskInProgress tip = getTip(tipid);
if (tip == null) {
return new TaskStatus[0];
}
return tip.getTaskStatuses();
} | java | {
"resource": ""
} |
q161643 | JobTracker.getTaskStatus | train | TaskStatus getTaskStatus(TaskAttemptID taskid) {
// The status of one specific attempt, or null when its tip is unknown.
TaskInProgress tip = getTip(taskid.getTaskID());
if (tip == null) {
return null;
}
return tip.getTaskStatus(taskid);
} | java | {
"resource": ""
} |
q161644 | JobTracker.getTipCounters | train | Counters getTipCounters(TaskID tipid) {
// Aggregated counters for the tip, or null when the tip is unknown.
TaskInProgress tip = getTip(tipid);
if (tip == null) {
return null;
}
return tip.getCounters();
} | java | {
"resource": ""
} |
q161645 | JobTracker.setJobPriority | train | synchronized void setJobPriority(JobID jobId, JobPriority priority) {
// Apply the new priority under the scheduler lock and fire a
// PRIORITY_CHANGED event so listeners can reorder the job.
JobInProgress job = jobs.get(jobId);
if (job != null) {
synchronized (taskScheduler) {
JobStatus oldStatus = (JobStatus)job.getStatus().clone();
job.setPriority(priority);
JobStatus newStatus = (JobStatus)job.getStatus().clone();
JobStatusChangeEvent event =
new JobStatusChangeEvent(job, EventType.PRIORITY_CHANGED, oldStatus,
newStatus);
updateJobInProgressListeners(event);
}
} else {
LOG.warn("Trying to change the priority of an unknown job: " + jobId);
}
} | java | {
"resource": ""
} |
q161646 | JobTracker.updateTaskStatuses | train | void updateTaskStatuses(TaskTrackerStatus status) {
// Fold every task report in a tracker heartbeat into JobTracker state:
// reports for unknown jobs are routed to the job-cleanup queue, reports
// for uninitialized jobs to the task-kill queue, live tips are updated
// (firing job state-change events when the run state flips), and any
// reduce-side 'fetch failed' notifications are replayed against the
// offending map tips.
String trackerName = status.getTrackerName();
for (TaskStatus report : status.getTaskReports()) {
report.setTaskTracker(trackerName);
TaskAttemptID taskId = report.getTaskID();
// Remove it from the expired task list
if (report.getRunState() != TaskStatus.State.UNASSIGNED) {
expireLaunchingTasks.removeTask(taskId);
}
JobInProgress job = getJob(taskId.getJobID());
if (job == null) {
// if job is not there in the cleanup list ... add it
synchronized (trackerToJobsToCleanup) {
Set<JobID> jobs = trackerToJobsToCleanup.get(trackerName);
if (jobs == null) {
jobs = new HashSet<JobID>();
trackerToJobsToCleanup.put(trackerName, jobs);
}
jobs.add(taskId.getJobID());
}
continue;
}
if (!job.inited()) {
// if job is not yet initialized ... kill the attempt
synchronized (trackerToTasksToCleanup) {
Set<TaskAttemptID> tasks = trackerToTasksToCleanup.get(trackerName);
if (tasks == null) {
tasks = new HashSet<TaskAttemptID>();
trackerToTasksToCleanup.put(trackerName, tasks);
}
tasks.add(taskId);
}
continue;
}
TaskInProgress tip = taskidToTIPMap.get(taskId);
// Check if the tip is known to the jobtracker. In case of a restarted
// jt, some tasks might join in later
if (tip != null) {
// Update the job and inform the listeners if necessary
JobStatus prevStatus = (JobStatus)job.getStatus().clone();
// Clone TaskStatus object here, because JobInProgress
// or TaskInProgress can modify this object and
// the changes should not get reflected in TaskTrackerStatus.
// An old TaskTrackerStatus is used later in countMapTasks, etc.
job.updateTaskStatus(tip, (TaskStatus)report.clone());
JobStatus newStatus = (JobStatus)job.getStatus().clone();
// Update the listeners if an incomplete job completes
if (prevStatus.getRunState() != newStatus.getRunState()) {
JobStatusChangeEvent event =
new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED,
prevStatus, newStatus);
updateJobInProgressListeners(event);
}
} else {
LOG.info("Serious problem.  While updating status, cannot find taskid "
+ report.getTaskID());
}
// Process 'failed fetch' notifications
List<TaskAttemptID> failedFetchMaps = report.getFetchFailedMaps();
if (failedFetchMaps != null) {
TaskAttemptID reportingAttempt = report.getTaskID();
for (TaskAttemptID mapTaskId : failedFetchMaps) {
TaskInProgress failedFetchMap = taskidToTIPMap.get(mapTaskId);
if (failedFetchMap != null) {
// Gather information about the map which has to be failed, if need be
String failedFetchTrackerName = getAssignedTracker(mapTaskId);
if (failedFetchTrackerName == null) {
failedFetchTrackerName = "Lost task tracker";
}
((JobInProgress)failedFetchMap.getJob()).fetchFailureNotification(
reportingAttempt, failedFetchMap, mapTaskId, failedFetchTrackerName);
}
}
}
}
} | java | {
"resource": ""
} |
q161647 | JobTracker.lostTaskTracker | train | void lostTaskTracker(TaskTracker taskTracker) {
// Handle a tracker that stopped heartbeating: drop its cleanup queues,
// fail/kill its incomplete tasks (completed reduces, and completed maps
// of map-only jobs, are merely unregistered), penalize affected jobs,
// cancel slot reservations and purge 'marked' task records.
String trackerName = taskTracker.getTrackerName();
LOG.info("Lost tracker '" + trackerName + "'");
// remove the tracker from the local structures
synchronized (trackerToJobsToCleanup) {
trackerToJobsToCleanup.remove(trackerName);
}
synchronized (trackerToTasksToCleanup) {
trackerToTasksToCleanup.remove(trackerName);
}
Set<TaskAttemptIDWithTip> lostTasks = trackerToTaskMap.get(trackerName);
trackerToTaskMap.remove(trackerName);
if (lostTasks != null) {
// List of jobs which had any of their tasks fail on this tracker
Set<JobInProgress> jobsWithFailures = new HashSet<JobInProgress>();
for (TaskAttemptIDWithTip oneTask : lostTasks) {
TaskAttemptID taskId = oneTask.attemptId;
TaskInProgress tip = oneTask.tip;
JobInProgress job = (JobInProgress) tip.getJob();
// Completed reduce tasks never need to be failed, because
// their outputs go to dfs
// And completed maps with zero reducers of the job
// never need to be failed.
if (!tip.isComplete() ||
(tip.isMapTask() && !tip.isJobSetupTask() &&
job.desiredReduces() != 0)) {
// if the job is done, we don't want to change anything
if (job.getStatus().getRunState() == JobStatus.RUNNING ||
job.getStatus().getRunState() == JobStatus.PREP) {
// the state will be KILLED_UNCLEAN, if the task(map or reduce)
// was RUNNING on the tracker
TaskStatus.State killState = (tip.isRunningTask(taskId) &&
!tip.isJobSetupTask() && !tip.isJobCleanupTask()) ?
TaskStatus.State.KILLED_UNCLEAN : TaskStatus.State.KILLED;
job.failedTask(tip, taskId,
("Lost task tracker: " + trackerName +
" at " + new Date()),
(tip.isMapTask() ?
TaskStatus.Phase.MAP :
TaskStatus.Phase.REDUCE),
killState,
trackerName);
jobsWithFailures.add(job);
}
} else {
// Completed 'reduce' task and completed 'maps' with zero
// reducers of the job, not failed;
// only removed from data-structures.
markCompletedTaskAttempt(trackerName, taskId);
}
}
// Penalize this tracker for each of the jobs which
// had any tasks running on it when it was 'lost'
// Also, remove any reserved slots on this tasktracker
for (JobInProgress job : jobsWithFailures) {
String reason = "Tracker went down";
job.addTrackerTaskFailure(trackerName, taskTracker, reason);
}
// Cleanup
taskTracker.cancelAllReservations();
// Purge 'marked' tasks, needs to be done
// here to prevent hanging references!
removeMarkedTasks(trackerName);
}
} | java | {
"resource": ""
} |
q161648 | JobTracker.removeTracker | train | private void removeTracker(TaskTracker tracker) {
// Fully forget a tracker: fail/kill its outstanding tasks via
// lostTaskTracker, fix up the blacklist count, drop its status record
// and update statistics/metrics.
String trackerName = tracker.getTrackerName();
// Remove completely after marking the tasks as 'KILLED'
lostTaskTracker(tracker);
// tracker is lost, and if it is blacklisted, remove
// it from the count of blacklisted trackers in the cluster
// (the unused local that fetched tracker.getStatus() here was removed)
if (isBlacklisted(trackerName)) {
faultyTrackers.decrBlackListedTrackers(1);
}
updateTaskTrackerStatus(trackerName, null);
statistics.taskTrackerRemoved(trackerName);
getInstrumentation().decTrackers(1);
} | java | {
"resource": ""
} |
q161649 | JobTracker.main | train | public static void main(String argv[]
) throws IOException, InterruptedException {
// Command-line entry point: no args starts a tracker with the default
// conf; "-instance <0|1>" starts one of two preconfigured instances;
// "-dumpConfiguration" prints the effective configuration and exits.
// Any startup failure is logged fatally and exits with -1.
StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG);
try {
if (argv.length == 0) {
JobTracker tracker = startTracker(new JobConf());
tracker.offerService();
return;
}
if ("-instance".equals(argv[0]) && argv.length == 2) {
int instance = Integer.parseInt(argv[1]);
if (instance == 0 || instance == 1) {
JobConf conf = new JobConf();
JobConf.overrideConfiguration(conf, instance);
JobTracker tracker = startTracker(conf);
tracker.offerService();
return;
}
}
if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) {
dumpConfiguration(new PrintWriter(System.out));
return;
}
// Unrecognized arguments fall through to the usage message.
System.out.println("usage: JobTracker [-dumpConfiguration]");
System.out.println("       JobTracker [-instance <0|1>]");
System.exit(-1);
} catch (Throwable e) {
LOG.fatal(StringUtils.stringifyException(e));
System.exit(-1);
}
} | java | {
"resource": ""
} |
q161650 | JobTracker.dumpConfiguration | train | private static void dumpConfiguration(Writer writer) throws IOException {
Configuration.dumpConfiguration(new JobConf(), writer);
writer.write("\n");
// get the QueueManager configuration properties
QueueManager.dumpConfiguration(writer);
writer.write("\n");
} | java | {
"resource": ""
} |
q161651 | JobTracker.checkMemoryRequirements | train | private void checkMemoryRequirements(JobInProgress job)
throws IOException {
if (!perTaskMemoryConfigurationSetOnJT()) {
LOG.debug("Per-Task memory configuration is not set on JT. "
+ "Not checking the job for invalid memory requirements.");
return;
}
boolean invalidJob = false;
String msg = "";
long maxMemForMapTask = job.getMemoryForMapTask();
long maxMemForReduceTask = job.getMemoryForReduceTask();
if (maxMemForMapTask == JobConf.DISABLED_MEMORY_LIMIT
|| maxMemForReduceTask == JobConf.DISABLED_MEMORY_LIMIT) {
invalidJob = true;
msg = "Invalid job requirements.";
}
if (maxMemForMapTask > limitMaxMemForMapTasks
|| maxMemForReduceTask > limitMaxMemForReduceTasks) {
invalidJob = true;
msg = "Exceeds the cluster's max-memory-limit.";
}
if (invalidJob) {
StringBuilder jobStr =
new StringBuilder().append(job.getJobID().toString()).append("(")
.append(maxMemForMapTask).append(" memForMapTasks ").append(
maxMemForReduceTask).append(" memForReduceTasks): ");
LOG.warn(jobStr.toString() + msg);
throw new IOException(jobStr.toString() + msg);
}
} | java | {
"resource": ""
} |
q161652 | JobTracker.updateTotalTaskCapacity | train | private void updateTotalTaskCapacity(TaskTrackerStatus status) {
int mapSlots = taskScheduler.getMaxSlots(status, TaskType.MAP);
String trackerName = status.getTrackerName();
Integer oldMapSlots = trackerNameToMapSlots.get(trackerName);
if (oldMapSlots == null) {
oldMapSlots = 0;
}
int delta = mapSlots - oldMapSlots;
if (delta != 0) {
totalMapTaskCapacity += delta;
trackerNameToMapSlots.put(trackerName, mapSlots);
LOG.info("Changing map slot count due to " + trackerName + " from " +
oldMapSlots + " to " + mapSlots + ", totalMap = " + totalMapTaskCapacity);
}
int reduceSlots = taskScheduler.getMaxSlots(status, TaskType.REDUCE);
Integer oldReduceSlots = trackerNameToReduceSlots.get(trackerName);
if (oldReduceSlots == null) {
oldReduceSlots = 0;
}
delta = reduceSlots - oldReduceSlots;
if (delta != 0) {
totalReduceTaskCapacity += delta;
trackerNameToReduceSlots.put(trackerName, reduceSlots);
LOG.info("Changing reduce slot count due to " + trackerName + " from " +
oldReduceSlots + " to " + reduceSlots +
", totalReduce = " + totalReduceTaskCapacity);
}
} | java | {
"resource": ""
} |
q161653 | JobTracker.removeTaskTrackerCapacity | train | private void removeTaskTrackerCapacity(TaskTrackerStatus status) {
Integer mapSlots = trackerNameToMapSlots.remove(status.getTrackerName());
if (mapSlots == null) {
mapSlots = 0;
}
totalMapTaskCapacity -= mapSlots;
Integer reduceSlots = trackerNameToReduceSlots.remove(status.getTrackerName());
if (reduceSlots == null) {
reduceSlots = 0;
}
totalReduceTaskCapacity -= reduceSlots;
LOG.info("Removing " + mapSlots + " map slots, " + reduceSlots +
" reduce slots due to " + status.getTrackerName() +
", totalMap = " + totalMapTaskCapacity + ", totalReduce = " + totalReduceTaskCapacity);
} | java | {
"resource": ""
} |
q161654 | JournalNodeJournalSyncer.recoverSegments | train | void recoverSegments(SyncTask task) throws IOException {
// obtain the list of segments that are valid
if (!prepareRecovery(task)) {
return;
}
// iterate through all nodes
for (InetSocketAddress jn : journalNodes) {
if (isLocalIpAddress(jn.getAddress())
&& jn.getPort() == journalNode.getPort()) {
// we do not need to talk to ourselves
continue;
}
try {
// get manifest for log that we care about
List<EditLogFile> remoteLogFiles = getManifest(jn, task.journal,
task.recoveryStartTxid);
// go through all remote segments
for (EditLogFile relf : remoteLogFiles) {
recoverSegment(jn, relf, task);
}
// if we are done, there is no need to iterate more
if (!task.hasMissingValidSegments()) {
LOG.info(logMsg + "recovery finished.");
break;
}
} catch (Exception e) {
LOG.error(logMsg + "error", e);
continue;
}
}
} | java | {
"resource": ""
} |
q161655 | JournalNodeJournalSyncer.getManifest | train | private List<EditLogFile> getManifest(InetSocketAddress jn, Journal journal,
long minTxId) throws IOException {
String m = DFSUtil.getHTMLContentWithTimeout(
new URL("http", jn.getAddress().getHostAddress(), jn.getPort(),
GetJournalManifestServlet.buildPath(journal.getJournalId(),
minTxId, journal.getJournalStorage())), httpConnectReadTimeoutMs,
httpConnectReadTimeoutMs);
return convertJsonToListManifest(m);
} | java | {
"resource": ""
} |
q161656 | JournalNodeJournalSyncer.convertJsonToListManifest | train | public static List<EditLogFile> convertJsonToListManifest(String json)
throws IOException {
if (json == null || json.isEmpty()) {
return new ArrayList<EditLogFile>();
}
// get the list of strings from the http response
TypeReference<List<String>> type = new TypeReference<List<String>>() {
};
List<String> logFilesDesc = mapper.readValue(json, type);
// we need to convert the list of strings into edit log files
List<EditLogFile> logFiles = new ArrayList<EditLogFile>();
for (String lf : logFilesDesc) {
logFiles.add(new EditLogFile(lf));
}
return logFiles;
} | java | {
"resource": ""
} |
q161657 | JournalNodeJournalSyncer.isLocalIpAddress | train | private boolean isLocalIpAddress(InetAddress addr) {
if (addr.isAnyLocalAddress() || addr.isLoopbackAddress())
return true;
try {
return NetworkInterface.getByInetAddress(addr) != null;
} catch (SocketException e) {
return false;
}
} | java | {
"resource": ""
} |
q161658 | QueueManager.refreshAcls | train | synchronized void refreshAcls(Configuration conf) throws IOException {
try {
HashMap<String, AccessControlList> newAclsMap =
getQueueAcls(conf);
aclsMap = newAclsMap;
} catch (Throwable t) {
String exceptionString = StringUtils.stringifyException(t);
LOG.warn("Queue ACLs could not be refreshed because there was an " +
"exception in parsing the configuration: "+ exceptionString +
". Existing ACLs are retained.");
throw new IOException(exceptionString);
}
} | java | {
"resource": ""
} |
q161659 | SocketCache.get | train | public Socket get(SocketAddress remote) {
synchronized(multimap) {
List<Socket> sockList = multimap.get(remote);
if (sockList == null) {
return null;
}
Iterator<Socket> iter = sockList.iterator();
while (iter.hasNext()) {
Socket candidate = iter.next();
iter.remove();
if (!candidate.isClosed()) {
return candidate;
}
}
}
return null;
} | java | {
"resource": ""
} |
q161660 | SocketCache.put | train | public void put(Socket sock) {
Preconditions.checkNotNull(sock);
SocketAddress remoteAddr = sock.getRemoteSocketAddress();
if (remoteAddr == null) {
LOG.warn("Cannot cache (unconnected) socket with no remote address: " +
sock);
IOUtils.closeSocket(sock);
return;
}
Socket oldestSock = null;
synchronized(multimap) {
if (capacity == multimap.size()) {
oldestSock = evictOldest();
}
multimap.put(remoteAddr, sock);
}
if (oldestSock != null) {
IOUtils.closeSocket(oldestSock);
}
} | java | {
"resource": ""
} |
q161661 | SocketCache.evictOldest | train | private Socket evictOldest() {
Iterator<Entry<SocketAddress, Socket>> iter =
multimap.entries().iterator();
if (!iter.hasNext()) {
throw new IllegalArgumentException("Cannot evict from empty cache!");
}
Entry<SocketAddress, Socket> entry = iter.next();
iter.remove();
return entry.getValue();
} | java | {
"resource": ""
} |
q161662 | SocketCache.clear | train | public void clear() {
List<Socket> socketsToClear = new LinkedList<Socket>();
synchronized(multimap) {
for (Socket sock : multimap.values()) {
socketsToClear.add(sock);
}
multimap.clear();
}
for (Socket sock : socketsToClear) {
IOUtils.closeSocket(sock);
}
} | java | {
"resource": ""
} |
q161663 | PersistentLongFile.writeFile | train | public static void writeFile(File file, long val) throws IOException {
AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
try {
fos.write(String.valueOf(val).getBytes(Charsets.UTF_8));
fos.write('\n');
fos.close();
fos = null;
} finally {
if (fos != null) {
fos.abort();
}
}
} | java | {
"resource": ""
} |
q161664 | NodeBase.normalize | train | static public String normalize(String path) {
if (path == null || path.length() == 0) return ROOT;
if (path.charAt(0) != PATH_SEPARATOR) {
throw new IllegalArgumentException(
"Network Location path does not start with "
+PATH_SEPARATOR_STR+ ": "+path);
}
int len = path.length();
if (path.charAt(len-1) == PATH_SEPARATOR) {
return path.substring(0, len-1);
}
return path;
} | java | {
"resource": ""
} |
q161665 | ClusterManager.recoverClusterManagerFromDisk | train | private void recoverClusterManagerFromDisk(HostsFileReader hostsReader)
throws IOException {
LOG.info("Restoring state from " +
new java.io.File(conf.getCMStateFile()).getAbsolutePath());
// This will prevent the expireNodes and expireSessions threads from
// expiring the nodes and sessions respectively
safeMode = true;
LOG.info("Safe mode is now: " + (this.safeMode ? "ON" : "OFF"));
CoronaSerializer coronaSerializer = new CoronaSerializer(conf);
// Expecting the START_OBJECT token for ClusterManager
coronaSerializer.readStartObjectToken("ClusterManager");
coronaSerializer.readField("startTime");
startTime = coronaSerializer.readValueAs(Long.class);
coronaSerializer.readField("nodeManager");
nodeManager = new NodeManager(this, hostsReader, coronaSerializer);
nodeManager.setConf(conf);
coronaSerializer.readField("sessionManager");
sessionManager = new SessionManager(this, coronaSerializer);
coronaSerializer.readField("sessionNotifier");
sessionNotifier = new SessionNotifier(sessionManager, this, metrics,
coronaSerializer);
// Expecting the END_OBJECT token for ClusterManager
coronaSerializer.readEndObjectToken("ClusterManager");
lastRestartTime = clock.getTime();
} | java | {
"resource": ""
} |
q161666 | ClusterManager.initLegalTypes | train | protected void initLegalTypes() {
Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
conf.getCpuToResourcePartitioning();
for (Map.Entry<Integer, Map<ResourceType, Integer>> entry :
cpuToResourcePartitioning.entrySet()) {
for (ResourceType type : entry.getValue().keySet()) {
legalTypeSet.add(type);
}
}
legalTypeSet = Collections.unmodifiableSet(legalTypeSet);
} | java | {
"resource": ""
} |
q161667 | ClusterManager.checkResourceRequestType | train | protected boolean checkResourceRequestType(
List<ResourceRequest> requestList) {
for (ResourceRequest req: requestList) {
if (!legalTypeSet.contains(req.type)) {
return false;
}
}
return true;
} | java | {
"resource": ""
} |
q161668 | ClusterManager.checkResourceRequestLimit | train | protected void checkResourceRequestLimit(
List<ResourceRequest> requestList, String handle)
throws InvalidSessionHandle {
ConfigManager configManager = getScheduler().getConfigManager();
Session session = sessionManager.getSession(handle);
PoolInfo poolInfo = session.getPoolInfo();
// Only check the resource requests if this pool is configured to not
// accept more than a fixed number of requests at the same time
if (!configManager.useRequestMax(poolInfo)) {
return;
}
// Count the resources by type
ResourceTypeCounter resourceTypeCounter = new ResourceTypeCounter();
for (ResourceRequest req : requestList) {
resourceTypeCounter.incr(req.type);
}
// No resource type request should exceed the maximum
for (ResourceType resourceType : ResourceType.values()) {
if (configManager.getPoolMaximum(poolInfo, resourceType) <
resourceTypeCounter.getCount(resourceType)) {
String failureMessage =
"Session " + handle + " requested " +
resourceTypeCounter.getCount(resourceType) +
" resources for resource type " +
resourceType + " but was only allowed " +
configManager.getPoolMaximum(poolInfo, resourceType) + ", " +
"so failing the job";
LOG.error(failureMessage);
throw new InvalidSessionHandle(failureMessage);
}
}
} | java | {
"resource": ""
} |
q161669 | ClusterManager.setSafeMode | train | @Override
public synchronized boolean setSafeMode(boolean safeMode) {
/**
* If we are switching off the safe mode, so we need to reset the last
* heartbeat timestamp for each of the sessions and nodes.
*/
if (safeMode == false) {
LOG.info("Resetting the heartbeat times for all sessions");
sessionManager.resetSessionsLastHeartbeatTime();
LOG.info("Resetting the heartbeat times for all nodes");
nodeManager.resetNodesLastHeartbeatTime();
/**
* If we are setting the safe mode to false, we should first set it
* in-memory, before we set it at the CPJT.
*/
this.safeMode = false;
}
try {
ClusterManagerAvailabilityChecker.getPJTClient(conf).
setClusterManagerSafeModeFlag(safeMode);
} catch (IOException e) {
LOG.info("Exception while setting the safe mode flag in ProxyJobTracker: "
+ e.getMessage());
return false;
} catch (TException e) {
LOG.info("Exception while setting the safe mode flag in ProxyJobTracker: "
+ e.getMessage());
return false;
}
this.safeMode = safeMode;
LOG.info("Flag successfully set in ProxyJobTracker");
LOG.info("Safe mode is now: " + (this.safeMode ? "ON" : "OFF"));
return true;
} | java | {
"resource": ""
} |
q161670 | ClusterManager.persistState | train | @Override
public boolean persistState() {
if (!safeMode) {
LOG.info(
"Cannot persist state because ClusterManager is not in Safe Mode");
return false;
}
try {
JsonGenerator jsonGenerator = CoronaSerializer.createJsonGenerator(conf);
jsonGenerator.writeStartObject();
jsonGenerator.writeFieldName("startTime");
jsonGenerator.writeNumber(startTime);
jsonGenerator.writeFieldName("nodeManager");
nodeManager.write(jsonGenerator);
jsonGenerator.writeFieldName("sessionManager");
sessionManager.write(jsonGenerator);
jsonGenerator.writeFieldName("sessionNotifier");
sessionNotifier.write(jsonGenerator);
jsonGenerator.writeEndObject();
jsonGenerator.close();
} catch (IOException e) {
LOG.info("Could not persist the state: ", e);
return false;
}
return true;
} | java | {
"resource": ""
} |
q161671 | ClusterManager.nodeTimeout | train | public void nodeTimeout(String nodeName) {
if (nodeRestarter != null) {
nodeRestarter.delete(nodeName);
}
Set<String> sessions = nodeManager.getNodeSessions(nodeName);
Set<ClusterNode.GrantId> grantsToRevoke = nodeManager.deleteNode(nodeName);
if (grantsToRevoke == null) {
return;
}
handleRevokedGrants(nodeName, grantsToRevoke);
handleDeadNode(nodeName, sessions);
scheduler.notifyScheduler();
} | java | {
"resource": ""
} |
q161672 | ClusterManager.nodeAppRemoved | train | public void nodeAppRemoved(String nodeName, ResourceType type) {
Set<String> sessions = nodeManager.getNodeSessions(nodeName);
Set<ClusterNode.GrantId> grantsToRevoke =
nodeManager.deleteAppFromNode(nodeName, type);
if (grantsToRevoke == null) {
return;
}
Set<String> affectedSessions = new HashSet<String>();
for (String sessionHandle : sessions) {
try {
if (sessionManager.getSession(sessionHandle).
getTypes().contains(type)) {
affectedSessions.add(sessionHandle);
}
} catch (InvalidSessionHandle ex) {
// ignore
LOG.warn("Found invalid session: " + sessionHandle
+ " while timing out node: " + nodeName);
}
}
handleDeadNode(nodeName, affectedSessions);
handleRevokedGrants(nodeName, grantsToRevoke);
scheduler.notifyScheduler();
} | java | {
"resource": ""
} |
q161673 | ClusterManager.handleRevokedGrants | train | private void handleRevokedGrants(
String nodeName, Set<ClusterNode.GrantId> grantsToRevoke) {
for (ClusterNode.GrantId grantId: grantsToRevoke) {
String sessionHandle = grantId.getSessionId();
try {
sessionManager.revokeResource(sessionHandle,
Collections.singletonList(grantId.getRequestId()));
} catch (InvalidSessionHandle e) {
// ignore
LOG.warn("Found invalid session: " + sessionHandle +
" while timing out node: " + nodeName);
}
}
} | java | {
"resource": ""
} |
q161674 | ClusterManager.handleDeadNode | train | private void handleDeadNode(String nodeName, Set<String> sessions) {
LOG.info("Notify sessions: " + sessions + " about dead node " + nodeName);
for (String session : sessions) {
sessionNotifier.notifyDeadNode(session, nodeName);
}
} | java | {
"resource": ""
} |
q161675 | DataXceiver.updateCurrentThreadName | train | private void updateCurrentThreadName(String status) {
StringBuilder sb = new StringBuilder();
sb.append("DataXceiver for client ");
InetAddress ia;
if (s != null && (ia = s.getInetAddress()) != null) {
sb.append(ia.toString());
} else {
sb.append("unknown");
}
if (status != null) {
sb.append(" [").append(status).append("]");
}
Thread.currentThread().setName(sb.toString());
} | java | {
"resource": ""
} |
q161676 | DataXceiver.readMetadata | train | void readMetadata(DataInputStream in, VersionAndOpcode versionAndOpcode)
throws IOException {
ReadMetadataHeader readMetadataHeader =
new ReadMetadataHeader(versionAndOpcode);
readMetadataHeader.readFields(in);
final int namespaceId = readMetadataHeader.getNamespaceId();
Block block = new Block(readMetadataHeader.getBlockId(), 0,
readMetadataHeader.getGenStamp());
ReplicaToRead rtr;
if ((rtr = datanode.data.getReplicaToRead(namespaceId, block)) == null
|| rtr.isInlineChecksum()) {
throw new IOException(
"Read metadata from inline checksum file is not supported");
}
DataOutputStream out = null;
try {
updateCurrentThreadName("reading metadata for block " + block);
out = new DataOutputStream(
NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
byte[] buf = BlockWithChecksumFileReader.getMetaData(datanode.data,
namespaceId, block);
out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
out.writeInt(buf.length);
out.write(buf);
//last DATA_CHUNK
out.writeInt(0);
} finally {
IOUtils.closeStream(out);
}
} | java | {
"resource": ""
} |
q161677 | DataXceiver.getBlockCrc | train | void getBlockCrc(DataInputStream in, VersionAndOpcode versionAndOpcode)
throws IOException {
// header
BlockChecksumHeader blockChecksumHeader =
new BlockChecksumHeader(versionAndOpcode);
blockChecksumHeader.readFields(in);
final int namespaceId = blockChecksumHeader.getNamespaceId();
final Block block = new Block(blockChecksumHeader.getBlockId(), 0,
blockChecksumHeader.getGenStamp());
DataOutputStream out = null;
ReplicaToRead ri = datanode.data.getReplicaToRead(namespaceId, block);
if (ri == null) {
throw new IOException("Unknown block");
}
updateCurrentThreadName("getting CRC checksum for block " + block);
try {
//write reply
out = new DataOutputStream(
NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
int blockCrc;
if (ri.hasBlockCrcInfo()) {
// There is actually a short window that the block is reopened
// and we got exception when call getBlockCrc but it's OK. It's
// only happens for append(). So far we don't optimize for this
// use case. We can do it later when necessary.
//
blockCrc = ri.getBlockCrc();
} else {
try {
if (ri.isInlineChecksum()) {
blockCrc = BlockInlineChecksumReader.getBlockCrc(datanode, ri,
namespaceId, block);
} else {
blockCrc = BlockWithChecksumFileReader.getBlockCrc(datanode, ri,
namespaceId, block);
}
} catch (IOException ioe) {
LOG.warn("Exception when getting Block CRC", ioe);
out.writeShort(DataTransferProtocol.OP_STATUS_ERROR);
out.flush();
throw ioe;
}
}
out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
out.writeLong(blockCrc);
out.flush();
} finally {
IOUtils.closeStream(out);
}
} | java | {
"resource": ""
} |
q161678 | DataXceiver.copyBlock | train | private void copyBlock(DataInputStream in,
VersionAndOpcode versionAndOpcode) throws IOException {
// Read in the header
CopyBlockHeader copyBlockHeader = new CopyBlockHeader(versionAndOpcode);
copyBlockHeader.readFields(in);
long startTime = System.currentTimeMillis();
int namespaceId = copyBlockHeader.getNamespaceId();
long blockId = copyBlockHeader.getBlockId();
long genStamp = copyBlockHeader.getGenStamp();
Block block = new Block(blockId, 0, genStamp);
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
LOG.info("Not able to copy block " + blockId + " to "
+ s.getRemoteSocketAddress() + " because threads quota is exceeded.");
return;
}
BlockSender blockSender = null;
DataOutputStream reply = null;
boolean isOpSuccess = true;
updateCurrentThreadName("Copying block " + block);
try {
// check if the block exists or not
blockSender = new BlockSender(namespaceId, block, 0, -1, false, false, false,
false,
versionAndOpcode.getDataTransferVersion() >=
DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION, true,
datanode, null);
// set up response stream
OutputStream baseStream = NetUtils.getOutputStream(
s, datanode.socketWriteTimeout);
reply = new DataOutputStream(new BufferedOutputStream(
baseStream, SMALL_BUFFER_SIZE));
// send block content to the target
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
long readDuration = System.currentTimeMillis() - startTime;
datanode.myMetrics.bytesReadLatency.inc(readDuration);
datanode.myMetrics.bytesRead.inc((int) read);
if (read > KB_RIGHT_SHIFT_MIN) {
datanode.myMetrics.bytesReadRate.inc((int) (read >> KB_RIGHT_SHIFT_BITS),
readDuration);
}
datanode.myMetrics.blocksRead.inc();
LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
isOpSuccess = false;
throw ioe;
} finally {
dataXceiverServer.balanceThrottler.release();
if (isOpSuccess) {
try {
// send one last byte to indicate that the resource is cleaned.
reply.writeChar('d');
} catch (IOException ignored) {
}
}
IOUtils.closeStream(reply);
IOUtils.closeStream(blockSender);
}
} | java | {
"resource": ""
} |
q161679 | DataXceiver.sendResponse | train | private void sendResponse(Socket s, short opStatus, long timeout)
throws IOException {
DataOutputStream reply =
new DataOutputStream(NetUtils.getOutputStream(s, timeout));
reply.writeShort(opStatus);
reply.flush();
} | java | {
"resource": ""
} |
q161680 | JsonObjectMapperParser.getNext | train | public T getNext() throws IOException {
try {
return mapper.readValue(jsonParser, clazz);
} catch (EOFException e) {
return null;
}
} | java | {
"resource": ""
} |
q161681 | DBUtils.runInsertSelect | train | public static List<List<Object>> runInsertSelect(
DBConnectionFactory connectionFactory, String sql,
List<Object> sqlParams, boolean isWrite, int numRetries,
int retryMaxInternalSec, boolean insert, boolean getGeneratedKeys)
throws IOException {
int waitMS = 3000; // wait for at least 3s before next retry.
for (int i = 0; i < numRetries; ++i) {
Connection conn = null;
ResultSet generatedKeys = null;
PreparedStatement pstmt = null;
String url = null;
try {
try {
url = connectionFactory.getUrl(isWrite);
} catch (IOException ioe) {
LOG.warn("Cannot get DB URL, fall back to the default one", ioe);
url = defaultUrls.get(isWrite);
if (url == null) {
throw ioe;
}
}
LOG.info("Attepting connection with URL " + url);
conn = connectionFactory.getConnection(url);
defaultUrls.put(isWrite, url);
pstmt = getPreparedStatement(conn, sql, sqlParams,
getGeneratedKeys);
if (insert) {
int recordsUpdated = pstmt.executeUpdate();
LOG.info("rows inserted: " + recordsUpdated + " sql: " + sql);
List<List<Object>> results = null;
if (getGeneratedKeys) {
generatedKeys = pstmt.getGeneratedKeys();
results = getResults(generatedKeys);
}
Thread.sleep(connectionFactory.getDBOpsSleepTime() +
rand.nextInt(1000));
return results;
}
else {
generatedKeys = pstmt.executeQuery();
List<List<Object>> results = getResults(generatedKeys);
pstmt.clearBatch();
LOG.info("rows selected: " + results.size() + " sql: " + sql);
Thread.sleep(connectionFactory.getDBOpsSleepTime() +
rand.nextInt(1000));
return results;
}
} catch (Exception e) {
// We should catch a better exception than Exception, but since
// DBConnectionUrlFactory.getUrl() defines throws Exception, it's hard
// for us to figure out the complete set it can throw. We follow
// DBConnectionUrlFactory.getUrl()'s definition to catch Exception.
// It shouldn't be a big problem as after numRetries, we anyway exit.
LOG.info("Exception " + e + ". Will retry " + (numRetries - i)
+ " times.");
// Introducing a random factor to the wait time before another retry.
// The wait time is dependent on # of failures and a random factor.
// At the first time of getting a SQLException, the wait time
// is a random number between [0,300] msec. If the first retry
// still fails, we will wait 300 msec grace period before the 2nd retry.
// Also at the second retry, the waiting window is expanded to 600 msec
// alleviating the request rate from the server. Similarly the 3rd retry
// will wait 600 msec grace period before retry and the waiting window
// is
// expanded to 1200 msec.
waitMS += waitMS;
if (waitMS > retryMaxInternalSec * 1000) {
waitMS = retryMaxInternalSec * 1000;
}
double waitTime = waitMS + waitMS * rand.nextDouble();
if (i + 1 == numRetries) {
LOG.error("Still got Exception after " + numRetries + " retries.",
e);
throw new IOException(e);
}
try {
Thread.sleep((long) waitTime);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
} finally {
DBUtils.close(generatedKeys, new PreparedStatement[]{pstmt}, conn);
}
}
return null;
} | java | {
"resource": ""
} |
q161682 | IntermediateForm.process | train | public void process(DocumentAndOp doc, Analyzer analyzer) throws IOException {
if (doc.getOp() == DocumentAndOp.Op.DELETE
|| doc.getOp() == DocumentAndOp.Op.UPDATE) {
deleteList.add(doc.getTerm());
}
if (doc.getOp() == DocumentAndOp.Op.INSERT
|| doc.getOp() == DocumentAndOp.Op.UPDATE) {
if (writer == null) {
// analyzer is null because we specify an analyzer with addDocument
writer = createWriter();
}
writer.addDocument(doc.getDocument(), analyzer);
numDocs++;
}
} | java | {
"resource": ""
} |
q161683 | CompressedWritable.ensureInflated | train | protected void ensureInflated() {
if (compressed != null) {
try {
ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
DataInput inflater =
new DataInputStream(new InflaterInputStream(deflated));
readFieldsCompressed(inflater);
compressed = null;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | java | {
"resource": ""
} |
q161684 | ProxyUgiManager.getUgiForUser | train | public static synchronized UnixUserGroupInformation getUgiForUser(
String userName) {
long now = System.currentTimeMillis();
long cutoffTime = now - ugiLifetime;
CachedUgi cachedUgi = ugiCache.get(userName);
if (cachedUgi != null && cachedUgi.getInitTime() > cutoffTime)
return cachedUgi.getUgi();
UnixUserGroupInformation ugi = null;
try {
ugi = getUgi(userName);
} catch (IOException e) {
return null;
}
if (ugiCache.size() > CLEANUP_THRESHOLD) { // remove expired ugi's first
for (Iterator<Map.Entry<String, CachedUgi>> it = ugiCache.entrySet()
.iterator(); it.hasNext();) {
Map.Entry<String, CachedUgi> e = it.next();
if (e.getValue().getInitTime() < cutoffTime) {
it.remove();
}
}
}
ugiCache.put(ugi.getUserName(), new CachedUgi(ugi, now));
return ugi;
} | java | {
"resource": ""
} |
q161685 | ProxyUgiManager.saveToCache | train | static synchronized void saveToCache(UnixUserGroupInformation ugi) {
ugiCache.put(ugi.getUserName(), new CachedUgi(ugi, System
.currentTimeMillis()));
} | java | {
"resource": ""
} |
q161686 | ProxyUgiManager.getUgi | train | private static UnixUserGroupInformation getUgi(String userName)
throws IOException {
if (userName == null || !USERNAME_PATTERN.matcher(userName).matches())
throw new IOException("Invalid username=" + userName);
String[] cmd = new String[] { "bash", "-c", "id -Gn '" + userName + "'"};
String[] groups = Shell.execCommand(cmd).split("\\s+");
return new UnixUserGroupInformation(userName, groups);
} | java | {
"resource": ""
} |
q161687 | NodeHealthCheckerService.start | train | void start() {
// if health script path is not configured don't start the thread.
if (!shouldRun(conf)) {
LOG.info("Not starting node health monitor");
return;
}
nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true);
// Start the timer task immediately and
// then periodically at interval time.
nodeHealthScriptScheduler.scheduleAtFixedRate(timer, 0, intervalTime);
} | java | {
"resource": ""
} |
q161688 | NodeHealthCheckerService.stop | train | void stop() {
if (!shouldRun(conf)) {
return;
}
nodeHealthScriptScheduler.cancel();
if (shexec != null) {
Process p = shexec.getProcess();
if (p != null) {
p.destroy();
}
}
} | java | {
"resource": ""
} |
q161689 | WritableName.setName | train | public static synchronized void setName(Class writableClass, String name) {
CLASS_TO_NAME.put(writableClass, name);
NAME_TO_CLASS.put(name, writableClass);
} | java | {
"resource": ""
} |
q161690 | ParityFilePair.parityExists | train | public static boolean parityExists(FileStatus src, Codec codec,
Configuration conf) throws IOException {
return ParityFilePair.getParityFile(codec, src, conf) != null;
} | java | {
"resource": ""
} |
q161691 | ClusterManagerAvailabilityChecker.getPJTClient | train | public static CoronaProxyJobTrackerService.Client
getPJTClient(CoronaConf conf) throws IOException {
InetSocketAddress address =
NetUtils.createSocketAddr(conf.getProxyJobTrackerThriftAddress());
TFramedTransport transport = new TFramedTransport(
new TSocket(address.getHostName(), address.getPort()));
CoronaProxyJobTrackerService.Client client =
new CoronaProxyJobTrackerService.Client(new TBinaryProtocol(transport));
try {
transport.open();
} catch (TException e) {
LOG.info("Transport Exception: ", e);
}
return client;
} | java | {
"resource": ""
} |
q161692 | ClusterManagerAvailabilityChecker.waitWhileClusterManagerInSafeMode | train | public static void waitWhileClusterManagerInSafeMode(CoronaConf conf)
throws IOException {
CoronaProxyJobTrackerService.Client pjtClient = getPJTClient(conf);
while (true) {
try {
// If this condition holds true, then two things can happen:
// 1. The CM was never in Safe Mode
// 2. CM was in Safe Mode, just before we made this method call, and
// came out of Safe Mode before the RPC call.
if (!pjtClient.getClusterManagerSafeModeFlag()) {
break;
}
// If the safe mode flag is indeed set
LOG.info("Safe mode flag is set on the ProxyJobTracker");
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new IOException(e);
}
} catch (TException e) {
throw new IOException(
"Could not check the safe mode flag on the ProxyJobTracker", e);
}
}
} | java | {
"resource": ""
} |
q161693 | AvatarShellCommand.noZeroOrOneOrAddress | train | void noZeroOrOneOrAddress(String command) {
if (isZeroCommand || isOneCommand) {
throwException(CMD + command + " (zero|one) should not be specified");
}
if (isAddressCommand) {
throwException(CMD + command + " address should not be specified");
}
} | java | {
"resource": ""
} |
q161694 | AvatarShellCommand.eitherZeroOrOneOrAddress | train | void eitherZeroOrOneOrAddress(String command) {
if (!isAddressCommand && !(isZeroCommand ^ isOneCommand)) {
throwException(CMD + command + ": (zero|one) specified incorrectly");
}
if (isAddressCommand && (isZeroCommand || isOneCommand)) {
throwException(CMD + command + ": cannot specify address with (zero|one)");
}
} | java | {
"resource": ""
} |
q161695 | LightWeightGSet.actualArrayLength | train | private static int actualArrayLength(int recommended) {
if (recommended > MAX_ARRAY_LENGTH) {
return MAX_ARRAY_LENGTH;
} else if (recommended < MIN_ARRAY_LENGTH) {
return MIN_ARRAY_LENGTH;
} else {
final int a = Integer.highestOneBit(recommended);
return a == recommended? a: a << 1;
}
} | java | {
"resource": ""
} |
q161696 | LightWeightGSet.shardIterator | train | @Override
public Iterator<E> shardIterator(int shardId, int numShards) {
  // Validate arguments before consulting internal state. Previously the
  // entries.length check ran first, so an out-of-range shardId could
  // silently return null whenever the entry table happened to be shorter
  // than numShards, hiding the caller's bug instead of failing loudly.
  if (shardId >= numShards) {
    throw new IllegalArgumentException(
        "Shard id must be less than total shards, shardId: " + shardId
        + ", numShards: " + numShards);
  }
  // More shards than hash buckets: this shard owns no buckets, so there is
  // nothing to iterate.
  if (shardId >= entries.length) {
    return null;
  }
  return new SetIterator(shardId, numShards);
} | java | {
"resource": ""
} |
q161697 | ConfServlet.getConfFromContext | train | private Configuration getConfFromContext() {
  // The embedded HttpServer publishes its Configuration into the servlet
  // context under CONF_CONTEXT_ATTRIBUTE; fetch it and cast it back.
  Object attr = getServletContext().getAttribute(
      HttpServer.CONF_CONTEXT_ATTRIBUTE);
  assert attr != null;
  return (Configuration) attr;
} | java | {
"resource": ""
} |
q161698 | ConfServlet.writeResponse | train | static void writeResponse(Configuration conf, Writer out, String format)
    throws IOException, BadFormatException {
  // Serialize the configuration in the requested format. The
  // constant-first equals() calls keep this null-safe for format.
  if (FORMAT_XML.equals(format)) {
    conf.writeXml(out);
  } else if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, out);
  } else {
    throw new BadFormatException("Bad format: " + format);
  }
} | java | {
"resource": ""
} |
q161699 | FTPFileSystem.create | train | @Override
public FSDataOutputStream create(Path file, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
// Open a dedicated FTP connection for this stream. It is released either
// on one of the error paths below or when the returned stream is closed.
final FTPClient client = connect();
Path workDir = new Path(client.printWorkingDirectory());
Path absolute = makeAbsolute(workDir, file);
// NOTE(review): the existence check uses `file` while the parent lookup
// below uses `absolute` — confirm both resolve to the same path for
// relative inputs.
if (exists(client, file)) {
if (overwrite) {
delete(client, file);
} else {
// Refusing to overwrite: release the connection before failing.
disconnect(client);
throw new IOException("File already exists: " + file);
}
}
// Ensure the parent directory exists; `parent` is reassigned only to
// produce a readable error message when the file sits at the root.
Path parent = absolute.getParent();
if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
parent = (parent == null) ? new Path("/") : parent;
disconnect(client);
throw new IOException("create(): Mkdirs failed to create: " + parent);
}
client.allocate(bufferSize);
// Change to parent directory on the server. Only then can we write to the
// file on the server by opening up an OutputStream. As a side effect the
// working directory on the server is changed to the parent directory of the
// file. The FTP client connection is closed when close() is called on the
// FSDataOutputStream.
client.changeWorkingDirectory(parent.toUri().getPath());
FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file
.getName()), statistics) {
@Override
public void close() throws IOException {
// Finish the transfer, then tear down the per-stream connection.
super.close();
if (!client.isConnected()) {
throw new FTPException("Client not connected");
}
boolean cmdCompleted = client.completePendingCommand();
disconnect(client);
if (!cmdCompleted) {
throw new FTPException("Could not complete transfer, Reply Code - "
+ client.getReplyCode());
}
}
};
// A streaming store should leave the server in a positive-preliminary
// reply state; anything else means the transfer never started.
if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
// The ftpClient is in an inconsistent state. Must close the stream
// which in turn will logout and disconnect from FTP server
fos.close();
throw new IOException("Unable to create file: " + file + ", Aborting");
}
return fos;
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.