index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/Filter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.extraction.engine.RecordUtil;
import org.apache.hadoop.chukwa.util.RegexUtil.CheckedPatternSyntaxException;
import org.apache.log4j.Logger;
/**
 * A conjunction (logical AND) of per-field regular-expression rules that is
 * matched against {@link Chunk}s. Patterns are supplied as a
 * {@code &}-separated list of {@code target=regex} terms, where target is one
 * of datatype, name, host, cluster, content, or {@code tags.<tagname>}.
 * A chunk matches only if every rule matches.
 */
public class Filter {

  static Logger log = Logger.getLogger(Filter.class);

  /** Chunk fields that a rule may match against (besides tags.*). */
  private static final String[] SEARCH_TARGS =
    {"datatype", "name", "host", "cluster", "content"};

  /** Separator between targ=regex terms in the pattern list. */
  static final String SEPARATOR="&";

  /** A single targ=regex rule. */
  private static class SearchRule {
    Pattern p;
    String targ;

    SearchRule(Pattern p, String t) {
      this.p = p;
      this.targ = t;
    }

    /**
     * Tests whether this rule matches the given chunk. Unknown targets
     * fail the match (and trip an assertion when assertions are enabled).
     */
    boolean matches(Chunk chunk) {
      if(targ.equals("datatype")) {
        return p.matcher(chunk.getDataType()).matches();
      } else if(targ.equals("name")) {
        return p.matcher(chunk.getStreamName()).matches();
      } else if(targ.equals("host")) {
        return p.matcher(chunk.getSource()).matches();
      } else if(targ.equals("cluster")) {
        String cluster = RecordUtil.getClusterName(chunk);
        return p.matcher(cluster).matches();
      } else if(targ.equals("content")) {
        // Chunk payload is decoded as UTF-8 before matching.
        String content = new String(chunk.getData(), Charset.forName("UTF-8"));
        return p.matcher(content).matches();
      } else if(targ.startsWith("tags.")) {
        String tagName = targ.substring("tags.".length());
        if (!RegexUtil.isRegex(tagName)) {
          // Fix: include the actual tag name in the message; the previous
          // text quoted the literal string 'tagName'.
          log.warn("Error parsing tag name '" + tagName + "' as a regex: "
              + RegexUtil.regexError(tagName));
          return false;
        }
        String tagVal = chunk.getTag(tagName);
        if(tagVal == null)
          return false;
        return p.matcher(tagVal).matches();
      } else {
        assert false: "unknown target: " +targ;
        return false;
      }
    }

    public String toString() {
      return targ + "=" +p.toString();
    }
  }

  List<SearchRule> compiledPatterns;

  /**
   * Compiles a {@code &}-separated list of targ=regex rules.
   *
   * @param listOfPatterns e.g. {@code "datatype=Foo.*&host=node1"}
   * @throws CheckedPatternSyntaxException if a term is malformed, names an
   *         unrecognized target, or contains an invalid regex
   */
  public Filter(String listOfPatterns) throws CheckedPatternSyntaxException {
    compiledPatterns = new ArrayList<SearchRule>();
    //FIXME: could escape these
    String[] patterns = listOfPatterns.split(SEPARATOR);
    for(String p: patterns) {
      int equalsPos = p.indexOf('=');
      // Require at least one character on each side of '='.
      if(equalsPos < 0 || equalsPos > (p.length() -2)) {
        throw new CheckedPatternSyntaxException(
            "pattern must be of form targ=pattern", p, -1);
      }
      String targ = p.substring(0, equalsPos);
      if(!targ.startsWith("tags.") && !ArrayUtils.contains(SEARCH_TARGS, targ)) {
        throw new CheckedPatternSyntaxException(
            "pattern doesn't start with recognized search target", p, -1);
      }
      String regex = p.substring(equalsPos+1);
      if (!RegexUtil.isRegex(regex)) {
        throw new CheckedPatternSyntaxException(RegexUtil.regexException(regex));
      }
      // DOTALL so '.' spans newlines in multi-line chunk content.
      Pattern pat = Pattern.compile(regex, Pattern.DOTALL);
      compiledPatterns.add(new SearchRule(pat, targ));
    }
  }

  /**
   * @param chunk chunk to test
   * @return true only if every compiled rule matches the chunk
   */
  public boolean matches(Chunk chunk) {
    for(SearchRule r: compiledPatterns) {
      if(!r.matches(chunk))
        return false;
    }
    return true;
  }

  /** @return number of compiled rules. */
  int size() {
    return compiledPatterns.size();
  }

  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(compiledPatterns.get(0));
    for(int i=1; i < compiledPatterns.size(); ++i) {
      sb.append(" & ");
      sb.append(compiledPatterns.get(i));
    }
    return sb.toString();
  }

  /** Filter that accepts every chunk; used for the {@link #ALL} singleton. */
  private static final class MatchAll extends Filter {
    public MatchAll() throws CheckedPatternSyntaxException {
      super("datatype=.*");
    }

    public boolean matches(Chunk c) {
      return true;
    }

    public String toString() {
      return "ALL";
    }
  }

  /** Singleton filter that matches everything. */
  public static final Filter ALL = newMatchAll();

  private static Filter newMatchAll() {
    try {
      return new MatchAll();
    } catch (CheckedPatternSyntaxException e) {
      throw new RuntimeException("Illegal MatchAll regular expression.", e);
    }
  }
}//end class
| 8,300 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ChukwaUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
/*
* Create a common set of utility classes for code reuse
*/
public class ChukwaUtil {

  private static Logger log = Logger.getLogger(ChukwaUtil.class);

  /**
   * Builds the agent {@link Configuration} from the Chukwa install layout:
   * resolves CHUKWA_HOME (env) and CHUKWA_CONF_DIR (system property, falling
   * back to {@code $CHUKWA_HOME/conf}), loads chukwa-agent-conf.xml, and
   * fills in defaults for the checkpoint dir and initial_adaptors path.
   *
   * @return the populated agent configuration
   */
  public static Configuration readConfiguration() {
    Configuration conf = new Configuration();
    String chukwaHomeName = System.getenv("CHUKWA_HOME");
    if (chukwaHomeName == null) {
      // Fall back to the current working directory.
      chukwaHomeName = "";
    }
    File chukwaHome = new File(chukwaHomeName).getAbsoluteFile();
    log.info("Config - CHUKWA_HOME: [" + chukwaHome.toString() + "]");
    String chukwaConfName = System.getProperty("CHUKWA_CONF_DIR");
    File chukwaConf;
    if (chukwaConfName != null)
      chukwaConf = new File(chukwaConfName).getAbsoluteFile();
    else
      chukwaConf = new File(chukwaHome, "conf");
    log.info("Config - CHUKWA_CONF_DIR: [" + chukwaConf.toString() + "]");
    File agentConf = new File(chukwaConf, "chukwa-agent-conf.xml");
    conf.addResource(new Path(agentConf.getAbsolutePath()));
    if (conf.get("chukwaAgent.checkpoint.dir") == null)
      conf.set("chukwaAgent.checkpoint.dir",
          new File(chukwaHome, "var").getAbsolutePath());
    conf.set("chukwaAgent.initial_adaptors", new File(chukwaConf,
        "initial_adaptors").getAbsolutePath());
    try {
      // NOTE(review): this Configuration is built and then discarded; it
      // appears to be a load-time sanity check of the agent config file.
      Configuration chukwaAgentConf = new Configuration(false);
      chukwaAgentConf.addResource(new Path(agentConf.getAbsolutePath()));
    } catch (Exception e) {
      // Fix: route the failure through the logger instead of
      // printStackTrace(), so it reaches the configured appenders.
      log.warn("Error reading " + agentConf.getAbsolutePath(), e);
    }
    return conf;
  }
}
| 8,301 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/DatabaseWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.sql.SQLException;
import java.sql.Connection;
import java.sql.Statement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.List;
import java.text.SimpleDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * Thin JDBC helper for Chukwa's MySQL-backed stores. Holds a single shared
 * connection plus the most recent statement/result set, which are released
 * by {@link #close()}. Not thread-safe: the stmt/pstmt/rs fields are shared
 * instance state.
 */
public class DatabaseWriter {

  private static Log log = LogFactory.getLog(DatabaseWriter.class);
  private Connection conn = null;
  private Statement stmt = null;
  private PreparedStatement pstmt = null;
  private ResultSet rs = null;

  /**
   * Connects directly to a MySQL host.
   *
   * @param host host (and optional port) of the MySQL server
   * @param user user name, may be null
   * @param password password, may be null
   */
  public DatabaseWriter(String host, String user, String password) {
    String jdbc_url = "jdbc:mysql://" + host + "/";
    if (user != null) {
      jdbc_url = jdbc_url + "?user=" + user;
      if (password != null) {
        jdbc_url = jdbc_url + "&password=" + password;
      }
    }
    connect(jdbc_url);
  }

  /**
   * Connects using the JDBC URL configured for the named cluster.
   *
   * @param cluster cluster name looked up via {@link ClusterConfig}
   */
  public DatabaseWriter(String cluster) {
    ClusterConfig cc = new ClusterConfig();
    connect(cc.getURL(cluster));
  }

  /**
   * Loads the JDBC driver and opens the shared connection. Extracted so the
   * two constructors no longer duplicate this logic. Errors are logged and
   * leave {@code conn} null.
   */
  private void connect(String jdbc_url) {
    try {
      // The newInstance() call is a work around for some
      // broken Java implementations
      DriverManagerUtil.loadDriver().newInstance();
    } catch (Exception ex) {
      // handle the error
      log.error(ex, ex);
    }
    try {
      conn = org.apache.hadoop.chukwa.util.DriverManagerUtil.getConnection(jdbc_url);
      log.debug("Initialized JDBC URL: " + jdbc_url);
    } catch (SQLException ex) {
      log.error(ex, ex);
    }
  }

  /**
   * Executes a statement that returns no result set. The statement is
   * closed in all cases; the SQLException is logged and rethrown.
   *
   * @param query SQL to execute
   * @throws SQLException on any database error
   */
  public void execute(String query) throws SQLException {
    try {
      stmt = conn.createStatement();
      stmt.execute(query);
    } catch (SQLException ex) {
      log.error(ex, ex);
      log.error("SQL Statement:" + query);
      log.error("SQLException: " + ex.getMessage());
      log.error("SQLState: " + ex.getSQLState());
      log.error("VendorError: " + ex.getErrorCode());
      throw ex;
    } finally {
      if (stmt != null) {
        try {
          stmt.close();
        } catch (SQLException sqlEx) {
          // ignore close failures; nothing more can be done here
          log.debug(ExceptionUtil.getStackTrace(sqlEx));
        }
        stmt = null;
      }
    }
  }

  /** @return the underlying shared JDBC connection (may be null on failure). */
  public Connection getConnection() {
    return conn;
  }

  /**
   * Runs a parameterized query. Caller is responsible for invoking
   * {@link #close()} when done with the returned result set.
   *
   * @param query SQL with '?' placeholders
   * @param parameters positional values bound in order
   * @return the open result set
   * @throws SQLException on any database error
   */
  public ResultSet query(String query, List<Object> parameters) throws SQLException {
    try {
      pstmt = conn.prepareStatement(query);
      for(int i=0;i<parameters.size();i++) {
        int index = i+1;
        pstmt.setObject(index,parameters.get(i));
      }
      rs = pstmt.executeQuery();
    } catch (SQLException ex) {
      // only log at debug level because caller will still see the exception
      log.debug(ex, ex);
      log.debug("SQL Statement:" + query);
      log.debug("SQLException: " + ex.getMessage());
      log.debug("SQLState: " + ex.getSQLState());
      log.debug("VendorError: " + ex.getErrorCode());
      throw ex;
    }
    return rs;
  }

  /**
   * Runs a plain query. Caller is responsible for invoking {@link #close()}
   * when done with the returned result set.
   *
   * @param query SQL to execute
   * @return the open result set
   * @throws SQLException on any database error
   */
  public ResultSet query(String query) throws SQLException {
    try {
      stmt = conn.createStatement();
      rs = stmt.executeQuery(query);
    } catch (SQLException ex) {
      // only log at debug level because caller will still see the exception
      log.debug(ex, ex);
      log.debug("SQL Statement:" + query);
      log.debug("SQLException: " + ex.getMessage());
      log.debug("SQLState: " + ex.getSQLState());
      log.debug("VendorError: " + ex.getErrorCode());
      throw ex;
    }
    return rs;
  }

  /**
   * Releases the result set, statement, and connection in reverse order of
   * creation. Safe to call multiple times; close failures are logged only.
   */
  public void close() {
    if (rs != null) {
      try {
        rs.close();
      } catch (SQLException sqlEx) {
        log.debug(ExceptionUtil.getStackTrace(sqlEx));
      }
      rs = null;
    }
    if (stmt != null) {
      try {
        stmt.close();
      } catch (SQLException sqlEx) {
        log.debug(ExceptionUtil.getStackTrace(sqlEx));
      }
      stmt = null;
    }
    if (conn != null) {
      try {
        conn.close();
      } catch (SQLException sqlEx) {
        log.debug(ExceptionUtil.getStackTrace(sqlEx));
      }
      conn = null;
    }
  }

  /**
   * Formats an epoch-millisecond timestamp as "yyyy-MM-dd HH:mm:ss" in the
   * default time zone.
   *
   * @param timestamp epoch milliseconds
   * @return formatted timestamp string
   */
  public static String formatTimeStamp(long timestamp) {
    // A fresh SimpleDateFormat per call: slower, but avoids sharing the
    // non-thread-safe formatter between threads.
    SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    String format = formatter.format(timestamp);
    return format;
  }
}
| 8,302 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datastore/ChukwaHBaseStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datastore;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.chukwa.hicc.bean.Chart;
import org.apache.hadoop.chukwa.hicc.bean.Dashboard;
import org.apache.hadoop.chukwa.hicc.bean.HeatMapPoint;
import org.apache.hadoop.chukwa.hicc.bean.Heatmap;
import org.apache.hadoop.chukwa.hicc.bean.LineOptions;
import org.apache.hadoop.chukwa.hicc.bean.Series;
import org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData;
import org.apache.hadoop.chukwa.hicc.bean.Widget;
import org.apache.hadoop.chukwa.hicc.rest.Examples;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.util.HBaseUtil;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import com.google.gson.Gson;
public class ChukwaHBaseStore {
static Logger LOG = Logger.getLogger(ChukwaHBaseStore.class);
// Minutes per hour; used when computing the query sample rate.
static int MINUTES_IN_HOUR = 60;
// Target number of down-sampled points per series.
static double RESOLUTION = 360;
static int MINUTE = 60000; // one minute, in milliseconds
final static int SECOND = (int) TimeUnit.SECONDS.toMillis(1);
private final static Charset UTF8 = Charset.forName("UTF-8");
// Column-family / row-key constants for the chukwa and chukwa_meta tables.
final static byte[] COLUMN_FAMILY = "t".getBytes(UTF8);
final static byte[] ANNOTATION_FAMILY = "a".getBytes(UTF8);
final static byte[] KEY_NAMES = "k".getBytes(UTF8);
final static byte[] CHART_TYPE = "chart_meta".getBytes(UTF8);
final static byte[] CHART_FAMILY = "c".getBytes(UTF8);
final static byte[] COMMON_FAMILY = "c".getBytes(UTF8);
final static byte[] WIDGET_TYPE = "widget_meta".getBytes(UTF8);
final static byte[] DASHBOARD_TYPE = "dashboard_meta".getBytes(UTF8);
// HBase table names.
private static final String CHUKWA = "chukwa";
private static final String CHUKWA_META = "chukwa_meta";
private static long MILLISECONDS_IN_DAY = 86400000L;
// Shared HBase connection, lazily created by getHBaseConnection().
private static Connection connection = null;
// No instance state; all data access goes through the static methods.
public ChukwaHBaseStore() {
  super();
}
/**
 * Lazily creates (or re-creates after a close) the shared HBase connection
 * from the configuration on the classpath.
 *
 * @throws IOException if the connection cannot be established
 */
public static synchronized void getHBaseConnection() throws IOException {
  if (connection == null || connection.isClosed()) {
    connection = ConnectionFactory.createConnection();
  }
}
/**
 * Releases the shared HBase connection, if one is open. Failures are logged
 * and otherwise ignored.
 */
public static synchronized void closeHBase() {
  try {
    if(connection != null) {
      connection.close();
    }
  } catch(IOException e) {
    // Fix: pass the exception to the logger instead of dropping the cause.
    LOG.warn("Unable to release HBase connection.", e);
  }
}
/**
 * Scan chukwa table for a particular metric group and metric name based on
 * time ranges.
 *
 * @param metricGroup metric group name
 * @param metric metric name
 * @param source source of the metric
 * @param startTime start time
 * @param endTime end time
 * @return Series object
 */
public static Series getSeries(String metricGroup, String metric,
    String source, long startTime, long endTime) {
  // Join group and metric into the "<group>.<metric>" form the store uses,
  // then delegate to the full-name overload.
  String fullMetricName = metricGroup + "." + metric;
  return getSeries(fullMetricName, source, startTime, endTime);
}
/**
 * Scan chukwa table for a full metric name based on time ranges.
 *
 * @param metric metric group name and metric name combined
 * @param source source of the metric
 * @param startTime start time
 * @param endTime end time
 * @return Series object (empty on error)
 */
public static synchronized Series getSeries(String metric, String source, long startTime,
    long endTime) {
  String seriesName = new StringBuilder(metric).append(":").append(source).toString();
  Series series = new Series(seriesName);
  try {
    // Swap start and end if the values are inverted.
    // Fix: the previous code did "startTime = endTime" before saving the
    // original start value, which left both bounds equal to endTime.
    if (startTime > endTime) {
      long temp = startTime;
      startTime = endTime;
      endTime = temp;
    }
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA));
    Scan scan = new Scan();
    Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    c.setTimeInMillis(startTime);
    int startDay = c.get(Calendar.DAY_OF_YEAR);
    c.setTimeInMillis(endTime);
    int endDay = c.get(Calendar.DAY_OF_YEAR);
    long currentDay = startTime;
    // One row per day; the row key encodes (day, metric, source).
    for (int i = startDay; i <= endDay; i++) {
      byte[] rowKey = HBaseUtil.buildKey(currentDay, metric, source);
      scan.addFamily(COLUMN_FAMILY);
      scan.setStartRow(rowKey);
      scan.setStopRow(rowKey);
      scan.setTimeRange(startTime, endTime);
      scan.setBatch(10000);
      ResultScanner results = table.getScanner(scan);
      Iterator<Result> it = results.iterator();
      while (it.hasNext()) {
        Result result = it.next();
        for (Cell kv : result.rawCells()) {
          // Qualifier is the big-endian epoch-millis timestamp of the point.
          byte[] key = CellUtil.cloneQualifier(kv);
          long timestamp = ByteBuffer.wrap(key).getLong();
          double value = Double
              .parseDouble(new String(CellUtil.cloneValue(kv), UTF8));
          series.add(timestamp, value);
        }
      }
      results.close();
      // Fix: advance exactly one day per iteration; the previous code added
      // (i * MILLISECONDS_IN_DAY) and skipped days on multi-day ranges.
      currentDay = currentDay + MILLISECONDS_IN_DAY;
    }
    table.close();
  } catch (IOException e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return series;
}
/**
 * Lists the metric names registered under a metric group in chukwa_meta.
 *
 * @param metricGroup row key in the chukwa_meta table
 * @return metric names (empty on error)
 */
public static Set<String> getMetricNames(String metricGroup) {
  Set<String> familyNames = new CopyOnWriteArraySet<String>();
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    Get get = new Get(metricGroup.getBytes(UTF8));
    Result result = table.get(get);
    for (Cell kv : result.rawCells()) {
      JSONObject json = (JSONObject) JSONValue.parse(new String(CellUtil.cloneValue(kv), UTF8));
      // Fix: guard against unparseable cells and a missing "type" key,
      // matching the defensive style of getSourceNames(); previously this
      // could throw NullPointerException inside the loop.
      if (json != null && json.get("type") != null && json.get("type").equals("metric")) {
        familyNames.add(new String(CellUtil.cloneQualifier(kv), UTF8));
      }
    }
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return familyNames;
}
/**
 * Lists all metric group names (row keys of chukwa_meta's "k" family).
 *
 * @return metric group names (empty on error)
 */
public static Set<String> getMetricGroups() {
  Set<String> metricGroups = new CopyOnWriteArraySet<String>();
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    Scan scan = new Scan();
    scan.addFamily(KEY_NAMES);
    ResultScanner rs = table.getScanner(scan);
    Iterator<Result> it = rs.iterator();
    while (it.hasNext()) {
      Result result = it.next();
      metricGroups.add(new String(result.getRow(), UTF8));
    }
    // Fix: release the scanner, as the sibling getSourceNames() does;
    // previously it was leaked.
    rs.close();
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return metricGroups;
}
/**
 * Lists the source names registered in chukwa_meta for the given data type.
 * Cells whose JSON value has {@code "type":"source"} contribute their
 * qualifier as a source name.
 *
 * @param dataType data type (currently unused by the scan itself)
 * @return source names (empty on error)
 */
public static Set<String> getSourceNames(String dataType) {
  Set<String> sources = new HashSet<String>();
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    Scan scan = new Scan();
    scan.addFamily(KEY_NAMES);
    ResultScanner scanner = table.getScanner(scan);
    for (Result row : scanner) {
      for (Cell cell : row.rawCells()) {
        String jsonText = new String(CellUtil.cloneValue(cell), UTF8);
        JSONObject json = (JSONObject) JSONValue.parse(jsonText);
        boolean isSource = json != null && json.get("type") != null
            && json.get("type").equals("source");
        if (isSource) {
          sources.add(new String(CellUtil.cloneQualifier(cell), UTF8));
        }
      }
    }
    scanner.close();
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return sources;
}
/**
 * Builds a heatmap of all sources for a metric over a time window. Each
 * source becomes a row (y), time maps to x across {@code width}, and values
 * are rescaled so the maximum maps to {@code scale}.
 *
 * @param metricGroup metric group name
 * @param metric metric name within the group
 * @param startTime start of the window, epoch millis
 * @param endTime end of the window, epoch millis
 * @param max initial maximum used for scaling (raised if data exceeds it)
 * @param scale output value ceiling (commonly 100)
 * @param width heatmap width in pixels
 * @param height heatmap height in pixels
 * @return populated Heatmap (empty on error)
 */
public static Heatmap getHeatmap(String metricGroup, String metric,
    long startTime, long endTime, double max, double scale, int width, int height) {
  Heatmap heatmap = new Heatmap();
  Set<String> sources = getSourceNames(metricGroup);
  Set<String> metrics = getMetricNames(metricGroup);
  List<Get> series = new ArrayList<Get>();
  String fullName = new StringBuilder(metricGroup).append(".").append(metric).toString();
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA));
    Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    c.setTimeInMillis(startTime);
    int startDay = c.get(Calendar.DAY_OF_YEAR);
    c.setTimeInMillis(endTime);
    int endDay = c.get(Calendar.DAY_OF_YEAR);
    long currentDay = startTime;
    // Build one Get per (day, metric, source) combination in the window.
    for (int i = startDay; i <= endDay; i++) {
      for (String m : metrics) {
        if (m.startsWith(fullName)) {
          for (String source : sources) {
            byte[] rowKey = HBaseUtil.buildKey(currentDay, m, source);
            Get serie = new Get(rowKey);
            serie.addFamily(COLUMN_FAMILY);
            serie.setTimeRange(startTime, endTime);
            series.add(serie);
          }
        }
      }
      // Fix: advance exactly one day per iteration; the previous code added
      // (i * MILLISECONDS_IN_DAY) and skipped days on multi-day ranges.
      currentDay = currentDay + MILLISECONDS_IN_DAY;
    }
    // NOTE(review): if startTime == endTime, timeRange is 0 and x becomes
    // a non-finite double before rounding — confirm callers never pass
    // equal bounds.
    long timeRange = (endTime - startTime);
    Result[] rs = table.get(series);
    int index = 1;
    // Series display in y axis
    int y = 0;
    HashMap<String, Integer> keyMap = new HashMap<String, Integer>();
    for (Result result : rs) {
      for(Cell cell : result.rawCells()) {
        // Bytes 3..7 of the row key hold the source identifier.
        byte[] dest = new byte[5];
        System.arraycopy(CellUtil.cloneRow(cell), 3, dest, 0, 5);
        String source = new String(dest, UTF8);
        long time = cell.getTimestamp();
        // Time display in x axis
        long delta = time - startTime;
        double f = (double) delta / timeRange;
        f = (double) f * width;
        int x = (int) Math.round(f);
        if (keyMap.containsKey(source)) {
          y = keyMap.get(source);
        } else {
          keyMap.put(source, Integer.valueOf(index));
          y = index;
          index++;
        }
        double v = Double.parseDouble(new String(CellUtil.cloneValue(cell), UTF8));
        heatmap.put(x, y, v);
        if (v > max) {
          max = v;
        }
      }
    }
    table.close();
    int radius = height / index;
    // Usually scale max from 0 to 100 for visualization
    heatmap.putMax(scale);
    for (HeatMapPoint point : heatmap.getHeatmap()) {
      double round = point.count / max * scale;
      round = Math.round(round * 100.0) / 100.0;
      point.put(point.x, point.y * radius, round);
    }
    heatmap.putRadius(radius);
    heatmap.putSeries(index -1);
  } catch (IOException e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return heatmap;
}
/**
 * Scan chukwa table and find cluster tag from annotation column family from a
 * range of entries.
 *
 * @param startTime start time in epoch
 * @param endTime start time in epoch
 * @return Set of cluster names
 */
public static Set<String> getClusterNames(long startTime, long endTime) {
  Set<String> clusters = new HashSet<String>();
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    Scan scan = new Scan();
    scan.addFamily(KEY_NAMES);
    ResultScanner rs = table.getScanner(scan);
    Iterator<Result> it = rs.iterator();
    while (it.hasNext()) {
      Result result = it.next();
      for (Cell cell : result.rawCells()) {
        JSONObject json = (JSONObject) JSONValue.parse(new String(CellUtil.cloneValue(cell), UTF8));
        // Fix: guard against unparseable cells and a missing "type" key,
        // matching getSourceNames(); previously this could NPE mid-scan.
        if (json != null && json.get("type") != null && json.get("type").equals("cluster")) {
          clusters.add(new String(CellUtil.cloneQualifier(cell), UTF8));
        }
      }
    }
    // Fix: release the scanner; previously it was leaked.
    rs.close();
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return clusters;
}
/**
 * Get a chart from HBase by ID.
 *
 * @param id Chart ID
 * @return Chart object, or null when no chart with that ID exists
 */
public static Chart getChart(String id) {
  Chart chart = null;
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    // All charts live in one row (CHART_TYPE); the qualifier is the ID.
    Result row = table.get(new Get(CHART_TYPE));
    byte[] serialized = row.getValue(CHART_FAMILY, id.getBytes(UTF8));
    if (serialized != null) {
      chart = new Gson().fromJson(new String(serialized, UTF8), Chart.class);
    }
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return chart;
}
/**
 * Update a chart in HBase by ID.
 *
 * @param id Chart ID
 * @param chart Chart Object
 */
public static void putChart(String id, Chart chart) {
  try {
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    // Serialize the chart as JSON under qualifier <id> of the shared row.
    String serialized = new Gson().toJson(chart);
    Put put = new Put(CHART_TYPE);
    put.addColumn(CHART_FAMILY, id.getBytes(UTF8), serialized.getBytes(UTF8));
    table.put(put);
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
}
/**
 * Create a chart in HBase by specifying parameters.
 * @param id is unique chart identifier
 * @param title is searchable name of the chart
 * @param metrics is list of metric names to render chart
 * @param source is data source name
 * @param yunitType is y axis unit type
 * @return Chart ID
 * @throws URISyntaxException if metrics name can not compose valid URL syntax
 */
public static synchronized String createChart(String id,
    String title, String[] metrics, String source, String yunitType) throws URISyntaxException {
  Chart chart = new Chart(id);
  chart.setYUnitType(yunitType);
  chart.setTitle(title);
  ArrayList<SeriesMetaData> seriesList = new ArrayList<SeriesMetaData>();
  for (String metric : metrics) {
    SeriesMetaData meta = new SeriesMetaData();
    meta.setLabel(metric + "/" + source);
    meta.setUrl(new URI("/hicc/v1/metrics/series/" + metric + "/" + source));
    meta.setLineOptions(new LineOptions());
    seriesList.add(meta);
  }
  chart.setSeries(seriesList);
  // Delegate persistence (and ID collision handling) to createChart(Chart).
  return createChart(chart);
}
/**
 * Create a chart in HBase by specifying parameters.
 * @param id is unique chart identifier
 * @param title is searchable name of the chart
 * @param metrics is list of metric names to render ring chart
 * @param source is data source name
 * @param suffixLabel is text label to append to metric values
 * @param direction sets the threshold to have either upper limit or lower limit
 * @return Chart ID
 * @throws URISyntaxException if metrics name can not compose valid URL syntax
 */
public static synchronized String createCircle(String id,
    String title, String[] metrics, String source, String suffixLabel, String direction) throws URISyntaxException {
  Chart chart = new Chart(id);
  chart.setSuffixText(suffixLabel);
  chart.setTitle(title);
  chart.setThreshold(direction);
  ArrayList<SeriesMetaData> seriesList = new ArrayList<SeriesMetaData>();
  for (String metric : metrics) {
    SeriesMetaData meta = new SeriesMetaData();
    meta.setLabel(metric + "/" + source);
    meta.setUrl(new URI("/hicc/v1/metrics/series/" + metric + "/" + source));
    seriesList.add(meta);
  }
  chart.setSeries(seriesList);
  // Delegate persistence (and ID collision handling) to createChart(Chart).
  return createChart(chart);
}
/**
 * Create a tile in HBase by specifying parameters.
 * @param id is unique tile identifier
 * @param title is searchable name of the tile widget
 * @param bannerText is description of the tile widget
 * @param suffixLabel is text label to append to metric values
 * @param metrics is list of metric names to render tile widget
 * @param source is data source name
 * @param icon is emoji symbol to render beside tile widget
 * @return Widget ID
 * @throws URISyntaxException if metrics name can not compose valid URL syntax
 */
public static synchronized String createTile(String id, String title,
    String bannerText, String suffixLabel, String[] metrics, String source,
    String icon) throws URISyntaxException {
  Chart chart = new Chart(id);
  chart.setTitle(title);
  chart.setBannerText(bannerText);
  chart.setSuffixText(suffixLabel);
  chart.setIcon(icon);
  List<SeriesMetaData> seriesList = new ArrayList<SeriesMetaData>();
  for (String metric : metrics) {
    SeriesMetaData meta = new SeriesMetaData();
    meta.setUrl(new URI("/hicc/v1/metrics/series/" + metric + "/" + source));
    seriesList.add(meta);
  }
  chart.setSeries(seriesList);
  // Delegate persistence (and ID collision handling) to createChart(Chart).
  return createChart(chart);
}
/**
 * Create a chart in HBase.
 *
 * @param chart is a chukwa Chart object
 * @return id of newly created chart, or null on error
 */
public static synchronized String createChart(Chart chart) {
  String id = chart.getId();
  try {
    getHBaseConnection();
    // A random ID is generated when none was supplied or when the supplied
    // ID already belongs to an existing chart.
    boolean needsGeneratedId = (id == null) || (getChart(id) != null);
    if (needsGeneratedId) {
      id = UUID.randomUUID().toString();
    }
    chart.setId(id);
    Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
    String serialized = new Gson().toJson(chart);
    Put put = new Put(CHART_TYPE);
    put.addColumn(CHART_FAMILY, id.getBytes(UTF8), serialized.getBytes(UTF8));
    table.put(put);
    table.close();
  } catch (Exception e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
    id = null;
  }
  return id;
}
/**
 * Return data for multiple series of metrics stored in HBase. Raw points
 * are down-sampled with a simple low-pass filter so each series carries
 * roughly RESOLUTION points regardless of the window size.
 *
 * @param series is SeriesMetaData object
 * @param startTime sets the start time of metrics
 * @param endTime sets the end time of metrics
 * @return A list of Series meta data
 */
public static synchronized ArrayList<org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData> getChartSeries(ArrayList<org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData> series, long startTime, long endTime) {
  ArrayList<org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData> list = new ArrayList<org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData>();
  try {
    // Swap start and end if the values are inverted.
    // Fix: the previous code did "startTime = endTime" before saving the
    // original start value, which left both bounds equal to endTime.
    if (startTime > endTime) {
      long temp = startTime;
      startTime = endTime;
      endTime = temp;
    }
    // Figure out the time range in hours and determine the best
    // sample rate to fetch the data.
    long range = (endTime - startTime)
        / (long) (MINUTES_IN_HOUR * MINUTE);
    long sampleRate = 1;
    if (range <= 1) {
      sampleRate = 5;
    } else if(range <= 24) {
      sampleRate = 240;
    } else if (range <= 720) {
      sampleRate = 7200;
    } else if(range >= 720) {
      // Effectively "range > 720" because of the preceding branch.
      sampleRate = 87600;
    }
    double smoothing = (endTime - startTime)
        / (double) (sampleRate * SECOND ) / (double) RESOLUTION;
    getHBaseConnection();
    Table table = connection.getTable(TableName.valueOf(CHUKWA));
    Scan scan = new Scan();
    Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    c.setTimeInMillis(startTime);
    int startDay = c.get(Calendar.DAY_OF_YEAR);
    c.setTimeInMillis(endTime);
    int endDay = c.get(Calendar.DAY_OF_YEAR);
    for (org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData s : series) {
      org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData clone = (org.apache.hadoop.chukwa.hicc.bean.SeriesMetaData) s.clone();
      long currentDay = startTime;
      // URL is expected to look like /hicc/v1/metrics/series/<metric>/<source>.
      String[] parts = s.getUrl().toString().split("/");
      String metric = parts[5];
      String source = parts[6];
      ArrayList<ArrayList<Number>> data = new ArrayList<ArrayList<Number>>();
      for (int i = startDay; i <= endDay; i++) {
        byte[] rowKey = HBaseUtil.buildKey(currentDay, metric, source);
        scan.addFamily(COLUMN_FAMILY);
        scan.setStartRow(rowKey);
        scan.setStopRow(rowKey);
        scan.setTimeRange(startTime, endTime);
        scan.setBatch(10000);
        ResultScanner results = table.getScanner(scan);
        Iterator<Result> it = results.iterator();
        double filteredValue = 0.0d;
        long lastTime = startTime;
        long totalElapsedTime = 0;
        int initial = 0;
        while (it.hasNext()) {
          Result result = it.next();
          for (Cell kv : result.rawCells()) {
            byte[] key = CellUtil.cloneQualifier(kv);
            long timestamp = ByteBuffer.wrap(key).getLong();
            double value = Double.parseDouble(new String(CellUtil.cloneValue(kv),
                UTF8));
            // Seed the filter with the first observed value.
            if(initial==0) {
              filteredValue = value;
            }
            long elapsedTime = (timestamp - lastTime) / SECOND;
            lastTime = timestamp;
            // Determine if there is any gap, if there is gap in data, reset
            // calculation.
            if (elapsedTime > (sampleRate * 5)) {
              filteredValue = 0.0d;
            } else {
              if (smoothing != 0.0d) {
                // Apply low pass filter to calculate
                filteredValue = filteredValue + (double) ((double) elapsedTime * (double) ((double) (value - filteredValue) / smoothing));
              } else {
                // Use original value
                filteredValue = value;
              }
            }
            totalElapsedTime = totalElapsedTime + elapsedTime;
            // Emit one down-sampled point per sampleRate seconds.
            if (totalElapsedTime >= sampleRate) {
              ArrayList<Number> points = new ArrayList<Number>();
              points.add(timestamp);
              points.add(filteredValue);
              data.add(points);
              totalElapsedTime = 0;
            }
          }
          initial++;
        }
        results.close();
        // Fix: advance exactly one day per iteration; the previous code
        // added (i * MILLISECONDS_IN_DAY) and skipped days.
        currentDay = currentDay + MILLISECONDS_IN_DAY;
      }
      clone.setData(data);
      list.add(clone);
    }
    table.close();
  } catch (IOException|CloneNotSupportedException e) {
    closeHBase();
    LOG.error(ExceptionUtil.getStackTrace(e));
  }
  return list;
}
/**
* List widgets stored in HBase.
*
* @param limit sets the number of widgets to return
* @param offset sets the starting point to return widgets
* @return List of Widgets
*/
public static synchronized List<Widget> listWidget(int limit, int offset) {
ArrayList<Widget> list = new ArrayList<Widget>();
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Scan scan = new Scan();
scan.setStartRow(WIDGET_TYPE);
scan.setStopRow(WIDGET_TYPE);
ResultScanner rs = table.getScanner(scan);
Iterator<Result> it = rs.iterator();
int c = 0;
while(it.hasNext()) {
Result result = it.next();
for(Cell kv : result.rawCells()) {
if(c > limit) {
break;
}
if(c < offset) {
continue;
}
Gson gson = new Gson();
Widget widget = gson.fromJson(new String(CellUtil.cloneValue(kv), UTF8), Widget.class);
list.add(widget);
c++;
}
}
rs.close();
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return list;
}
public static synchronized List<String> getData(ArrayList<SeriesMetaData> series, long startTime, long endTime) {
ArrayList<String> data = new ArrayList<String>();
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA));
Scan scan = new Scan();
for(SeriesMetaData s : series) {
String snapshot = "";
String[] parts = s.getUrl().toString().split("/");
String metric = parts[5];
String source = parts[6];
long currentDay = startTime;
byte[] rowKey = HBaseUtil.buildKey(currentDay, metric, source);
scan.addFamily(COLUMN_FAMILY);
scan.setStartRow(rowKey);
scan.setStopRow(rowKey);
scan.setTimeRange(startTime, endTime);
scan.setBatch(10000);
ResultScanner rs = table.getScanner(scan);
Iterator<Result> it = rs.iterator();
while(it.hasNext()) {
Result result = it.next();
for(Cell kv : result.rawCells()) {
snapshot = new String(CellUtil.cloneValue(kv));
if(snapshot.matches("-?\\d+(\\.\\d+)?")) {
int endOffset = snapshot.length();
if(snapshot.length() - snapshot.indexOf(".") > 2) {
endOffset = snapshot.indexOf(".") + 2;
}
snapshot = snapshot.substring(0, endOffset);
}
}
}
data.add(snapshot);
rs.close();
}
table.close();
} catch(Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return data;
}
/**
* Find widget by title prefix in HBase.
*
* @param query is prefix query of widget title.
* @return List of Widgets
*/
public static synchronized List<Widget> searchWidget(String query) {
ArrayList<Widget> list = new ArrayList<Widget>();
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Filter filter = new ColumnPrefixFilter(Bytes.toBytes(query));
Scan scan = new Scan();
scan.setStartRow(WIDGET_TYPE);
scan.setStopRow(WIDGET_TYPE);
scan.setFilter(filter);
ResultScanner rs = table.getScanner(scan);
Iterator<Result> it = rs.iterator();
while(it.hasNext()) {
Result result = it.next();
for(Cell kv : result.rawCells()) {
Gson gson = new Gson();
Widget widget = gson.fromJson(new String(CellUtil.cloneValue(kv), UTF8), Widget.class);
list.add(widget);
}
}
rs.close();
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return list;
}
/**
* View a widget information in HBase.
*
* @param title is title of the widget.
* @return List of Widgets
*/
public static synchronized Widget viewWidget(String title) {
Widget w = null;
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Get widget = new Get(WIDGET_TYPE);
widget.addColumn(COMMON_FAMILY, title.getBytes(UTF8));
Result rs = table.get(widget);
byte[] buffer = rs.getValue(COMMON_FAMILY, title.getBytes(UTF8));
Gson gson = new Gson();
w = gson.fromJson(new String(buffer, UTF8), Widget.class);
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return w;
}
/**
* Create a widget in HBase.
*
* @param widget is chukwa Widget object
* @return true if widget is created
*/
public static synchronized boolean createWidget(Widget widget) {
boolean created = false;
try {
widget.tokenize();
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Get widgetTest = new Get(WIDGET_TYPE);
widgetTest.addColumn(COMMON_FAMILY, widget.getTitle().getBytes(UTF8));
if (table.exists(widgetTest)) {
LOG.warn("Widget: " + widget.getTitle() + " already exists.");
created = false;
} else {
Put put = new Put(WIDGET_TYPE);
Gson gson = new Gson();
String buffer = gson.toJson(widget);
put.addColumn(COMMON_FAMILY, widget.getTitle().getBytes(UTF8), buffer.getBytes(UTF8));
table.put(put);
created = true;
}
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return created;
}
/**
* Update a widget in HBase.
*
* @param title is searchable title in a widget
* @param widget is Chukwa Widget object
* @return true if widget has been updated
*/
public static synchronized boolean updateWidget(String title, Widget widget) {
boolean result = false;
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Delete oldWidget = new Delete(WIDGET_TYPE);
oldWidget.addColumn(COMMON_FAMILY, title.getBytes(UTF8));
table.delete(oldWidget);
Put put = new Put(WIDGET_TYPE);
Gson gson = new Gson();
String buffer = gson.toJson(widget);
put.addColumn(COMMON_FAMILY, title.getBytes(UTF8), buffer.getBytes(UTF8));
table.put(put);
table.close();
result = true;
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
LOG.error("Error in updating widget, original title: " +
title + " new title:" + widget.getTitle());
}
return result;
}
/**
* Delete a widget in HBase.
*
* @param title is searchable title in a widget
* @return true if widget has been deleted
*/
public static synchronized boolean deleteWidget(String title) {
boolean result = false;
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Delete oldWidget = new Delete(WIDGET_TYPE);
oldWidget.addColumn(COMMON_FAMILY, title.getBytes(UTF8));
table.delete(oldWidget);
table.close();
result = true;
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
LOG.error("Error in deleting widget: "+ title);
}
return result;
}
public static boolean isDefaultExists() {
boolean exists = false;
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Get dashboardTest = new Get(DASHBOARD_TYPE);
dashboardTest.addColumn(COMMON_FAMILY, "default".getBytes(UTF8));
exists = table.exists(dashboardTest);
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
}
return exists;
}
  /**
   * Seed HBase with the example charts, widgets and dashboards that ship
   * with Chukwa. This is a no-op when the default dashboard already
   * exists, so it is safe to call on every startup.
   */
  public static void populateDefaults() {
    boolean defaultExists = isDefaultExists();
    try {
      if(defaultExists) {
        return;
      }
      // Populate example chart widgets
      createChart(Examples.SYSTEM_LOAD_AVERAGE);
      createChart(Examples.CPU_UTILIZATION);
      createChart(Examples.MEMORY_UTILIZATION);
      createChart(Examples.DISK_UTILIZATION);
      createChart(Examples.NETWORK_UTILIZATION);
      createChart(Examples.SWAP_UTILIZATION);
      // Namenode heap usage
      createChart(Examples.NAMENODE_MEMORY);
      // HDFS Usage
      createChart(Examples.HDFS_USAGE);
      // Resource Manager Memory
      createChart(Examples.RESOURCE_MANAGER_MEMORY);
      // Node Managers Health
      createChart(Examples.NODE_MANAGER_HEALTH);
      // High Availability State
      createChart(Examples.HDFS_HA);
      // HDFS Load
      createChart(Examples.HDFS_LOAD);
      // Namenode RPC Latency
      createChart(Examples.NAMENODE_RPC_LATENCY);
      // Datanode Health
      createChart(Examples.DATANODES);
      // HBase Master Memory
      createChart(Examples.HBASE_MASTER_MEMORY);
      // Populate default widgets
      createWidget(Examples.SYSTEM_LOAD_AVERAGE_WIDGET);
      createWidget(Examples.WELCOME_PAGE_WIDGET);
      createWidget(Examples.TRIAL_DOWNLOAD_WIDGET);
      createWidget(Examples.CLUSTER_RUNNING_WIDGET);
      createWidget(Examples.USER_WORKING_WIDGET);
      createWidget(Examples.APP_RUNNING_WIDGET);
      createWidget(Examples.TRIAL_ABANDON_RATE_WIDGET);
      createWidget(Examples.CLUSTERS_HEALTH_WIDGET);
      createWidget(Examples.TOP_ACTIVE_CLUSTERS_WIDGET);
      createWidget(Examples.TOP_APP_WIDGET);
      // User widgets
      createWidget(Examples.APP_USAGE_WIDGET);
      createWidget(Examples.QUICK_LINKS_WIDGET);
      createWidget(Examples.LOG_SEARCH_WIDGET);
      createWidget(Examples.YARN_APP_WIDGET);
      createWidget(Examples.HDFS_WIDGET);
      createWidget(Examples.HBASE_TABLE_WIDGET);
      createWidget(Examples.TOP_USER_WIDGET);
      // System widgets
      createWidget(Examples.HDFS_HA_STATE_WIDGET);
      createWidget(Examples.HDFS_LOAD_WIDGET);
      createWidget(Examples.HDFS_NAMENODE_LATENCY_WIDGET);
      createWidget(Examples.DATANODES_HEALTH_WIDGET);
      createWidget(Examples.NODE_MANAGERS_HEALTH_WIDGET);
      createWidget(Examples.HDFS_REMAINING_WIDGET);
      createWidget(Examples.NAMENODE_MEMORY_WIDGET);
      createWidget(Examples.RESOURCE_MANAGER_MEMORY_WIDGET);
      createWidget(Examples.HBASE_MASTER_MOMORY_WIDGET);
      createWidget(Examples.CPU_UTILIZATION_WIDGET);
      createWidget(Examples.MEMORY_UTILIZATION_WIDGET);
      createWidget(Examples.SWAP_UTILIZATION_WIDGET);
      createWidget(Examples.DISK_UTILIZATION_WIDGET);
      createWidget(Examples.NETWORK_UTILIZATION_WIDGET);
      createWidget(Examples.CPU_HEAPMAP_WIDGET);
      createWidget(Examples.HDFS_UI_WIDGET);
      createWidget(Examples.HBASE_MASTER_UI_WIDGET);
      // Populate default dashboard
      updateDashboard("default", "", Examples.DEFAULT_DASHBOARD);
      updateDashboard("user", "", Examples.USER_DASHBOARD);
      updateDashboard("system", "", Examples.SYSTEM_DASHBOARD);
    } catch (Throwable ex) {
      LOG.error(ExceptionUtil.getStackTrace(ex));
    }
  }
public static synchronized Dashboard getDashboard(String id, String user) {
Dashboard dash = null;
String key = new StringBuilder().append(id).
append("|").append(user).toString();
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Get dashboard = new Get(DASHBOARD_TYPE);
dashboard.addColumn(COMMON_FAMILY, key.getBytes(UTF8));
Result rs = table.get(dashboard);
byte[] buffer = rs.getValue(COMMON_FAMILY, key.getBytes(UTF8));
if(buffer == null) {
// If user dashboard is not found, use default dashboard.
key = new StringBuilder().append(id).append("|").toString();
dashboard = new Get(DASHBOARD_TYPE);
dashboard.addColumn(COMMON_FAMILY, key.getBytes(UTF8));
rs = table.get(dashboard);
buffer = rs.getValue(COMMON_FAMILY, key.getBytes(UTF8));
}
Gson gson = new Gson();
dash = gson.fromJson(new String(buffer, UTF8), Dashboard.class);
table.close();
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
LOG.error("Error retrieving dashboard, id: " +
id + " user:" + user);
}
return dash;
}
public static boolean updateDashboard(String id, String user, Dashboard dash) {
boolean result = false;
String key = new StringBuilder().append(id).
append("|").append(user).toString();
try {
getHBaseConnection();
Table table = connection.getTable(TableName.valueOf(CHUKWA_META));
Put put = new Put(DASHBOARD_TYPE);
Gson gson = new Gson();
String buffer = gson.toJson(dash);
put.addColumn(COMMON_FAMILY, key.getBytes(UTF8), buffer.getBytes(UTF8));
table.put(put);
table.close();
result = true;
} catch (Exception e) {
closeHBase();
LOG.error(ExceptionUtil.getStackTrace(e));
LOG.error("Error in updating dashboard, id: " +
id + " user:" + user);
}
return result;
}
}
| 8,303 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datastore/UserStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datastore;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.hicc.HiccWebServer;
import org.apache.hadoop.chukwa.rest.bean.UserBean;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Persists HICC user profiles as JSON ".profile" files on the Chukwa
 * file system, one file per user id.
 */
public class UserStore {
  private String uid = null;
  private UserBean profile = null;
  private static Log log = LogFactory.getLog(UserStore.class);
  private static Configuration config = new Configuration();
  private static ChukwaConfiguration chukwaConf = new ChukwaConfiguration();
  // Base directory holding all profile files; resolved once at class load.
  private static String hiccPath = null;
  static {
    config = HiccWebServer.getConfig();
    hiccPath = config.get("fs.defaultFS")+File.separator+chukwaConf.get("chukwa.data.dir")+File.separator+"hicc"+File.separator+"users";
  }
  public UserStore() throws IllegalAccessException {
  }
  /**
   * Creates a store bound to a user and eagerly loads the profile.
   *
   * @param uid is the user identifier
   * @throws IllegalAccessException if the stored profile cannot be parsed
   */
  public UserStore(String uid) throws IllegalAccessException {
    this.uid = uid;
    init(uid);
  }
  /**
   * Loads the profile for the given user from the file system, or creates
   * an empty in-memory profile when no file exists yet.
   *
   * @param uid is the user identifier
   * @throws IllegalAccessException if the stored profile cannot be parsed
   */
  public void init(String uid) throws IllegalAccessException {
    StringBuilder profilePath = new StringBuilder();
    profilePath.append(hiccPath);
    profilePath.append(File.separator);
    profilePath.append(uid);
    profilePath.append(".profile");
    Path profileFile = new Path(profilePath.toString());
    FileSystem fs;
    try {
      fs = FileSystem.get(config);
      if(fs.exists(profileFile)) {
        FileStatus[] fstatus = fs.listStatus(profileFile);
        long size = fstatus[0].getLen();
        byte[] buffer = new byte[(int)size];
        FSDataInputStream viewStream = fs.open(profileFile);
        try {
          viewStream.readFully(buffer);
        } finally {
          // Close even when readFully throws; the stream previously leaked.
          viewStream.close();
        }
        try {
          JSONObject json = (JSONObject) JSONValue.parse(new String(buffer, Charset.forName("UTF-8")));
          profile = new UserBean(json);
        } catch (Exception e) {
          log.error(ExceptionUtil.getStackTrace(e));
          throw new IllegalAccessException("Unable to access user profile database.");
        }
      } else {
        // First time this user is seen: start with an empty profile.
        profile = new UserBean();
        profile.setId(uid);
        JSONArray ja = new JSONArray();
        profile.setViews(ja);
        JSONObject json = new JSONObject();
        profile.setProperties(json.toString());
      }
    } catch (IOException ex) {
      log.error(ExceptionUtil.getStackTrace(ex));
    }
  }
  /**
   * Returns the cached profile, loading it lazily when necessary.
   *
   * @return the user's profile bean
   * @throws IllegalAccessException if the stored profile cannot be parsed
   */
  public UserBean get() throws IllegalAccessException {
    if(profile==null) {
      init(uid);
    }
    return profile;
  }
  /**
   * Writes the given profile back to the file system, replacing any
   * existing copy, and caches it.
   *
   * @param profile is the profile to persist
   * @throws IllegalAccessException if the profile cannot be written
   */
  public void set(UserBean profile) throws IllegalAccessException {
    StringBuilder profilePath = new StringBuilder();
    profilePath.append(hiccPath);
    profilePath.append(File.separator);
    profilePath.append(profile.getId());
    profilePath.append(".profile");
    Path profileFile = new Path(profilePath.toString());
    FileSystem fs;
    try {
      fs = FileSystem.get(config);
      FSDataOutputStream out = fs.create(profileFile,true);
      try {
        out.write(profile.deserialize().toString().getBytes(Charset.forName("UTF-8")));
      } finally {
        // Close even when the write throws; the stream previously leaked.
        out.close();
      }
    } catch (IOException ex) {
      log.error(ExceptionUtil.getStackTrace(ex));
      throw new IllegalAccessException("Unable to access user profile database.");
    }
    this.profile = profile;
  }
  /**
   * Lists the ids of all stored user profiles.
   *
   * @return a JSON array of user ids
   * @throws IllegalAccessException if the profile directory cannot be read
   */
  public static JSONArray list() throws IllegalAccessException {
    StringBuilder profilePath = new StringBuilder();
    profilePath.append(hiccPath);
    profilePath.append(File.separator);
    profilePath.append("*.profile");
    Path viewFile = new Path(profilePath.toString());
    FileSystem fs;
    JSONArray list = new JSONArray();
    try {
      fs = FileSystem.get(config);
      FileStatus[] fstatus = fs.listStatus(viewFile);
      if(fstatus!=null) {
        for(int i=0;i<fstatus.length;i++) {
          long size = fstatus[i].getLen();
          byte[] buffer = new byte[(int)size];
          FSDataInputStream profileStream = fs.open(fstatus[i].getPath());
          try {
            profileStream.readFully(buffer);
          } finally {
            // Ensure the stream is released for every profile file.
            profileStream.close();
          }
          try {
            UserBean user = new UserBean((JSONObject) JSONValue.parse(new String(buffer, Charset.forName("UTF-8"))));
            list.add(user.getId());
          } catch (Exception e) {
            log.error(ExceptionUtil.getStackTrace(e));
          }
        }
      }
    } catch (IOException ex) {
      log.error(ExceptionUtil.getStackTrace(ex));
      throw new IllegalAccessException("Unable to access user profile database.");
    }
    return list;
  }
}
| 8,304 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datastore/ViewStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datastore;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.hicc.HiccWebServer;
import org.apache.hadoop.chukwa.rest.bean.ViewBean;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.json.simple.JSONArray;
public class ViewStore {
private String vid = null;
private String uid = null;
private ViewBean view = null;
private static Log log = LogFactory.getLog(ViewStore.class);
private static Configuration config = null;
private static ChukwaConfiguration chukwaConf = new ChukwaConfiguration();
private static String viewPath = null;
private static String publicViewPath = viewPath+File.separator+"public";
private static String usersViewPath = viewPath+File.separator+"users";
private static String PUBLIC = "public".intern();
static {
config = HiccWebServer.getConfig();
viewPath = config.get("fs.defaultFS")+File.separator+chukwaConf.get("chukwa.data.dir")+File.separator+"hicc"+File.separator+"views";
}
public ViewStore() throws IllegalAccessException {
}
public ViewStore(String uid, String vid) throws IllegalAccessException {
this.uid=uid;
this.vid=vid;
load(uid, vid);
}
public void load(String uid, String vid) throws IllegalAccessException {
StringBuilder vp = new StringBuilder();
vp.append(usersViewPath);
vp.append(File.separator);
vp.append(uid);
vp.append(File.separator);
vp.append(vid);
vp.append(".view");
Path viewFile = new Path(vp.toString());
try {
FileSystem fs = FileSystem.get(config);
if(!fs.exists(viewFile)) {
StringBuilder pubPath = new StringBuilder();
pubPath.append(publicViewPath);
pubPath.append(File.separator);
pubPath.append(vid);
pubPath.append(".view");
viewFile = new Path(pubPath.toString());
}
if(fs.exists(viewFile)) {
FileStatus[] fstatus = fs.listStatus(viewFile);
long size = fstatus[0].getLen();
FSDataInputStream viewStream = fs.open(viewFile);
byte[] buffer = new byte[(int)size];
viewStream.readFully(buffer);
viewStream.close();
try {
view = new ViewBean(buffer);
view.update();
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
throw new IllegalAccessException("Unable to access view: "+vid);
}
}
} catch (IOException ex) {
log.error(ExceptionUtil.getStackTrace(ex));
}
}
public String getMessage() {
return view.toString();
}
public ViewBean get() throws IllegalAccessException {
if(view==null) {
load(uid, vid);
}
if(view==null) {
// Display global default view if user default view does not exist.
try {
load(null, vid);
} catch(Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
view = null;
}
}
return view;
}
public void set(ViewBean view) throws IllegalAccessException {
try {
if (this.view == null || (this.view.getOwner().intern() == view.getOwner().intern())) {
if(this.view!=null) {
delete();
}
StringBuilder viewPath = new StringBuilder();
if(view.getPermissionType().intern()==PUBLIC) {
viewPath.append(publicViewPath);
} else {
viewPath.append(usersViewPath);
viewPath.append(File.separator);
viewPath.append(uid);
}
viewPath.append(File.separator);
viewPath.append(view.getName());
viewPath.append(".view");
Path viewFile = new Path(viewPath.toString());
try {
FileSystem fs = FileSystem.get(config);
FSDataOutputStream out = fs.create(viewFile,true);
out.write(view.deserialize().toString().getBytes(Charset.forName("UTF-8")));
out.close();
} catch (IOException ex) {
log.error(ExceptionUtil.getStackTrace(ex));
}
this.view = view;
} else {
if(view.getPermissionType().intern()==PUBLIC) {
throw new IllegalAccessException("Unable to save public view, duplicated view exists.");
} else {
throw new IllegalAccessException("Unable to save user view.");
}
}
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
throw new IllegalAccessException("Unable to access user view.");
}
}
public void delete() throws IllegalAccessException {
try {
if(this.view==null) {
get();
}
if (this.view!=null) {
StringBuilder viewPath = new StringBuilder();
if(view.getPermissionType().intern()==PUBLIC) {
viewPath.append(publicViewPath);
} else {
viewPath.append(usersViewPath);
viewPath.append(File.separator);
viewPath.append(uid);
}
viewPath.append(File.separator);
viewPath.append(view.getName());
viewPath.append(".view");
Path viewFile = new Path(viewPath.toString());
try {
FileSystem fs = FileSystem.get(config);
fs.delete(viewFile, true);
} catch (IOException ex) {
log.error(ExceptionUtil.getStackTrace(ex));
}
} else {
throw new IllegalAccessException("Unable to delete user view, view does not exist.");
}
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
throw new IllegalAccessException("Unable to access user view.");
}
}
public static JSONArray list(String uid) throws IllegalAccessException {
StringBuilder viewPath = new StringBuilder();
viewPath.append(usersViewPath);
viewPath.append(File.separator);
viewPath.append(uid);
String[] pathList = new String[2];
pathList[0]=viewPath.toString();
pathList[1]=publicViewPath;
JSONArray list = new JSONArray();
for(String path : pathList) {
Path viewFile = new Path(path);
try {
FileSystem fs = FileSystem.get(config);
FileStatus[] fstatus = fs.listStatus(viewFile);
if(fstatus!=null) {
for(int i=0;i<fstatus.length;i++) {
if(!fstatus[i].getPath().getName().endsWith(".view")) {
continue;
}
long size = fstatus[i].getLen();
FSDataInputStream viewStream = fs.open(fstatus[i].getPath());
byte[] buffer = new byte[(int)size];
viewStream.readFully(buffer);
viewStream.close();
try {
ViewBean view = new ViewBean(buffer);
Map<String, String> json=new LinkedHashMap<String, String>();
json.put("name", view.getName());
json.put("type", view.getPermissionType());
json.put("owner", view.getOwner());
if(uid.intern()==view.getOwner().intern()) {
json.put("editable","true");
} else {
json.put("editable","false");
}
list.add(json);
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
}
}
}
} catch (IOException ex) {
log.error(ExceptionUtil.getStackTrace(ex));
throw new IllegalAccessException("Unable to access user view.");
}
}
return list;
}
}
| 8,305 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datastore/WidgetStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datastore;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.hicc.HiccWebServer;
import org.apache.hadoop.chukwa.rest.bean.CatalogBean;
import org.apache.hadoop.chukwa.rest.bean.WidgetBean;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Persists HICC widget descriptors as ".descriptor" files and maintains an
 * in-memory catalog of all widgets keyed by id.
 */
public class WidgetStore {
  private static Log log = LogFactory.getLog(WidgetStore.class);
  private static Configuration config = new Configuration();
  private static ChukwaConfiguration chukwaConf = new ChukwaConfiguration();
  // Base directory holding all widget descriptor files.
  private static String hiccPath = null;
  private static CatalogBean catalog = null;
  private static HashMap<String, WidgetBean> list = new HashMap<String, WidgetBean>();
  static {
    config = HiccWebServer.getConfig();
    hiccPath = config.get("fs.defaultFS")+File.separator+chukwaConf.get("chukwa.data.dir")+File.separator+"hicc"+File.separator+"widgets";
  }
  public WidgetStore() throws IllegalAccessException {
  }
  /**
   * Writes a widget descriptor to the file system and refreshes the cache.
   *
   * @param widget is the widget to persist
   * @throws IllegalAccessException if the descriptor cannot be written
   */
  public void set(WidgetBean widget) throws IllegalAccessException {
    try {
      StringBuilder widgetPath = new StringBuilder();
      widgetPath.append(hiccPath);
      widgetPath.append(File.separator);
      widgetPath.append(widget.getId());
      widgetPath.append(".descriptor");
      Path widgetFile = new Path(widgetPath.toString());
      FileSystem fs;
      try {
        fs = FileSystem.get(config);
        FSDataOutputStream out = fs.create(widgetFile,true);
        try {
          // Write explicit UTF-8 bytes, consistent with how the file is
          // read back. DataOutputStream.writeBytes() discards the high
          // byte of every char and would corrupt non-ASCII content.
          out.write(widget.deserialize().toString().getBytes(Charset.forName("UTF-8")));
        } finally {
          out.close();
        }
      } catch (IOException ex) {
        log.error(ExceptionUtil.getStackTrace(ex));
      }
      cacheWidgets();
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
      throw new IllegalAccessException("Unable to access user view database.");
    }
  }
  /**
   * Rebuilds the in-memory catalog and id-to-widget map from the descriptor
   * files on disk.
   *
   * @throws IllegalAccessException if the widget directory cannot be read
   */
  public static void cacheWidgets() throws IllegalAccessException {
    StringBuilder widgetPath = new StringBuilder();
    widgetPath.append(hiccPath);
    Path widgetFiles = new Path(widgetPath.toString());
    FileSystem fs;
    catalog = new CatalogBean();
    catalog.setId("root");
    catalog.setLabel("root");
    try {
      fs = FileSystem.get(config);
      FileStatus[] fstatus = fs.listStatus(widgetFiles);
      if(fstatus!=null) {
        for(int i=0;i<fstatus.length;i++) {
          long size = fstatus[i].getLen();
          byte[] buffer = new byte[(int)size];
          FSDataInputStream widgetStream = fs.open(fstatus[i].getPath());
          try {
            widgetStream.readFully(buffer);
          } finally {
            // Close even when readFully throws; the stream previously leaked.
            widgetStream.close();
          }
          try {
            JSONObject widgetBuffer = (JSONObject) JSONValue.parse(new String(buffer, Charset.forName("UTF-8")));
            WidgetBean widget = new WidgetBean(widgetBuffer);
            catalog.addCatalog(widget);
            list.put(widget.getId(),widget);
          } catch (Exception e) {
            log.error(ExceptionUtil.getStackTrace(e));
          }
        }
      }
    } catch (IOException ex) {
      log.error(ExceptionUtil.getStackTrace(ex));
      throw new IllegalAccessException("Unable to access user view database.");
    }
  }
  /**
   * Returns the widget catalog, building it lazily on first access.
   *
   * @return the root catalog bean
   * @throws IllegalAccessException if the widget directory cannot be read
   */
  public static CatalogBean getCatalog() throws IllegalAccessException {
    if(catalog==null) {
      cacheWidgets();
    }
    return catalog;
  }
  /**
   * Returns the id-to-widget map, populating it lazily on first access.
   *
   * @return map of widget id to widget bean
   * @throws IllegalAccessException if the widget directory cannot be read
   */
  public static HashMap<String, WidgetBean> list() throws IllegalAccessException {
    if(list.size()==0) {
      cacheWidgets();
    }
    return list;
  }
}
| 8,306 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datatrigger/HttpTriggerAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datatrigger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.BufferedReader;
import java.io.OutputStreamWriter;
import java.net.URL;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Map;
import java.util.HashMap;
import java.util.Map.Entry;
/**
* Trigger action that makes an HTTP request when executed.
* <P>
* To use this trigger, two types of configurations must be set. First, this class
* must be configured to be invoked for a given trigger event. Second, the
* the relevant settings for the HTTP request(s) to be made must be set as
* described below.
* <P>
* The general format of this classes configs is
* <code>chukwa.trigger.[eventName].http.[N].[paramName]</code> where
* <code>eventName</code> is the name of the event the request values are bound
* to (see TriggerEvent), <code>N</code> is a counter for each request configured (starting at 1)
* and <code>paramName</code> is the request parameter being set.
* <P>
* Using the post demux success trigger event as an example, the first request
* to be fired would use the following configurations
* <ul>
* <li><code>chukwa.trigger.post.demux.success.http.1.url</code> - The HTTP url to
* invoke.</li>
* <li><code>chukwa.trigger.post.demux.success.http.1.method</code> - The HTTP method
* (optional, default=GET).</li>
* <li><code>chukwa.trigger.post.demux.success.http.1.headers</code> - A comma-delimited
* set of HTTP headers (in <code>[headerName]:[headerValue]</code> form) to
* include (optional).</li>
* <li><code>chukwa.trigger.post.demux.success.http.1.body</code> - The text HTTP body
* to include (optional).</li>
* <li><code>chukwa.trigger.post.demux.success.http.1.connect.timeout</code> - The
* HTTP connection timeout setting in milliseconds (optional, default=5000ms).</li>
* <li><code>chukwa.trigger.post.demux.success.http.1.read.timeout</code> - The
* HTTP read timeout setting in milliseconds (optional, default=5000ms).</li>
* </ul>
* @see TriggerAction
* @see TriggerEvent
*/
public class HttpTriggerAction implements TriggerAction {

  protected Log log = LogFactory.getLog(getClass());

  /**
   * Iterates over each URL found, fetches the other settings for that request
   * number and fires an HTTP request. A failure in one request is logged and
   * does not stop the remaining requests.
   *
   * @param conf is Chukwa configuration
   * @param fs is HDFS File System
   * @param src is list of sources to look for data
   * @param triggerEvent is type of processing to happen
   * @throws IOException if error in process triggers
   */
  public void execute(Configuration conf, FileSystem fs,
      FileStatus[] src, TriggerEvent triggerEvent) throws IOException {

    if (log.isDebugEnabled()) {
      for (FileStatus file : src) {
        log.debug("Execute file: " + file.getPath());
      }
    }

    // Requests are configured as .http.1.*, .http.2.*, ...; stop at the
    // first request number with no url configured.
    int reqNumber = 1;
    URL url = null;
    while ((url = getUrl(conf, triggerEvent, reqNumber)) != null) {

      // get settings for this request
      String method = getMethod(conf, triggerEvent, reqNumber);
      Map<String, String> headers = getHeaders(conf, triggerEvent, reqNumber);
      String body = getBody(conf, triggerEvent, reqNumber);
      int connectTimeout = getConnectTimeout(conf, triggerEvent, reqNumber);
      int readTimeout = getReadTimeout(conf, triggerEvent, reqNumber);

      try {
        // make the request; one failed request should not abort the rest
        makeHttpRequest(url, method, headers, body, connectTimeout, readTimeout);
      }
      catch(Exception e) {
        log.error("Error making request to " + url, e);
      }
      reqNumber++;
    }
  }

  /**
   * Fires a single HTTP request and logs the response.
   *
   * @param url target URL (no-op when null)
   * @param method HTTP method to use
   * @param headers request headers to set (may be null)
   * @param body request body to send (no body sent when null)
   * @param connectTimeout connection timeout in milliseconds
   * @param readTimeout read timeout in milliseconds
   * @throws IOException on connection or I/O failure
   */
  private void makeHttpRequest(URL url, String method,
      Map<String, String> headers, String body,
      int connectTimeout, int readTimeout) throws IOException {
    if (url == null) {
      return;
    }

    // initialize the connection
    HttpURLConnection conn = (HttpURLConnection)url.openConnection();
    conn.setRequestMethod(method);
    conn.setDoInput(true);
    conn.setConnectTimeout(connectTimeout);
    conn.setReadTimeout(readTimeout);

    // set headers
    boolean contentLengthExists = false;
    if (headers != null) {
      for(Entry<String, String> entry : headers.entrySet()) {
        String name = entry.getKey();
        String value = entry.getValue();
        if (log.isDebugEnabled()) {
          log.debug("Setting header " + name + ": " + value);
        }
        if (name.equalsIgnoreCase("content-length")) {
          contentLengthExists = true;
        }
        conn.setRequestProperty(name, value);
      }
    }

    // set content-length if not already set
    if (!"GET".equals(method) && !contentLengthExists) {
      // BUGFIX: use the UTF-8 encoded byte count, not String.length(); the
      // two differ for non-ASCII bodies and the body is written as UTF-8
      // below.
      String contentLength = body != null ?
          String.valueOf(body.getBytes(Charset.forName("UTF-8")).length) : "0";
      conn.setRequestProperty("Content-Length", contentLength);
    }

    // send body if it exists
    if (body != null) {
      conn.setDoOutput(true);
      OutputStreamWriter writer = new OutputStreamWriter(conn.getOutputStream(), Charset.forName("UTF-8"));
      writer.write(body);
      writer.flush();
      writer.close();
    }
    else {
      conn.setDoOutput(false);
    }

    // read response code/message and dump response
    log.info("Making HTTP " + method + " to: " + url);
    int responseCode = conn.getResponseCode();
    log.info("HTTP Response code: " + responseCode);
    if (responseCode != 200) {
      log.info("HTTP Response message: " + conn.getResponseMessage());
    }
    else {
      BufferedReader reader = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), Charset.forName("UTF-8")));
      String line;
      StringBuilder sb = new StringBuilder();
      while ((line = reader.readLine()) != null) {
        if(sb.length() > 0) {
          sb.append("\n");
        }
        sb.append(line);
      }
      log.info("HTTP Response:\n" + sb);
      reader.close();
    }
    conn.disconnect();
  }

  /** @return the configured URL for this request number, or null when unset. */
  protected URL getUrl(Configuration conf,
                       TriggerEvent triggerEvent,
                       int reqNumber) throws MalformedURLException {
    String urlString = conf.get(getConfigKey(triggerEvent, reqNumber, "url"), null);
    if (urlString == null) {
      return null;
    }
    return new URL(urlString);
  }

  /** @return the configured HTTP method, defaulting to GET. */
  protected String getMethod(Configuration conf,
                             TriggerEvent triggerEvent,
                             int reqNumber) {
    return conf.get(getConfigKey(triggerEvent, reqNumber, "method"), "GET");
  }

  /**
   * Parses the comma-delimited "name:value" header config into a map.
   * Malformed entries are logged and skipped.
   */
  protected Map<String, String> getHeaders(Configuration conf,
                                           TriggerEvent triggerEvent,
                                           int reqNumber) {
    Map<String, String> headerMap = new HashMap<String,String>();
    String headers = conf.get(getConfigKey(triggerEvent, reqNumber, "headers"), null);
    if (headers != null) {
      String[] headersSplit = headers.split(",");
      for (String header : headersSplit) {
        String[] nvp = header.split(":", 2);
        if (nvp.length < 2) {
          log.error("Invalid HTTP header found: " + Arrays.toString(nvp));
          continue;
        }
        headerMap.put(nvp[0].trim(), nvp[1].trim());
      }
    }
    return headerMap;
  }

  /** @return the configured request body, or null when none is configured. */
  protected String getBody(Configuration conf,
                           TriggerEvent triggerEvent,
                           int reqNumber) {
    // BUGFIX: the default was "GET" (copy-paste from getMethod), which made
    // every request carry a body of "GET" even when none was configured.
    return conf.get(getConfigKey(triggerEvent, reqNumber, "body"), null);
  }

  /** @return the connect timeout in ms (default 5000). */
  protected int getConnectTimeout(Configuration conf,
                                  TriggerEvent triggerEvent,
                                  int reqNumber) {
    // NOTE(review): a non-numeric value throws NumberFormatException from
    // Integer.parseInt; callers appear to rely on execute()'s catch block.
    String timeout = conf.get(getConfigKey(triggerEvent, reqNumber, "connect.timeout"), null);
    return timeout != null ? Integer.parseInt(timeout) : 5000;
  }

  /** @return the read timeout in ms (default 5000). */
  protected int getReadTimeout(Configuration conf,
                               TriggerEvent triggerEvent,
                               int reqNumber) {
    String timeout = conf.get(getConfigKey(triggerEvent, reqNumber, "read.timeout"), null);
    return timeout != null ? Integer.parseInt(timeout) : 5000;
  }

  /** Builds "[eventKeyBase].http.[reqNumber].[name]". */
  private String getConfigKey(TriggerEvent triggerEvent, int reqNumber, String name) {
    return triggerEvent.getConfigKeyBase() + ".http." + reqNumber + "." + name;
  }
}
| 8,307 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datatrigger/TriggerEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datatrigger;
/**
* enum that encapsulates the different possible events that can be triggered.
* When a call is made to a TriggerAction class, the caller must pass a TriggerEvent
* object to identify the event that is occurring.
*/
public enum TriggerEvent {
  POST_DEMUX_SUCCESS("postDemuxSuccess", "chukwa.trigger.post.demux.success");

  // Both fields are assigned exactly once in the constructor; final makes
  // the enum's immutability explicit.
  private final String name;
  private final String configKeyBase;

  private TriggerEvent(String name, String configKeyBase) {
    this.name = name;
    this.configKeyBase = configKeyBase;
  }

  /** @return the human-readable event name, e.g. "postDemuxSuccess". */
  public String getName() {
    return name;
  }

  /** @return the configuration-key prefix used to look up trigger settings. */
  public String getConfigKeyBase() {
    return configKeyBase;
  }
}
| 8,308 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datatrigger/TriggerAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datatrigger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
public interface TriggerAction {
  /**
   * Invoked when the given trigger event fires.
   *
   * @param conf Chukwa/Hadoop configuration used to look up action settings
   * @param fs file system the event's data resides on
   * @param src files associated with the firing event
   * @param triggerEvent identifies which event is occurring
   * @throws IOException if the action fails
   */
  public void execute(Configuration conf, FileSystem fs,
      FileStatus[] src, TriggerEvent triggerEvent) throws IOException;
}
| 8,309 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/ChukwaInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools;
import java.io.IOException;
import java.util.regex.*;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.util.RegexUtil;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.log4j.Logger;
/***
* An InputFormat for processing logfiles in Chukwa. Designed to be a nearly
* drop-in replacement for the Hadoop default TextInputFormat so that code can
* be ported to use Chukwa with minimal modification.
*
* Has an optional configuration option, chukwa.inputfilter.datatype which can
* be used to filter the input by datatype. If need exists, this mechanism could
* be extended to also filter by other fields.
*
*/
@SuppressWarnings("deprecation")
public class ChukwaInputFormat extends
    SequenceFileInputFormat<LongWritable, Text> {

  /**
   * RecordReader that presents each line inside each Chunk as a Text value,
   * keyed by a running line number across the whole split. Chunks whose
   * datatype does not match the configured filter regex are skipped.
   */
  public static class ChukwaRecordReader implements
      RecordReader<LongWritable, Text> {

    static Logger LOG = Logger.getLogger(ChukwaInputFormat.class);

    public static final String DATATYPE_PROPERTY = "chukwa.inputfilter.datatype";
    public static final String DATATYPE_PROPERTY_DEFAULT = ".*";

    private SequenceFileRecordReader<ChukwaArchiveKey, Chunk> sfrr;
    private long lineInFile = 0; // running line counter, used as the key
    private Chunk curChunk = null; // chunk currently being split into lines
    private int lineInChunk; // outside of next, it's the array offset of next
    // line to be returned
    private int[] lineOffsets = null; // byte offsets of line ends in curChunk
    private int byteOffsetOfLastLine = 0; // start offset of the next line
    Pattern dtPattern; // compiled datatype filter

    public ChukwaRecordReader(Configuration conf, FileSplit split)
        throws IOException {
      sfrr = new SequenceFileRecordReader<ChukwaArchiveKey, Chunk>(conf, split);
      String datatype = conf.get(DATATYPE_PROPERTY, DATATYPE_PROPERTY_DEFAULT);
      // Fall back to the match-everything default rather than failing the
      // whole job on a malformed user-supplied regex.
      if (!RegexUtil.isRegex(datatype)) {
        LOG.warn("Error parsing '" + DATATYPE_PROPERTY
            + "' property as a regex: " + RegexUtil.regexError(datatype)
            + ". Using default instead: " + DATATYPE_PROPERTY_DEFAULT);
        datatype = DATATYPE_PROPERTY_DEFAULT;
      }
      dtPattern = Pattern.compile(datatype);
    }

    @Override
    public void close() throws IOException {
      sfrr.close();
    }

    @Override
    public LongWritable createKey() {
      return new LongWritable();
    }

    @Override
    public Text createValue() {
      return new Text();
    }

    @Override
    public long getPos() throws IOException {
      return sfrr.getPos();
    }

    @Override
    public float getProgress() throws IOException {
      return sfrr.getProgress();
    }

    /** @return true if the chunk's datatype matches the filter regex. */
    private boolean passesFilters(Chunk c) {
      return dtPattern.matcher(c.getDataType()).matches();
    }

    /**
     * Emits the next line. Pulls a fresh chunk from the underlying
     * sequence-file reader when the current one is exhausted, skipping
     * chunks that fail the datatype filter.
     */
    @Override
    public boolean next(LongWritable key, Text value) throws IOException {
      if (curChunk == null) {
        ChukwaArchiveKey k = new ChukwaArchiveKey();
        curChunk = ChunkImpl.getBlankChunk();
        boolean unfilteredChunk = false;
        while (!unfilteredChunk) {
          boolean readOK = sfrr.next(k, curChunk);
          if (!readOK) {
            // Underlying file exhausted: no more records.
            curChunk = null;
            return false;
          }
          unfilteredChunk = passesFilters(curChunk);
        }
        lineOffsets = curChunk.getRecordOffsets();
        lineInChunk = 0;
        byteOffsetOfLastLine = 0;
      } // end curChunk == null
      // Emit the bytes between the previous line's end and this line's
      // recorded end offset.
      value.set(curChunk.getData(), byteOffsetOfLastLine,
          lineOffsets[lineInChunk] - byteOffsetOfLastLine);
      if (lineInChunk >= lineOffsets.length - 1) { // end of chunk
        curChunk = null;
      } else
        byteOffsetOfLastLine = lineOffsets[lineInChunk++] + 1;
      key.set(lineInFile);
      lineInFile++;
      return true;
    }
  } // end ChukwaRecordReader

  @Override
  public RecordReader<LongWritable, Text> getRecordReader(InputSplit split,
      JobConf job, Reporter reporter) throws IOException {
    reporter.setStatus(split.toString());
    // NOTE(review): LOG here is not the nested class's logger; it presumably
    // resolves to a logger field inherited from the superclass — confirm.
    LOG.info("returning a new chukwa record reader");
    return new ChukwaRecordReader(job, (FileSplit) split);
  }
}
| 8,310 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/mdl/DataConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.mdl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.util.Iterator;
import java.util.HashMap;
import java.util.Map;
import java.io.File;
import java.io.FilenameFilter;
public class DataConfig {
  private Configuration config;
  final static String MDL_XML = "mdl.xml";
  private Log log = LogFactory.getLog(DataConfig.class);

  /**
   * Loads configuration from an explicit file path.
   *
   * @param path path to the configuration file
   */
  public DataConfig(String path) {
    Path fileResource = new Path(path);
    config = new Configuration();
    config.addResource(fileResource);
  }

  /**
   * Loads mdl.xml from $CHUKWA_CONF_DIR (or the working directory when the
   * variable is unset), plus any site-specific "*mdl.xml" files found there.
   */
  public DataConfig() {
    String dataConfig = System.getenv("CHUKWA_CONF_DIR");
    if (dataConfig == null) {
      dataConfig = MDL_XML;
    } else {
      dataConfig += File.separator + MDL_XML;
    }
    log.debug("DATACONFIG=" + dataConfig);
    // (The previous "if (config == null)" guard was removed: the field is
    // always null at this point in this constructor.)
    try {
      Path fileResource = new Path(dataConfig);
      config = new Configuration();
      config.addResource(fileResource);
    } catch (Exception e) {
      log.debug("Error reading configuration file:" + dataConfig);
    }

    if (System.getenv("CHUKWA_CONF_DIR") != null) {
      // Allow site-specific MDL files to be included in the
      // configuration so as to keep the "main" mdl.xml pure.
      File confDir = new File(System.getenv("CHUKWA_CONF_DIR"));
      File[] confFiles = confDir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          // Implements a naming convention of ending with "mdl.xml"
          // but is careful not to pick up mdl.xml itself again.
          return name.endsWith(MDL_XML) && !name.equals(MDL_XML);
        }
      });
      if (confFiles != null) {
        for (File confFile : confFiles)
          config.addResource(new Path(confFile.getAbsolutePath()));
      }
    }
  }

  /** @return the value for the given key, or null when unset. */
  public String get(String key) {
    return config.get(key);
  }

  /** Sets the given key to the given value. */
  public void put(String key, String value) {
    config.set(key, value);
  }

  /** @return an iterator over all configuration entries. */
  public Iterator<Map.Entry<String, String>> iterator() {
    return config.iterator();
  }

  /**
   * Returns all configuration entries whose key starts with the given prefix.
   *
   * BUGFIX: the previous implementation rendered each entry through
   * Map.Entry.toString() and re-split it on "=", which truncated any value
   * that itself contained an '=' character (and matched the prefix against
   * the combined "key=value" string). Use getKey()/getValue() directly.
   *
   * @param key key prefix to match
   * @return map of matching keys to their values
   */
  public HashMap<String, String> startWith(String key) {
    HashMap<String, String> transformer = new HashMap<String, String>();
    Iterator<Map.Entry<String, String>> entries = config.iterator();
    while (entries.hasNext()) {
      Map.Entry<String, String> entry = entries.next();
      if (entry.getKey().startsWith(key)) {
        transformer.put(entry.getKey(), entry.getValue());
      }
    }
    return transformer;
  }
}
| 8,311 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/mdl/LoaderServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.mdl;
import java.io.*;
import java.lang.management.ManagementFactory;
import java.nio.charset.Charset;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class LoaderServer {

  String name;
  private static Log log = LogFactory.getLog(LoaderServer.class);
  private FileOutputStream pidFileOutput = null;

  public LoaderServer(String name) {
    this.name = name;
  }

  /**
   * Writes this JVM's pid to $CHUKWA_HOME/var/run/[name].pid.
   *
   * @throws IOException when the pid file cannot be written
   */
  public void init() throws IOException {
    // RuntimeMXBean name has the form "pid@hostname"; keep only the pid.
    String pidLong = ManagementFactory.getRuntimeMXBean().getName();
    String[] items = pidLong.split("@");
    String pid = items[0];
    try {
      File pidFile = new File(getPidFilePath());
      pidFileOutput = new FileOutputStream(pidFile);
      pidFileOutput.write(pid.getBytes(Charset.forName("UTF-8")));
      pidFileOutput.flush();
    } catch (IOException ex) {
      System.out.println("Initializaiton failed: can not write pid file.");
      log.error("Initialization failed...");
      log.error(ex.getMessage());
      throw ex;
    }
  }

  /** Closes and deletes the pid file written by init(). */
  public void clean() {
    String pidFileName = getPidFilePath();
    File pidFile = new File(pidFileName);
    if (!pidFile.exists()) {
      log.error("Delete pid file, No such file or directory: " + pidFileName);
    } else {
      try {
        // BUGFIX: guard against clean() being called when init() failed (or
        // was never called) and the stream was never opened.
        if (pidFileOutput != null) {
          pidFileOutput.close();
        }
      } catch (IOException e) {
        log.error("Unable to release file lock: " + pidFileName);
      }
    }
    boolean result = pidFile.delete();
    if (!result) {
      log.error("Delete pid file failed, " + pidFileName);
    }
  }

  /**
   * Builds the pid file path.
   *
   * BUGFIX: init() used System.getProperty("CHUKWA_HOME") while clean() used
   * System.getenv("CHUKWA_HOME"); when the two differed, the pid file was
   * written to one directory and deleted from another. Both paths now come
   * from this helper, preferring the system property and falling back to the
   * environment variable.
   */
  private String getPidFilePath() {
    String chukwaPath = System.getProperty("CHUKWA_HOME");
    if (chukwaPath == null) {
      chukwaPath = System.getenv("CHUKWA_HOME");
    }
    StringBuffer pidFilesb = new StringBuffer();
    pidFilesb.append(chukwaPath).append("/var/run/").append(name)
        .append(".pid");
    return pidFilesb.toString();
  }
}
| 8,312 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/mdl/ErStreamHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.mdl;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.BufferedReader;
import java.nio.charset.Charset;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class ErStreamHandler extends Thread {
  InputStream inpStr;
  String command;
  boolean record;

  private static Log log = LogFactory.getLog(ErStreamHandler.class);

  /**
   * @param inpStr stream to drain (typically a child process' stderr)
   * @param command command the stream belongs to, used in log messages
   * @param record when true, non-empty output is logged as an error
   */
  public ErStreamHandler(InputStream inpStr, String command, boolean record) {
    this.inpStr = inpStr;
    this.command = command;
    this.record = record;
  }

  /** Drains the stream to exhaustion; optionally logs the collected text. */
  public void run() {
    try {
      BufferedReader reader = new BufferedReader(
          new InputStreamReader(inpStr, Charset.forName("UTF-8")));
      StringBuffer collected = new StringBuffer();
      for (String text = reader.readLine(); text != null; text = reader.readLine()) {
        collected.append(text);
      }
      reader.close();

      if (record && collected.length() > 0) {
        log.error(command + " execution error:" + collected.toString());
      }
    } catch (Exception e) {
      log.error(command + " error:" + e.getMessage());
    }
  }
}
| 8,313 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/ExecPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import org.json.simple.JSONObject;
/**
* Runs external command-line tools, captures output.
*
* Subclasses are responsible for implementing getCmde(), which determines the
* command to be invoked.
*
*/
public abstract class ExecPlugin implements IPlugin {
  public final static int statusOK = 100;
  public final static int statusKO = -100;

  Process process = null;

  public ExecPlugin() {
  }

  /** Destroys the child process, if one was started. */
  public void stop() {
    if(process != null)
      process.destroy();
  }

  /**
   * Blocks until the child process exits.
   *
   * @return the process exit value
   * @throws InterruptedException if interrupted while waiting
   */
  public int waitFor() throws InterruptedException {
    return process.waitFor();
  }

  /** @return the command line to execute; supplied by subclasses. */
  public abstract String getCmde();

  /** Hook for subclasses to transform the raw result; default is identity. */
  public JSONObject postProcess(JSONObject execResult) {
    return execResult;
  }

  /**
   * Runs the command from getCmde(), capturing stdout/stderr, exit value and
   * status into a JSONObject, which is then passed through postProcess().
   */
  public JSONObject execute() {
    JSONObject result = new JSONObject();
    try {
      result.put("timestamp", System.currentTimeMillis());
      Runtime runtime = Runtime.getRuntime();
      process = runtime.exec(getCmde());

      // Drain stdout and stderr on separate threads to avoid the child
      // blocking on a full pipe buffer.
      OutputReader stdOut = new OutputReader(process, Output.stdOut);
      stdOut.start();
      OutputReader stdErr = new OutputReader(process, Output.stdErr);
      stdErr.start();
      int exitValue = process.waitFor();
      stdOut.join();
      stdErr.join();
      process.getInputStream().close(); //otherwise this implicitly stays open
      result.put("exitValue", exitValue);
      result.put("stdout", stdOut.output.toString());
      result.put("stderr", stdErr.output.toString());
      result.put("status", statusOK);
      process.getOutputStream().close();
      process.getErrorStream().close();
    } catch (Throwable e) {
      try {
        result.put("status", statusKO);
        // BUGFIX: e.getMessage() may be null (e.g. for NullPointerException);
        // calling contains() on it directly threw a secondary NPE inside this
        // handler and masked the original error.
        String message = e.getMessage();
        result.put("errorLog", message);
        if(message != null && message.contains("Too many open files")) {
          //maybe die abruptly? Error is ir-recoverable and runtime can reboot us.
          // System.exit(1);
        }
      } catch (Exception e1) {
        e1.printStackTrace();
      }
      e.printStackTrace();
    }
    return postProcess(result);
  }
}
// Identifies which of a child process' two output streams an OutputReader
// should drain.
enum Output {
  stdOut, stdErr
};
class OutputReader extends Thread {
  private Process process = null;
  private Output outputType = null;
  public StringBuilder output = new StringBuilder();

  /**
   * @param process the child process whose stream is to be drained
   * @param outputType which of the process' streams to read
   */
  public OutputReader(Process process, Output outputType) {
    this.process = process;
    this.outputType = outputType;
  }

  /** Reads the selected stream to exhaustion, accumulating lines in output. */
  public void run() {
    try {
      InputStream is = null;
      switch (this.outputType) {
      case stdOut:
        is = process.getInputStream();
        break;
      case stdErr:
        is = process.getErrorStream();
        break;
      }
      if (is != null) {
        BufferedReader reader = new BufferedReader(
            new InputStreamReader(is, Charset.forName("UTF-8")));
        for (String text = reader.readLine(); text != null; text = reader.readLine()) {
          output.append(text).append("\n");
        }
        reader.close();
      }
    } catch (IOException e) {
      e.printStackTrace();
    } catch (Throwable e) {
      e.printStackTrace();
    }
  }
}
| 8,314 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/IPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin;
import org.json.simple.JSONObject;
public interface IPlugin {
  /**
   * Runs the plugin and returns its result as a JSONObject (conventionally
   * including a "status" field; see ExecPlugin.statusOK / statusKO).
   */
  JSONObject execute();
}
| 8,315 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/pbsnode/PbsNodePlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin.pbsnode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.inputtools.mdl.DataConfig;
import org.apache.hadoop.chukwa.inputtools.plugin.ExecPlugin;
import org.apache.hadoop.chukwa.inputtools.plugin.IPlugin;
import org.json.simple.JSONObject;
public class PbsNodePlugin extends ExecPlugin {
  private static Log log = LogFactory.getLog(PbsNodePlugin.class);
  private String cmde = null;
  private DataConfig dataConfig = null;

  /** Reads the pbsnodes command line from the MDL configuration. */
  public PbsNodePlugin() {
    dataConfig = new DataConfig();
    cmde = dataConfig.get("chukwa.inputtools.plugin.pbsNode.cmde");
  }

  @Override
  public String getCmde() {
    return cmde;
  }

  /** Runs the plugin once; logs stdout on success, stderr on failure. */
  public static void main(String[] args) {
    IPlugin plugin = new PbsNodePlugin();
    JSONObject result = plugin.execute();
    System.out.print("Result: " + result);

    int status = (Integer) result.get("status");
    if (status < 0) {
      System.out.println("Error");
      log.warn("[ChukwaError]:" + PbsNodePlugin.class + ", "
          + result.get("stderr"));
    } else {
      log.info(result.get("stdout"));
    }
  }
}
| 8,316 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/metrics/ExecHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.inputtools.plugin.ExecPlugin;
public class ExecHelper extends ExecPlugin {
  private static Log log = LogFactory.getLog(ExecHelper.class);
  private String cmde = null;

  /**
   * Joins the command and its arguments into a single space-separated
   * command line (with a trailing space, matching historical behavior).
   */
  public ExecHelper(String[] cmds) {
    StringBuffer joined = new StringBuffer();
    for (String part : cmds) {
      joined.append(part).append(" ");
    }
    cmde = joined.toString();
  }

  public String getCmde() {
    return cmde;
  }
}
| 8,317 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/metrics/Exec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin.metrics;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.inputtools.plugin.IPlugin;
import org.json.simple.JSONObject;
public class Exec extends TimerTask {
  private static Log log = LogFactory.getLog(Exec.class);
  // Both are assigned exactly once in the constructor; final makes the
  // class's immutability explicit.
  private final String cmde;
  private final IPlugin plugin;

  /**
   * @param cmds command and arguments to execute on each timer tick
   */
  public Exec(String[] cmds) {
    StringBuffer c = new StringBuffer();
    for (String cmd : cmds) {
      c.append(cmd);
      c.append(" ");
    }
    cmde = c.toString();
    plugin = new ExecHelper(cmds);
  }

  /**
   * Runs the command once; logs stdout on success, or a warning with stderr
   * when the exit status is negative.
   */
  public void run() {
    try {
      JSONObject result = plugin.execute();
      int status = (Integer) result.get("status");
      if (status < 0) {
        System.out.println("Error");
        log.warn("[ChukwaError]:" + Exec.class + ", "
            + result.get("stderr"));
      } else {
        log.info(result.get("stdout"));
      }
    } catch (Exception e) {
      log.error("Exec output unparsable:" + this.cmde);
    }
  }

  /** @return the joined command line. */
  public String getCmde() {
    return cmde;
  }

  /** Schedules the command every PERIOD seconds (default 60). */
  public static void main(String[] args) {
    int period = 60;
    try {
      if (System.getProperty("PERIOD") != null) {
        period = Integer.parseInt(System.getProperty("PERIOD"));
      }
    } catch (NumberFormatException ex) {
      ex.printStackTrace();
      System.out
          .println("Usage: java -DPERIOD=nn -DRECORD_TYPE=recordType Exec [cmd]");
      System.out.println("PERIOD should be numeric format of seconds.");
      return;
    }
    Timer timer = new Timer();
    // Multiply as long so very large periods cannot overflow int.
    timer.schedule(new Exec(args), 0, period * 1000L);
  }
}
| 8,318 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/plugin/nodeactivity/NodeActivityPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.plugin.nodeactivity;
import org.apache.hadoop.chukwa.inputtools.mdl.DataConfig;
import org.apache.hadoop.chukwa.inputtools.plugin.ExecPlugin;
import org.apache.hadoop.chukwa.inputtools.plugin.IPlugin;
import org.json.simple.JSONObject;
/**
 * ExecPlugin that runs the configured node-activity command and summarizes,
 * from the "state =" lines of its output, how many nodes are free, in use
 * (job-exclusive), or down.
 */
public class NodeActivityPlugin extends ExecPlugin {
  // Command line to execute, read from mdl.plugin.NodeActivityPlugin.cmde.
  private final String cmde;
  private final DataConfig dataConfig;

  public NodeActivityPlugin() {
    dataConfig = new DataConfig();
    cmde = dataConfig.get("mdl.plugin.NodeActivityPlugin.cmde");
  }

  @Override
  public String getCmde() {
    return cmde;
  }

  /**
   * Counts node states found in the command's stdout and stores the totals
   * in the result object. On success "status" is set to 100; on any failure
   * the result carries status -100 and the error message in "errorLog".
   *
   * @param execResult raw execution result containing "status" and "stdout"
   * @return the same result object, augmented with the node counters
   */
  @Override
  @SuppressWarnings("unchecked")
  public JSONObject postProcess(JSONObject execResult) {
    try {
      if ((Integer) execResult.get("status") < 0) {
        // The command itself failed; pass the failure through untouched.
        return execResult;
      }
      String res = (String) execResult.get("stdout");
      int totalFreeNode = 0;
      int totalUsedNode = 0;
      int totalDownNode = 0;
      for (String line : res.split("\n")) {
        if (line.indexOf("state =") < 0) {
          continue;
        }
        // Everything after "state =" names the node state.
        String state = line.split("state =")[1].trim();
        if (state.equals("free")) {
          totalFreeNode++;
        } else if (state.equals("job-exclusive")) {
          totalUsedNode++;
        } else {
          // Any state other than free/job-exclusive is counted as down.
          totalDownNode++;
        }
      }
      execResult.put("totalFreeNode", totalFreeNode);
      execResult.put("totalUsedNode", totalUsedNode);
      execResult.put("totalDownNode", totalDownNode);
      execResult.put("source", "NodeActivity");
      execResult.put("status", 100);
    } catch (Throwable e) {
      try {
        execResult.put("source", "NodeActivity");
        execResult.put("status", -100);
        execResult.put("errorLog", e.getMessage());
      } catch (Exception e1) {
        e1.printStackTrace();
      }
      e.printStackTrace();
    }
    return execResult;
  }

  public static void main(String[] args) {
    IPlugin plugin = new NodeActivityPlugin();
    JSONObject result = plugin.execute();
    System.out.print("Result: " + result);
  }
}
| 8,319 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/ChukwaMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
import java.util.HashMap;
/**
 * A single metrics entry: an identifying key plus a set of name/value
 * attribute pairs.
 */
public interface ChukwaMetrics {
  /** Returns the identifier under which this entry's attributes are grouped. */
  String getKey();
  /** Returns the attribute name/value pairs that make up this entry. */
  HashMap<String, String> getAttributes();
}
| 8,320 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/JPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
/**
 * Contract for a Chukwa Java input plugin: after {@link #init(String[])},
 * the agent can repeatedly ask it for metrics and for its own status.
 *
 * @param <T> concrete metrics type produced by this plugin
 */
public interface JPlugin<T extends ChukwaMetrics> {
  /** Returns the Chukwa record type under which this plugin's output is logged. */
  String getRecordType();
  /** Collects and returns the plugin's current metrics. */
  ChukwaMetricsList<T> getMetrics() throws Throwable;
  /** Returns the plugin's own health/status metrics. */
  JPluginStatusMetricsList getStatus() throws Throwable;
  /** Initializes the plugin with the agent's command-line arguments. */
  void init(String[] args) throws Throwable;
}
| 8,321 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/JPluginStatusMetricsList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
/**
 * Metrics list specialized for plugin status reporting: every entry is keyed
 * by the owning plugin's name and the record type is fixed to
 * "JPluginStatus".
 */
public class JPluginStatusMetricsList extends GenericChukwaMetricsList {
  // Set once at construction and used as the key for every status entry.
  private final String pluginName;

  /**
   * @param pluginName name of the plugin whose status this list records
   */
  public JPluginStatusMetricsList(String pluginName) {
    super("JPluginStatus");
    this.pluginName = pluginName;
  }

  /**
   * Adds a single name/value status attribute, keyed by the plugin name.
   *
   * @param name  status attribute name
   * @param value status attribute value
   */
  public void addStatus(String name, String value) {
    GenericChukwaMetrics metrics = new GenericChukwaMetrics();
    metrics.put(name, value);
    metrics.setKey(pluginName);
    addMetrics(metrics);
  }
}
| 8,322 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/ChukwaMetricsList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Date;
import java.util.Map.Entry;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
/**
 * A timestamped list of {@link ChukwaMetrics} entries that can serialize
 * itself to XML: one root element named after the record type (with a "ts"
 * attribute) containing one {@code Metrics} element per entry.
 *
 * @param <T> concrete metrics type held by this list
 */
public class ChukwaMetricsList<T extends ChukwaMetrics> {
  // Lazily created by getMetricsList() on first access.
  private ArrayList<T> metricsList = null;
  // Defaults to the construction time; may be overridden via setTimestamp().
  private long timestamp = new Date().getTime();
  private String recordType = "JPlugin";

  public ChukwaMetricsList() {
  }

  /**
   * @param recordType record type used as the XML root element name
   */
  public ChukwaMetricsList(String recordType) {
    setRecordType(recordType);
  }

  public void setMetricsList(ArrayList<T> metricsList) {
    this.metricsList = metricsList;
  }

  /** Returns the backing list, creating an empty one on first use. */
  public ArrayList<T> getMetricsList() {
    if (metricsList == null) {
      metricsList = new ArrayList<T>();
    }
    return metricsList;
  }

  public void addMetrics(T metrics) {
    getMetricsList().add(metrics);
  }

  public long getTimestamp() {
    return timestamp;
  }

  public void setTimestamp(long timestamp) {
    this.timestamp = timestamp;
  }

  /**
   * Serializes this list to an indented XML string. The root element is the
   * record type, each entry becomes a {@code Metrics} element whose "key"
   * attribute is the entry key and whose remaining attributes are the
   * entry's name/value pairs.
   *
   * @return the XML document as a string
   * @throws Exception if the DOM document cannot be built or transformed
   */
  public String toXml() throws Exception {
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    DocumentBuilder docBuilder = factory.newDocumentBuilder();
    Document doc = docBuilder.newDocument();
    Element root = doc.createElement(getRecordType());
    doc.appendChild(root);
    root.setAttribute("ts", String.valueOf(getTimestamp()));
    for (ChukwaMetrics metrics : getMetricsList()) {
      Element elem = doc.createElement("Metrics");
      elem.setAttribute("key", metrics.getKey());
      for (Entry<String, String> attr : metrics.getAttributes().entrySet()) {
        elem.setAttribute(attr.getKey(), attr.getValue());
      }
      root.appendChild(elem);
    }
    Transformer transformer = TransformerFactory.newInstance().newTransformer();
    transformer.setOutputProperty("indent", "yes");
    StringWriter sw = new StringWriter();
    transformer.transform(new DOMSource(doc), new StreamResult(sw));
    return sw.toString();
  }

  public void setRecordType(String recordType) {
    this.recordType = recordType;
  }

  public String getRecordType() {
    return recordType;
  }
}
| 8,323 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/GenericChukwaMetricsList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
import java.io.StringReader;
import java.util.HashMap;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.Attr;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
/**
 * Metrics list that can also be reconstructed from the XML produced by
 * {@link ChukwaMetricsList#toXml()}.
 */
public class GenericChukwaMetricsList extends ChukwaMetricsList<ChukwaMetrics> {

  /**
   * Builds a fresh, DOCTYPE-rejecting document builder.
   *
   * DocumentBuilder instances are not thread-safe, so one is created per
   * parse instead of the previous shared static instance (which could also
   * be silently null if the static initializer had failed).
   */
  private static DocumentBuilder newDocBuilder()
      throws ParserConfigurationException {
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    // Harden against XXE: metrics XML never legitimately contains a DOCTYPE.
    factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl",
        true);
    return factory.newDocumentBuilder();
  }

  public GenericChukwaMetricsList() {
  }

  public GenericChukwaMetricsList(String recType) {
    super(recType);
  }

  /**
   * Populates this list from serialized metrics XML: the root tag becomes
   * the record type, its "ts" attribute the timestamp, and each "Metrics"
   * child element one entry ("key" attribute = entry key, other attributes
   * = name/value pairs).
   *
   * @param xml XML document produced by {@link ChukwaMetricsList#toXml()}
   * @throws Exception if the document cannot be parsed
   */
  public void fromXml(String xml) throws Exception {
    InputSource is = new InputSource(new StringReader(xml));
    Document doc = newDocBuilder().parse(is);
    Element root = doc.getDocumentElement();
    setRecordType(root.getTagName());
    setTimestamp(Long.parseLong(root.getAttribute("ts")));
    NodeList children = root.getChildNodes();
    for (int i = 0; i < children.getLength(); i++) {
      if (!children.item(i).getNodeName().equals("Metrics")) {
        continue;
      }
      NamedNodeMap attrs = children.item(i).getAttributes();
      if (attrs == null) {
        continue;
      }
      GenericChukwaMetrics metrics = new GenericChukwaMetrics();
      for (int a = 0; a < attrs.getLength(); a++) {
        Attr attr = (Attr) attrs.item(a);
        String name = attr.getName();
        String value = attr.getValue();
        if (name.equals("key")) {
          metrics.setKey(value);
        } else {
          metrics.put(name, value);
        }
      }
      addMetrics(metrics);
    }
  }

  /** A metrics entry backed directly by a HashMap of attribute pairs. */
  @SuppressWarnings("serial")
  public static class GenericChukwaMetrics extends HashMap<String, String> implements ChukwaMetrics {
    private String key;

    @Override
    public HashMap<String, String> getAttributes() {
      return this;
    }

    @Override
    public String getKey() {
      return key;
    }

    public void setKey(String key) {
      this.key = key;
    }
  }
}
| 8,324 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/jplugin/JPluginAgent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.jplugin;
import java.util.Calendar;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
 * Stand-alone driver that loads a {@link JPlugin} by class name, initializes
 * it, and either runs a single metrics collection or schedules one at a
 * fixed PERIOD (seconds, via system property), logging each result as XML.
 */
public class JPluginAgent {
  private static Log log = LogFactory.getLog(JPluginAgent.class);

  /** Collects the plugin's metrics and logs them as XML on each tick. */
  private static class MetricsTimerTask extends TimerTask {
    @SuppressWarnings( { "unchecked" })
    private JPlugin plugin;

    @SuppressWarnings("unchecked")
    public MetricsTimerTask(JPlugin plugin) {
      this.plugin = plugin;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void run() {
      try {
        ChukwaMetricsList metrics = plugin.getMetrics();
        String xml = metrics.toXml();
        log.info(xml);
      } catch (Throwable e) {
        log.error(e.getMessage(), e);
      }
    }
  }

  // NOTE(review): StatusTimerTask is never scheduled anywhere in this class;
  // kept as-is pending confirmation that it is intentionally dormant.
  /** Collects the plugin's status metrics and logs them as XML on each tick. */
  private static class StatusTimerTask extends TimerTask {
    @SuppressWarnings( { "unchecked" })
    private JPlugin plugin;

    @SuppressWarnings("unchecked")
    public StatusTimerTask(JPlugin plugin) {
      this.plugin = plugin;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void run() {
      try {
        ChukwaMetricsList metrics = plugin.getStatus();
        String xml = metrics.toXml();
        log.info(xml);
      } catch (Throwable e) {
        log.error(e.getMessage(), e);
      }
    }
  }

  /**
   * Entry point. args[0] is the plugin class name; remaining args are passed
   * to the plugin's init(). With no PERIOD property the plugin is run once.
   */
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    if (args.length < 1) {
      System.out
          .println("Usage: java -DPERIOD=nn JavaPluginAgent <class name> [parameters]");
      return;
    }
    int period = -1;
    try {
      if (System.getProperty("PERIOD") != null) {
        period = Integer.parseInt(System.getProperty("PERIOD"));
      }
    } catch (NumberFormatException ex) {
      ex.printStackTrace();
      System.out.println("PERIOD should be numeric format of seconds.");
      return;
    }
    JPlugin plugin = null;
    try {
      plugin = (JPlugin) Class.forName(args[0]).newInstance();
      plugin.init(args);
    } catch (Throwable e) {
      e.printStackTrace();
      return;
    }
    // Align the first scheduled run to second 3 of the next minute so that
    // periodic output lands at predictable timestamps.
    Calendar cal = Calendar.getInstance();
    long now = cal.getTime().getTime();
    cal.set(Calendar.SECOND, 3);
    cal.set(Calendar.MILLISECOND, 0);
    cal.add(Calendar.MINUTE, 1);
    long until = cal.getTime().getTime();
    try {
      if (period == -1) {
        // No period configured: collect exactly once, synchronously.
        new MetricsTimerTask(plugin).run();
      } else {
        Thread.sleep(until - now);
        Timer timer = new Timer();
        // 1000L keeps the multiplication in long arithmetic (no int overflow).
        timer.scheduleAtFixedRate(new MetricsTimerTask(plugin), 0,
            period * 1000L);
      }
    } catch (Exception ex) {
      log.debug(ExceptionUtil.getStackTrace(ex));
    }
  }
}
| 8,325 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/log4j/OneLineLogLayout.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
import org.apache.log4j.*;
import org.apache.log4j.spi.LoggingEvent;
/**
 * PatternLayout variant that flattens every log event onto a single line:
 * embedded newlines (and the trailing line terminator) are replaced by a
 * separator character, and any throwable representation is appended inline.
 */
public class OneLineLogLayout extends PatternLayout {
  char SEP = ' ';

  public String format(LoggingEvent evt) {
    String base = super.format(evt);
    StringBuilder flattened = new StringBuilder(base.length() + 1);
    // Copy everything except the final character (the line terminator),
    // mapping each embedded newline to the separator.
    int limit = base.length() - 1;
    for (int idx = 0; idx < limit; idx++) {
      char ch = base.charAt(idx);
      flattened.append(ch == '\n' ? SEP : ch);
    }
    flattened.append(SEP);
    // Inline the stack trace, one separator after each rendered line.
    String[] throwableLines = evt.getThrowableStrRep();
    if (throwableLines != null) {
      for (String line : throwableLines) {
        flattened.append(line);
        flattened.append(SEP);
      }
    }
    flattened.append('\n');
    return flattened.toString();
  }

  // The throwable is rendered by format() itself, so appenders must not
  // print it separately.
  public boolean ignoresThrowable() {
    return false;
  }

  /** Small manual smoke test: logs a warning with an attached exception. */
  public static void main(String[] args) {
    System.setProperty("line.separator", " ");
    Logger root = Logger.getRootLogger();
    root.removeAllAppenders();
    Appender console = new ConsoleAppender(new OneLineLogLayout());
    console.setName("console");
    root.addAppender(console);
    root.warn("testing", new java.io.IOException("just kidding!"));
  }
}
| 8,326 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/log4j/ChukwaTaskLogAppender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
import org.apache.hadoop.chukwa.datacollection.controller.ClientFinalizer;
import org.apache.hadoop.chukwa.util.RecordConstants;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
/**
 * TaskLogAppender variant that, on first append, registers the log file it
 * writes with the local Chukwa agent so the file is tailed and shipped as
 * escaped UTF-8 records.
 */
public class ChukwaTaskLogAppender extends
    org.apache.hadoop.mapred.TaskLogAppender {
  static Logger log = Logger.getLogger(ChukwaTaskLogAppender.class);
  static final String adaptorType = ChukwaAgentController.CharFileTailUTF8NewLineEscaped;
  ChukwaAgentController chukwaClient;
  // Record type reported to the agent; getRecordType() returns "unknown" when unset.
  String recordType = null;
  // First-level guard for the double-checked registration in subAppend().
  // NOTE(review): this flag is static while chukwaClient is an instance field,
  // so a second appender instance would never register its own file — confirm
  // only one instance of this appender is ever created.
  static boolean chukwaClientIsNull = true;
  static final Object chukwaLock = new Object();
  // Shutdown hook that finalizes the agent controller; created on registration.
  private ClientFinalizer clientFinalizer = null;
  /** Returns the configured record type, or "unknown" if none was set. */
  public String getRecordType() {
    if (recordType != null)
      return recordType;
    else
      return "unknown";
  }
  public void setRecordType(String recordType) {
    this.recordType = recordType;
  }
  /**
   * Writes the event (with embedded record separators escaped) and, on the
   * first call, registers a file-tailing adaptor for this appender's file
   * with the Chukwa agent, starting at offset 0.
   */
  public void subAppend(LoggingEvent event) {
    this.qw.write(RecordConstants.escapeAllButLastRecordSeparator("\n",this.layout.format(event)));
    // Make sure only one thread can do this
    // and use the boolean to avoid the first level locking
    if (chukwaClientIsNull) {
      synchronized (chukwaLock) {
        if (chukwaClient == null) {
          String log4jFileName = getFile();
          String recordType = getRecordType();
          long currentLength = 0L;
          chukwaClient = new ChukwaAgentController();
          chukwaClientIsNull = false;
          String adaptorID = chukwaClient.add(ChukwaAgentController.CharFileTailUTF8NewLineEscaped,
              recordType,currentLength + " " + log4jFileName, currentLength);
          // Setup a shutdownHook for the controller
          clientFinalizer = new ClientFinalizer(chukwaClient);
          Runtime.getRuntime().addShutdownHook(clientFinalizer);
          if (adaptorID != null) {
            log.debug("Added file tailing adaptor to chukwa agent for file "
                + log4jFileName + ", adaptorId:" + adaptorID
                + " using this recordType :" + recordType
                + ", starting at offset:" + currentLength);
          } else {
            log.debug("Chukwa adaptor not added, addFile(" + log4jFileName
                + ") returned, current offset:" + currentLength);
          }
        }
      }
    }
  }
}
| 8,327 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/log4j/Log4jMetricsSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
import org.json.simple.JSONObject;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
/**
 * Hadoop metrics2 sink that serializes each metrics record to a flat JSON
 * object and forwards it through a log4j SocketAppender to a collector
 * endpoint (default localhost:9095).
 */
public class Log4jMetricsSink implements MetricsSink {
  /* Configuration attribute names */
  private static final String HOST_PROPERTY = "host";
  private static final String PORT_PROPERTY = "port";
  private static final String TIMESTAMP = "timestamp";
  // Was a mutable static; nothing reassigns it, so it is now a true constant.
  private static final String CONTEXT = "context";
  private static final String CONTEXT_NAME = "contextName";
  private static final String RECORD_NAME = "recordName";

  protected String context = "HadoopMetrics";
  protected String host = "localhost";
  protected int port = 9095;
  // Logger wired to the socket appender; assigned once in init().
  protected static Logger out = null;

  /**
   * Reads host/port/context overrides from the sink configuration and builds
   * a dedicated, non-additive logger backed by a SocketAppender.
   */
  @Override
  public void init(SubsetConfiguration conf) {
    String host = conf.getString(HOST_PROPERTY);
    if (host != null) {
      this.host = host;
    }
    String port = conf.getString(PORT_PROPERTY);
    if (port != null) {
      this.port = Integer.parseInt(port);
    }
    String context = conf.getString(CONTEXT);
    if (context != null) {
      this.context = context;
    }
    PatternLayout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
    org.apache.log4j.net.SocketAppender appender = new org.apache.log4j.net.SocketAppender(this.host, this.port);
    appender.setName("chukwa.metrics." + this.context);
    appender.setLayout(layout);
    Logger logger = Logger.getLogger("chukwa.metrics." + this.context);
    logger.setAdditivity(false);
    logger.addAppender(appender);
    appender.activateOptions();
    out = logger;
  }

  /**
   * Serializes one metrics record (timestamp, context, name, tags, metrics)
   * into a single JSON object and logs it at INFO.
   */
  @Override
  @SuppressWarnings("unchecked")
  public void putMetrics(MetricsRecord record) {
    JSONObject json = new JSONObject();
    json.put(TIMESTAMP, Long.valueOf(record.timestamp()));
    json.put(CONTEXT_NAME, record.context());
    json.put(RECORD_NAME, record.name());
    for (MetricsTag tag : record.tags()) {
      json.put(tag.name(), tag.value());
    }
    for (AbstractMetric metric : record.metrics()) {
      json.put(metric.name(), metric.value());
    }
    out.info(json);
  }

  /** Nothing buffered locally: the socket appender ships events as logged. */
  @Override
  public void flush() {
  }
}
| 8,328 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/log4j/ChukwaDailyRollingFileAppender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Portions copyright (C) The Apache Software Foundation. All rights reserved.
*
* This software is published under the terms of the Apache Software
* License version 1.1, a copy of which has been included with this
* distribution in the LICENSE.txt file. */
package org.apache.hadoop.chukwa.inputtools.log4j;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.Locale;
import java.util.TimeZone;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
import org.apache.hadoop.chukwa.datacollection.controller.ClientFinalizer;
import org.apache.hadoop.chukwa.util.AdaptorNamingUtils;
import org.apache.hadoop.chukwa.util.RecordConstants;
import org.apache.log4j.FileAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.spi.LoggingEvent;
/**
ChukwaDailyRollingFileAppender is a slightly modified version of
DailyRollingFileAppender, with modified versions of its
<code>subAppend()</code> and <code>rollOver()</code> functions.
We would have preferred to sub-class DailyRollingFileAppender but
its authors clearly did not intend that to be a viable option since
they made too much of the class private or package-private
DailyRollingFileAppender extends {@link FileAppender} so that the
underlying file is rolled over at a user chosen frequency.
<p>The rolling schedule is specified by the <b>DatePattern</b>
option. This pattern should follow the {@link SimpleDateFormat}
conventions. In particular, you <em>must</em> escape literal text
within a pair of single quotes. A formatted version of the date
pattern is used as the suffix for the rolled file name.</p>
<p>For example, if the <b>File</b> option is set to
<code>/foo/bar.log</code> and the <b>DatePattern</b> set to
<code>'.'yyyy-MM-dd</code>, on 2001-02-16 at midnight, the logging
file <code>/foo/bar.log</code> will be copied to
<code>/foo/bar.log.2001-02-16</code> and logging for 2001-02-17
will continue in <code>/foo/bar.log</code> until it rolls over
the next day.</p>
<p>Is is possible to specify monthly, weekly, half-daily, daily,
hourly, or minutely rollover schedules.</p>
<table border="1" cellpadding="2" summary="">
<tr>
<th>DatePattern</th>
<th>Rollover schedule</th>
<th>Example</th></tr>
<tr>
<td><code>'.'yyyy-MM</code>
<td>Rollover at the beginning of each month</td>
<td>At midnight of May 31st, 2002 <code>/foo/bar.log</code> will be
copied to <code>/foo/bar.log.2002-05</code>. Logging for the month
of June will be output to <code>/foo/bar.log</code> until it is
also rolled over the next month.</td></tr>
<tr>
<td><code>'.'yyyy-ww</code>
<td>Rollover at the first day of each week. The first day of the
week depends on the locale.</td>
<td>Assuming the first day of the week is Sunday, on Saturday
midnight, June 9th 2002, the file <i>/foo/bar.log</i> will be
copied to <i>/foo/bar.log.2002-23</i>. Logging for the 24th week
of 2002 will be output to <code>/foo/bar.log</code> until it is
rolled over the next week.</td></tr>
<tr>
<td><code>'.'yyyy-MM-dd</code>
<td>Rollover at midnight each day.</td>
<td>At midnight, on March 8th, 2002, <code>/foo/bar.log</code> will
be copied to <code>/foo/bar.log.2002-03-08</code>. Logging for the
9th day of March will be output to <code>/foo/bar.log</code> until
it is rolled over the next day.</td></tr>
<tr>
<td><code>'.'yyyy-MM-dd-a</code>
<td>Rollover at midnight and midday of each day.</td>
<td>At noon, on March 9th, 2002, <code>/foo/bar.log</code> will be
copied to <code>/foo/bar.log.2002-03-09-AM</code>. Logging for the
afternoon of the 9th will be output to <code>/foo/bar.log</code>
until it is rolled over at midnight.</td></tr>
<tr>
<td><code>'.'yyyy-MM-dd-HH</code>
<td>Rollover at the top of every hour.</td>
<td>At approximately 11:00.000 o'clock on March 9th, 2002,
<code>/foo/bar.log</code> will be copied to
<code>/foo/bar.log.2002-03-09-10</code>. Logging for the 11th hour
of the 9th of March will be output to <code>/foo/bar.log</code>
until it is rolled over at the beginning of the next hour.</td></tr>
<tr>
<td><code>'.'yyyy-MM-dd-HH-mm</code></td>
<td>Rollover at the beginning of every minute.</td>
<td>At approximately 11:23,000, on March 9th, 2001,
<code>/foo/bar.log</code> will be copied to
<code>/foo/bar.log.2001-03-09-10-22</code>. Logging for the minute
of 11:23 (9th of March) will be output to
<code>/foo/bar.log</code> until it is rolled over the next minute.</td></tr>
</table>
<p>Do not use the colon ":" character in anywhere in the
<b>DatePattern</b> option. The text before the colon is interpeted
as the protocol specificaion of a URL which is probably not what
you want.</p> */
public class ChukwaDailyRollingFileAppender extends FileAppender {
static Logger log = Logger.getLogger(ChukwaDailyRollingFileAppender.class);
// The code assumes that the following constants are in a increasing
// sequence.
static final int TOP_OF_TROUBLE = -1;
static final int TOP_OF_MINUTE = 0;
static final int TOP_OF_HOUR = 1;
static final int HALF_DAY = 2;
static final int TOP_OF_DAY = 3;
static final int TOP_OF_WEEK = 4;
static final int TOP_OF_MONTH = 5;
static final String adaptorType = ChukwaAgentController.CharFileTailUTF8NewLineEscaped;
static final Object lock = new Object();
static String lastRotation = "";
/**
* The date pattern. By default, the pattern is set to "'.'yyyy-MM-dd" meaning
* daily rollover.
*/
private String datePattern = "'.'yyyy-MM-dd";
/**
* The log file will be renamed to the value of the scheduledFilename variable
* when the next interval is entered. For example, if the rollover period is
* one hour, the log file will be renamed to the value of "scheduledFilename"
* at the beginning of the next hour.
*
* The precise time when a rollover occurs depends on logging activity.
*/
private String scheduledFilename;
/**
* The next time we estimate a rollover should occur.
*/
private long nextCheck = System.currentTimeMillis() - 1;
/**
* Regex to select log files to be deleted
*/
private String cleanUpRegex = null;
/**
* Set the maximum number of backup files to keep around.
*/
private int maxBackupIndex = 10;
private ClientFinalizer clientFinalizer = null;
boolean hasBeenActivated = false;
Date now = new Date();
SimpleDateFormat sdf;
RollingCalendar rc = new RollingCalendar();
int checkPeriod = TOP_OF_TROUBLE;
ChukwaAgentController chukwaClient;
boolean chukwaClientIsNull = true;
static final Object chukwaLock = new Object();
String chukwaClientHostname;
int chukwaClientPortNum;
long chukwaClientConnectNumRetry;
long chukwaClientConnectRetryInterval;
String recordType;
// The gmtTimeZone is used only in computeCheckPeriod() method.
static final TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT");
/**
* The default constructor does nothing.
* @throws IOException if constructor initialization error
*/
  // No-arg constructor; configuration is applied later through the property
  // setters (setFile, setDatePattern, ...) and activateOptions().
  public ChukwaDailyRollingFileAppender() throws IOException {
    super();
  }
/**
Instantiate a <code>DailyRollingFileAppender</code> and open the
file designated by <code>filename</code>. The opened filename will
become the output destination for this appender.
* @param layout is logging layout
* @param filename is the filename to write logs
* @param datePattern is the date pattern of log suffix
* @throws IOException if constructor initialization error
*/
public ChukwaDailyRollingFileAppender(Layout layout, String filename,
String datePattern) throws IOException {
super(layout, filename, true);
System.out
.println("Daily Rolling File Appender successfully registered file with agent: "
+ filename);
this.datePattern = datePattern;
}
/**
 * The <b>DatePattern</b> takes a string in the same format as expected by
 * {@link SimpleDateFormat}. This option determines the rollover schedule.
 * @param pattern is date formatting pattern
 */
public void setDatePattern(String pattern) {
  datePattern = pattern;
}

/** Returns the value of the <b>DatePattern</b> option.
 * @return date pattern
 */
public String getDatePattern() {
  return datePattern;
}
/**
 * Returns the Chukwa record type configured for this appender.
 *
 * @return the configured record type, or "unknown" when none has been set
 */
public String getRecordType() {
  return (recordType == null) ? "unknown" : recordType;
}
/**
 * Sets the Chukwa record type under which this appender's log data is
 * reported (typically injected from log4j.properties).
 * @param recordType the record type to report
 */
public void setRecordType(String recordType) {
  this.recordType = recordType;
}
/**
 * Activates the configured options: computes the rollover periodicity from
 * the date pattern and derives the initial scheduled (rotated) filename from
 * the log file's last-modified time. Deliberately a no-op until
 * checkEntryConditions() flips hasBeenActivated, so that every log4j property
 * setter has run first.
 */
public void activateOptions() {
  // Prevent early initialisation
  if (!hasBeenActivated)
  { return;}

  super.activateOptions();
  if (datePattern != null && fileName != null) {
    now.setTime(System.currentTimeMillis());
    sdf = new SimpleDateFormat(datePattern);
    int type = computeCheckPeriod();
    printPeriodicity(type);
    rc.setType(type);
    // If the file already exists, schedule the first rollover name from its
    // last-modified time so a restart does not skip a rotation.
    File file = new File(fileName);
    scheduledFilename = fileName + sdf.format(new Date(file.lastModified()));
  } else {
    LogLog
        .error("Either File or DatePattern options are not set for appender ["
            + name + "].");
  }
}
/**
 * Logs (at debug level) the human-readable rollover schedule corresponding to
 * the computed periodicity type, or a warning for an unknown type.
 *
 * @param type one of the TOP_OF_* / HALF_DAY periodicity constants
 */
void printPeriodicity(int type) {
  final String schedule;
  switch (type) {
  case TOP_OF_MINUTE:
    schedule = "to be rolled every minute.";
    break;
  case TOP_OF_HOUR:
    schedule = "to be rolled on top of every hour.";
    break;
  case HALF_DAY:
    schedule = "to be rolled at midday and midnight.";
    break;
  case TOP_OF_DAY:
    schedule = "to be rolled at midnight.";
    break;
  case TOP_OF_WEEK:
    schedule = "to be rolled at start of week.";
    break;
  case TOP_OF_MONTH:
    schedule = "to be rolled at start of every month.";
    break;
  default:
    LogLog.warn("Unknown periodicity for appender [" + name + "].");
    return;
  }
  LogLog.debug("Appender [" + name + "] " + schedule);
}
// This method computes the roll over period by looping over the
// periods, starting with the shortest, and stopping when the r0 is
// different from r1, where r0 is the epoch formatted according to
// the datePattern (supplied by the user) and r1 is the
// epoch+nextMillis(i) formatted according to datePattern. All date
// formatting is done in GMT and not local format because the test
// logic is based on comparisons relative to 1970-01-01 00:00:00
// GMT (the epoch).
int computeCheckPeriod() {
  RollingCalendar rollingCalendar = new RollingCalendar(gmtTimeZone,
      Locale.ENGLISH);
  // set date to 1970-01-01 00:00:00 GMT
  Date epoch = new Date(0);
  if (datePattern != null) {
    for (int i = TOP_OF_MINUTE; i <= TOP_OF_MONTH; i++) {
      SimpleDateFormat simpleDateFormat = new SimpleDateFormat(datePattern);
      simpleDateFormat.setTimeZone(gmtTimeZone); // do all date formatting in
                                                 // GMT
      String r0 = simpleDateFormat.format(epoch);
      rollingCalendar.setType(i);
      Date next = new Date(rollingCalendar.getNextCheckMillis(epoch));
      String r1 = simpleDateFormat.format(next);
      // The shortest period whose interval boundary changes the formatted
      // string is the finest rollover granularity the pattern can express.
      if (r0 != null && r1 != null && !r0.equals(r1)) {
        return i;
      }
    }
  }
  return TOP_OF_TROUBLE; // Deliberately head for trouble...
}
/**
 * Rollover the current file to a new file.
 *
 * Closes the live file, renames it to the scheduled (dated) name, reopens a
 * fresh file under the original name, then prunes old rotations via
 * cleanUp(). No-op while still inside the current rollover interval.
 */
void rollOver() throws IOException {

  /* Compute filename, but only if datePattern is specified */
  if (datePattern == null) {
    errorHandler.error("Missing DatePattern option in rollOver().");
    return;
  }

  String datedFilename = fileName + sdf.format(now);
  // It is too early to roll over because we are still within the
  // bounds of the current interval. Rollover will occur once the
  // next interval is reached.
  if (scheduledFilename.equals(datedFilename)) {
    return;
  }

  // close current file, and rename it to datedFilename
  this.closeFile();

  File target = new File(scheduledFilename);
  if (target.exists()) {
    if(!target.delete()) {
      LogLog.warn("Unable to remove: "+target.getAbsolutePath());
    };
  }

  File file = new File(fileName);
  boolean result = file.renameTo(target);
  if (result) {
    LogLog.debug(fileName + " -> " + scheduledFilename);
  } else {
    LogLog.error("Failed to rename [" + fileName + "] to ["
        + scheduledFilename + "].");
  }

  try {
    // This will also close the file. This is OK since multiple
    // close operations are safe.
    this.setFile(fileName, false, this.bufferedIO, this.bufferSize);
  } catch (IOException e) {
    errorHandler.error("setFile(" + fileName + ", false) call failed.");
  }
  scheduledFilename = datedFilename;
  cleanUp();
}
/** @return the regex (containing a "$fileName" placeholder) used to select old rotations for deletion */
public synchronized String getCleanUpRegex() {
  return cleanUpRegex;
}

/** Sets the cleanup regex; must contain the literal "$fileName" placeholder (see cleanUp()). */
protected synchronized void setCleanUpRegex(String cleanUpRegex) {
  this.cleanUpRegex = cleanUpRegex;
}

/** @return maximum number of rotated files kept by cleanUp() */
public int getMaxBackupIndex() {
  return maxBackupIndex;
}

/** Sets the maximum number of rotated files to keep around. */
public void setMaxBackupIndex(int maxBackupIndex) {
  this.maxBackupIndex = maxBackupIndex;
}
/**
 * Deletes the oldest rotated log files, keeping at most maxBackupIndex of
 * them. Candidate files live in the log directory, share the live log's name
 * as a prefix, and match cleanUpRegex with "$fileName" substituted. Files are
 * sorted lexicographically, which for the default date suffix is also
 * chronological order.
 */
protected synchronized void cleanUp() {
  String regex = "";
  try {
    File actualFile = new File(fileName);

    String directoryName = actualFile.getParent();
    String actualFileName = actualFile.getName();
    File dirList = new File(directoryName);

    // The regex must be anchored to the live file's name via the placeholder.
    if (cleanUpRegex == null || !cleanUpRegex.contains("$fileName")) {
      LogLog
          .error("cleanUpRegex == null || !cleanUpRegex.contains(\"$fileName\")");
      return;
    }
    regex = cleanUpRegex.replace("$fileName", actualFileName);
    String[] dirFiles = dirList.list(new LogFilter(actualFileName, regex));

    List<String> files = new ArrayList<String>();
    if(dirFiles!=null) { // list() returns null on I/O error or non-directory
      for (String file : dirFiles) {
        files.add(file);
      }
    }
    // Oldest date-suffixed rotation sorts first, so it is removed first.
    Collections.sort(files);

    while (files.size() > maxBackupIndex) {
      String file = files.remove(0);
      File f = new File(directoryName + "/" + file);
      if(!f.delete()) {
        LogLog.warn("Cannot remove: " + file);
      }
    }
  } catch (Exception e) {
    // Best effort: a failed cleanup must never break logging itself.
    errorHandler
        .error("cleanUp(" + fileName + "," + regex + ") call failed.");
  }
}
/**
 * FilenameFilter used by cleanUp() to select rotated log files eligible for
 * deletion: files that share the live log's name as a prefix and match the
 * user-supplied cleanup regex. The live log file itself is never selected.
 */
private static class LogFilter implements FilenameFilter {
  private Pattern p = null;
  private String logFile = null;

  public LogFilter(String logFile, String regex) {
    this.logFile = logFile;
    p = Pattern.compile(regex);
  }

  @Override
  public boolean accept(File dir, String name) {
    // Never delete the live log file. (Idiom fix: the previous code compared
    // interned references with ==, which happens to work but is fragile and
    // pays the cost of interning both strings; equals() is the correct
    // value comparison.)
    if (name.equals(this.logFile)) {
      return false;
    }
    // ignore file without the same prefix
    if (!name.startsWith(logFile)) {
      return false;
    }
    return p.matcher(name).find();
  }
}
/**
 * Fix for late-initialisation: performs the deferred activateOptions()
 * exactly once, on the first append attempt, after all log4j property
 * setters have run. Guarded by chukwaLock so concurrent first appends
 * activate only once.
 */
@Override
protected boolean checkEntryConditions() {
  synchronized(chukwaLock) {
    if (!hasBeenActivated) {
      hasBeenActivated = true;
      activateOptions();
    }
  }
  return super.checkEntryConditions();
}
/**
 * This method differentiates DailyRollingFileAppender from its super class.
 *
 * <p>Before actually logging, this method will check whether it is time to do
 * a rollover. If it is, it will schedule the next rollover time and then
 * rollover.
 *
 * <p>It also lazily registers a file-tailing adaptor with the Chukwa agent on
 * the very first append, and escapes embedded newlines in each record body
 * before writing so the tailing adaptor can split records reliably.
 * */
protected void subAppend(LoggingEvent event) {
  try {
    // we set up the chukwa adaptor here because this is the first
    // point which is called after all setters have been called with
    // their values from the log4j.properties file, in particular we
    // needed to give setChukwaClientPortNum() and -Hostname() a shot

    // Make sure only one thread can do this
    // and use the boolean to avoid the first level locking
    if (chukwaClientIsNull) {
      synchronized (chukwaLock) {

        String log4jFileName = getFile();
        String recordType = getRecordType();
        long currentLength = 0L;
        try {
          File fooLog = new File(log4jFileName);
          log4jFileName = fooLog.getAbsolutePath();
          currentLength = fooLog.length();
        } catch (Throwable e) {
          log.warn("Exception while trying to get current file size for " + log4jFileName);
          currentLength = 0L;
        }

        if (chukwaClient == null) {
          if (getChukwaClientHostname() != null
              && getChukwaClientPortNum() != 0) {
            chukwaClient = new ChukwaAgentController(
                getChukwaClientHostname(), getChukwaClientPortNum());
            log.debug("setup adaptor with hostname "
                + getChukwaClientHostname() + " and portnum "
                + getChukwaClientPortNum());
          } else {
            chukwaClient = new ChukwaAgentController();
            log
                .debug("setup adaptor with no args, which means it used its defaults");
          }

          chukwaClientIsNull = false;

          // Watchdog is watching for ChukwaAgent only once every 5 minutes,
          // so there's no point in retrying more than once every 5 mins.
          // In practice, if the watchdog is not able to automatically restart
          // the agent, it will take more than 20 minutes to get Ops to
          // restart it.
          // Also its a good to limit the number of communications between
          // Hadoop and Chukwa, that's why 30 minutes.
          long retryInterval = chukwaClientConnectRetryInterval;
          if (retryInterval == 0) {
            retryInterval = 1000 * 60 * 30;
          }
          long numRetries = chukwaClientConnectNumRetry;
          if (numRetries == 0) {
            numRetries = 48;
          }

          // Deterministic adaptor id so re-registration after restart
          // resumes the same stream rather than creating a duplicate.
          String name = AdaptorNamingUtils.synthesizeAdaptorID
            (ChukwaAgentController.CharFileTailUTF8NewLineEscaped, recordType, log4jFileName);

          String adaptorID = chukwaClient.addByName(name, ChukwaAgentController.CharFileTailUTF8NewLineEscaped,
              recordType,currentLength + " " + log4jFileName, currentLength,
              numRetries, retryInterval);

          // Setup a shutdownHook for the controller
          clientFinalizer = new ClientFinalizer(chukwaClient);
          Runtime.getRuntime().addShutdownHook(clientFinalizer);

          if (adaptorID != null) {
            log.debug("Added file tailing adaptor to chukwa agent for file "
                + log4jFileName + ", adaptorId:" + adaptorID
                + " using this recordType :" + recordType
                + ", starting at offset:" + currentLength);
          } else {
            log.debug("Chukwa adaptor not added, addFile(" + log4jFileName
                + ") returned, current offset: " + currentLength);
          }
        }
      }
    }

    // Time-based rollover check, as in DailyRollingFileAppender.
    long n = System.currentTimeMillis();
    if (n >= nextCheck) {
      now.setTime(n);
      nextCheck = rc.getNextCheckMillis(now);
      try {
        rollOver();
      } catch (IOException ioe) {
        LogLog.error("rollOver() failed.", ioe);
      }
    }

    boolean written = false;
    if(layout.ignoresThrowable()) {
      String[] s = event.getThrowableStrRep();
      if (s != null) {
        int len = s.length;
        StringBuilder sb = new StringBuilder();
        sb.append(this.layout.format(event));
        for(int i = 0; i < len; i++) {
          sb.append(s[i]).append("\n");
        }
        //escape the newlines from record bodies, exception and then write this record to the log file
        written = true;
        this.qw.write(RecordConstants.escapeAllButLastRecordSeparator("\n",sb.toString()));
      }
    }
    if (!written) {
      //escape the newlines from record bodies and then write this record to the log file
      this.qw.write(RecordConstants.escapeAllButLastRecordSeparator("\n",this.layout.format(event)));
    }

    if (this.immediateFlush) {
      this.qw.flush();
    }
  } catch (Throwable e) {
    // Never let a logging failure propagate into the application.
    System.err.println("Exception in ChukwaRollingAppender: "
        + e.getMessage());
    e.printStackTrace();
  }
}
/** @return the hostname of the Chukwa agent to register with (may be null) */
public String getChukwaClientHostname() {
  return chukwaClientHostname;
}

/** Sets the Chukwa agent hostname (injected from log4j.properties). */
public void setChukwaClientHostname(String chukwaClientHostname) {
  this.chukwaClientHostname = chukwaClientHostname;
}

/** @return the Chukwa agent control port, or 0 when unset */
public int getChukwaClientPortNum() {
  return chukwaClientPortNum;
}

/** Sets the Chukwa agent control port (injected from log4j.properties). */
public void setChukwaClientPortNum(int chukwaClientPortNum) {
  this.chukwaClientPortNum = chukwaClientPortNum;
}

/** Sets how many times to retry connecting to the agent; 0 selects the default (48). */
public void setChukwaClientConnectNumRetry(int i) {
  this.chukwaClientConnectNumRetry = i;
}

/** Sets the connect retry interval in milliseconds; 0 selects the default (30 minutes). */
public void setChukwaClientConnectRetryInterval(long i) {
  this.chukwaClientConnectRetryInterval = i;
}
}
/**
 * RollingCalendar is a helper class to DailyRollingFileAppender. Given a
 * periodicity type and the current time, it computes the start of the next
 * interval.
 * */
class RollingCalendar extends GregorianCalendar {

  private static final long serialVersionUID = 2153481574198792767L;

  // Periodicity constant (TOP_OF_MINUTE .. TOP_OF_MONTH); remains
  // TOP_OF_TROUBLE until setType() is called.
  int type = ChukwaDailyRollingFileAppender.TOP_OF_TROUBLE;

  RollingCalendar() {
    super();
  }

  RollingCalendar(TimeZone tz, Locale locale) {
    super(tz, locale);
  }

  void setType(int type) {
    this.type = type;
  }

  public long getNextCheckMillis(Date now) {
    return getNextCheckDate(now).getTime();
  }

  /**
   * Computes the first instant of the next rollover interval strictly after
   * {@code now} for the configured periodicity type.
   *
   * @param now the reference time
   * @return the start of the next interval
   * @throws IllegalStateException if the type was never set to a valid value
   */
  public Date getNextCheckDate(Date now) {
    this.setTime(now);

    switch (type) {
    case ChukwaDailyRollingFileAppender.TOP_OF_MINUTE:
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      this.add(Calendar.MINUTE, 1);
      break;
    case ChukwaDailyRollingFileAppender.TOP_OF_HOUR:
      this.set(Calendar.MINUTE, 0);
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      this.add(Calendar.HOUR_OF_DAY, 1);
      break;
    case ChukwaDailyRollingFileAppender.HALF_DAY:
      this.set(Calendar.MINUTE, 0);
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      int hour = get(Calendar.HOUR_OF_DAY);
      if (hour < 12) {
        this.set(Calendar.HOUR_OF_DAY, 12);
      } else {
        this.set(Calendar.HOUR_OF_DAY, 0);
        this.add(Calendar.DAY_OF_MONTH, 1);
      }
      break;
    case ChukwaDailyRollingFileAppender.TOP_OF_DAY:
      this.set(Calendar.HOUR_OF_DAY, 0);
      this.set(Calendar.MINUTE, 0);
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      this.add(Calendar.DATE, 1);
      break;
    case ChukwaDailyRollingFileAppender.TOP_OF_WEEK:
      this.set(Calendar.DAY_OF_WEEK, getFirstDayOfWeek());
      this.set(Calendar.HOUR_OF_DAY, 0);
      // Bug fix: MINUTE was never reset here, so the weekly rollover kept the
      // current minute-of-hour instead of firing at exactly 00:00:00.
      this.set(Calendar.MINUTE, 0);
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      this.add(Calendar.WEEK_OF_YEAR, 1);
      break;
    case ChukwaDailyRollingFileAppender.TOP_OF_MONTH:
      this.set(Calendar.DATE, 1);
      this.set(Calendar.HOUR_OF_DAY, 0);
      // Bug fix: MINUTE was never reset here either (same defect as
      // TOP_OF_WEEK above).
      this.set(Calendar.MINUTE, 0);
      this.set(Calendar.SECOND, 0);
      this.set(Calendar.MILLISECOND, 0);
      this.add(Calendar.MONTH, 1);
      break;
    default:
      throw new IllegalStateException("Unknown periodicity type.");
    }
    return getTime();
  }

  // GregorianCalendar already defines a consistent equals/hashCode pair;
  // these overrides exist only to keep "equals without hashCode" style
  // checks quiet.
  @Override
  public boolean equals(Object o) {
    return super.equals(o);
  }

  @Override
  public int hashCode() {
    return super.hashCode();
  }
}
| 8,329 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/log4j/Log4JMetricsContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
/**
* Log4JMetricsContext is a plugin for reporting Hadoop Metrics through
* syslog protocol. Usage:
*
* Copy hadoop-metrics.properties file from CHUKWA_HOME/conf to HADOOP_HOME/conf.
* Copy chukwa-hadoop-*-client.jar and json.jar to HADOOP_HOME/lib
*
*/
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsException;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
import org.apache.hadoop.metrics.spi.OutputRecord;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.json.simple.JSONObject;
import java.util.TreeMap;
import java.util.Map;
import java.util.Collection;
import java.io.IOException;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
 * Hadoop metrics context that emits each metrics record as one JSON line
 * through a dedicated log4j SocketAppender (one non-additive logger per
 * context name), typically consumed by a Chukwa socket adaptor.
 *
 * <p>Thread-safety: appender creation and record emission are serialized on a
 * single static lock shared by all instances.
 */
public class Log4JMetricsContext extends AbstractMetricsContext {
  Logger log = Logger.getLogger(Log4JMetricsContext.class);
  // Lazily created logger that actually ships records; guarded by lock.
  Logger out = null;
  static final Object lock = new Object();

  /* Configuration attribute names */
  protected static final String PERIOD_PROPERTY = "period";
  protected static final String HOST_PROPERTY = "host";
  protected static final String PORT_PROPERTY = "port";

  // Emission period in seconds (0 until configured) and the destination
  // host/port of the receiving socket.
  protected int period = 0;
  protected String host = "localhost";
  protected int port = 9095;

  /** Creates a new instance of MetricsContext */
  public Log4JMetricsContext() {
  }

  /**
   * Reads the "period", "host" and "port" attributes for this context.
   * A non-numeric or non-positive period raises a MetricsException.
   */
  public void init(String contextName, ContextFactory factory) {
    super.init(contextName, factory);

    String periodStr = getAttribute(PERIOD_PROPERTY);
    if (periodStr != null) {
      // Local renamed from "period" so it no longer shadows the field.
      int parsedPeriod = 0;
      try {
        parsedPeriod = Integer.parseInt(periodStr);
      } catch (NumberFormatException nfe) {
        log.debug(ExceptionUtil.getStackTrace(nfe));
      }
      if (parsedPeriod <= 0) {
        throw new MetricsException("Invalid period: " + periodStr);
      }
      setPeriod(parsedPeriod);
      this.period = parsedPeriod;
      log.info("Log4JMetricsContext." + contextName + ".period=" + parsedPeriod);
    }
    // Locals renamed from "host"/"port" so they no longer shadow the fields.
    String hostAttr = getAttribute(HOST_PROPERTY);
    if (hostAttr != null) {
      this.host = hostAttr;
    }
    String portAttr = getAttribute(PORT_PROPERTY);
    if (portAttr != null) {
      // NOTE(review): a malformed port still throws NumberFormatException,
      // unlike the period handling above — confirm whether that is intended.
      this.port = Integer.parseInt(portAttr);
    }
  }

  /**
   * Serializes the record (context/record names, timestamp, period, tags and
   * metrics) to JSON and writes it through the socket-backed logger, creating
   * that logger on first use.
   */
  @Override
  protected void emitRecord(String contextName, String recordName,
      OutputRecord outRec) throws IOException {
    synchronized (lock) {
      if (out == null) {
        PatternLayout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");

        org.apache.log4j.net.SocketAppender appender = new org.apache.log4j.net.SocketAppender(host, port);

        appender.setName("chukwa.metrics." + contextName);
        appender.setLayout(layout);

        Logger logger = Logger.getLogger("chukwa.metrics." + contextName);

        // Non-additive so records go only to the remote collector, not to
        // the root logger's appenders.
        logger.setAdditivity(false);
        logger.addAppender(appender);
        appender.activateOptions();
        out = logger;
      }
      JSONObject json = new JSONObject();
      try {
        json.put("contextName", contextName);
        json.put("recordName", recordName);
        json.put("timestamp", System.currentTimeMillis());
        json.put("period", period);
        for (String tagName : outRec.getTagNames()) {
          json.put(tagName, outRec.getTag(tagName));
        }
        for (String metricName : outRec.getMetricNames()) {
          json.put(metricName, outRec.getMetric(metricName));
        }
      } catch (Exception e) {
        log.warn("exception in Log4jMetricsContext:" , e);
      }
      out.info(json.toString());
    }
  }

  /**
   * Always returns an empty map: records are shipped immediately by
   * emitRecord() and never buffered for later retrieval. The commented-out
   * code shows what a buffered implementation would look like.
   */
  @Override
  public synchronized Map<String, Collection<OutputRecord>> getAllRecords() {
    // Local renamed from "out" so it no longer shadows the logger field.
    Map<String, Collection<OutputRecord>> records = new TreeMap<String, Collection<OutputRecord>>();
    /* for (String recordName : bufferedData.keySet()) {
      RecordMap recordMap = bufferedData.get(recordName);
      synchronized (recordMap) {
        List<OutputRecord> recs = new ArrayList<OutputRecord>();
        Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet();
        for (Entry<TagMap, MetricMap> entry : entrySet) {
          OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
          recs.add(outRec);
        }
        records.put(recordName, recs);
      }
    }*/
    return records;
  }
}
| 8,330 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/HDFSUsagePlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
import java.net.URI;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetricsList;
import org.apache.hadoop.chukwa.inputtools.jplugin.JPlugin;
import org.apache.hadoop.chukwa.inputtools.jplugin.JPluginStatusMetricsList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * JPlugin that reports per-user HDFS space usage. For every path matching the
 * configured glob (default "/user/*"), it emits one metric with the path's
 * total content length; when an OrgChart is configured, the same bytes are
 * also attributed to each manager up the reporting chain.
 */
public class HDFSUsagePlugin implements JPlugin<HDFSUsageMetrics> {
  private FileSystem hdfs;
  private String path;
  private OrgChart chart;

  @Override
  public ChukwaMetricsList<HDFSUsageMetrics> getMetrics() throws Throwable {
    ChukwaMetricsList<HDFSUsageMetrics> metricsList = new ChukwaMetricsList<HDFSUsageMetrics>(getRecordType());
    FileStatus status[] = hdfs.globStatus(new Path(path));
    if (status == null) {
      // Robustness fix: globStatus returns null when the pattern matches no
      // path; previously this dereferenced null below.
      return metricsList;
    }
    for(int i=0; i<status.length; i++) {
      long totalSize = hdfs.getContentSummary(status[i].getPath()).getLength();
      if(totalSize <= 0) {
        continue; // skip empty directories
      }
      String name = status[i].getPath().getName();
      HDFSUsageMetrics usage = new HDFSUsageMetrics();
      usage.setName(name);
      usage.setSize(totalSize);
      usage.setLastModified(status[i].getModificationTime());
      metricsList.addMetrics(usage);

      // also contribute to manager's usage
      if(chart != null) {
        Employee employee = chart.get(name);
        if(employee != null) {
          employee = employee.getManager();
          while(employee != null) {
            HDFSUsageMetrics managerUsage = new HDFSUsageMetrics();
            managerUsage.setName(employee.getId());
            managerUsage.setSize(totalSize);
            metricsList.addMetrics(managerUsage);
            employee = employee.getManager();
          }
        }
      }
    }
    return metricsList;
  }

  /**
   * Parses command-line style arguments:
   *   -c &lt;class&gt;  OrgChart implementation to instantiate
   *   -h &lt;uri&gt;    HDFS filesystem URI (defaults to the configured default FS)
   *   -p &lt;glob&gt;   path glob to scan (defaults to "/user/*")
   */
  @Override
  public void init(String[] args) throws Throwable {
    for(int i=0; i<args.length; i++) {
      if(args[i].equals("-c")) {
        String orgChartClass = args[i+1];
        chart = (OrgChart) Class.forName(orgChartClass).newInstance();
        i++;
      } else if(args[i].equals("-h")) {
        Configuration conf = new Configuration();
        hdfs = FileSystem.get(new URI(args[i+1]), conf);
        i++;
      } else if(args[i].equals("-p")) {
        path = args[i+1];
        i++;
      }
    }
    if(hdfs == null) {
      Configuration conf = new Configuration();
      hdfs = FileSystem.get(conf);
    }
    if(path == null) {
      path = "/user/*";
    }
  }

  @Override
  public JPluginStatusMetricsList getStatus() throws Throwable {
    JPluginStatusMetricsList list = new JPluginStatusMetricsList(this.getClass().getSimpleName());
    list.addStatus("hdfs", hdfs.getUri().toString());
    list.addStatus("path", path);
    // Bug fix: the populated list was built and then discarded via
    // "return null", so callers never received any status.
    return list;
  }

  @Override
  public String getRecordType() {
    return "HDFSUsage";
  }
}
| 8,331 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/HDFSUsageMetricsList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetricsList;
/**
 * Convenience ChukwaMetricsList fixed to the "HDFSUsage" record type, the
 * type produced by HDFSUsagePlugin.
 */
public class HDFSUsageMetricsList extends ChukwaMetricsList<HDFSUsageMetrics> {
  public HDFSUsageMetricsList() {
    super("HDFSUsage");
  }
}
| 8,332 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/HDFSUsageMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
import java.util.Date;
import java.util.HashMap;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetrics;
/**
 * A single HDFS space-usage sample: the owning user (or manager) name, the
 * total size in bytes, and the sampled path's last-modified time.
 */
public class HDFSUsageMetrics implements ChukwaMetrics {
  private String name = null;
  private Long size;
  private long lastModified;

  /** The aggregation key is the user name, so samples for one user merge. */
  @Override
  public String getKey() {
    return getName();
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getName() {
    return name;
  }

  public void setSize(Long size) {
    this.size = size;
  }

  public Long getSize() {
    return size;
  }

  /**
   * Renders this sample as attribute/value string pairs. The timestamp is
   * the time of this call, not the sampled path's modification time.
   */
  @Override
  public HashMap<String, String> getAttributes() {
    HashMap<String, String> attr = new HashMap<String, String>();
    attr.put("user", name);
    // NOTE(review): throws NullPointerException if size was never set —
    // callers are expected to populate it first.
    attr.put("bytes", size.toString());
    // Idiom fix: was `new Date().getTime() + ""` — same value, without the
    // throwaway Date allocation and string-concat conversion.
    attr.put("timestamp", Long.toString(System.currentTimeMillis()));
    return attr;
  }

  public void setLastModified(long lastModified) {
    this.lastModified = lastModified;
  }

  public long getLastModified() {
    return lastModified;
  }
}
| 8,333 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/OrgChart.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
/**
 * Lookup interface mapping a user id to an {@link Employee}, used by
 * HDFSUsagePlugin to attribute disk usage up the management chain.
 */
public interface OrgChart {
  // Returns the employee for the given id; implementations may return null
  // for unknown ids (HDFSUsagePlugin handles that case).
  Employee get(String id);
}
| 8,334 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/Employee.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
/**
 * A node in an org chart: an employee identified by id, with an optional
 * reference to the employee's manager (null at the top of the chart).
 */
public class Employee {
  private String id;
  private Employee manager;

  /** @return this employee's identifier, or null until set */
  public String getId() {
    return id;
  }

  /** Sets this employee's identifier. */
  public void setId(String id) {
    this.id = id;
  }

  /** @return this employee's manager, or null for the top of the chart */
  public Employee getManager() {
    return manager;
  }

  /** Sets this employee's manager. */
  public void setManager(Employee manager) {
    this.manager = manager;
  }
}
| 8,335 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/conf/ChukwaConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.conf;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
/**
 * Chukwa-wide Hadoop {@link Configuration}: resolves CHUKWA_HOME and
 * CHUKWA_CONF_DIR from the environment (falling back to "." and
 * "$CHUKWA_HOME/conf") and layers the standard Chukwa XML resource files on
 * top of the Hadoop defaults.
 */
public class ChukwaConfiguration extends Configuration {
  static Logger log = Logger.getLogger(ChukwaConfiguration.class);

  private String chukwaHome, chukwaConf;

  /** @return the resolved CHUKWA_HOME directory, with trailing separator */
  public String getChukwaHome() {
    return chukwaHome;
  }

  /** @return the resolved Chukwa configuration directory */
  public String getChukwaConf() {
    return chukwaConf;
  }

  public ChukwaConfiguration() {
    this(true);
  }

  /**
   * @param loadDefaults when true, resolve the Chukwa directories and add the
   *        chukwa-*.xml resources; when false, behave as a plain Configuration
   */
  public ChukwaConfiguration(boolean loadDefaults) {
    super();
    if (loadDefaults) {
      chukwaHome = System.getenv("CHUKWA_HOME");
      if (chukwaHome == null) {
        chukwaHome = ".";
      }

      // NOTE(review): checks for "/" but appends File.separator — on Windows
      // these differ; presumably only POSIX paths are expected. Confirm.
      if (!chukwaHome.endsWith("/")) {
        chukwaHome = chukwaHome + File.separator;
      }

      chukwaConf = System.getenv("CHUKWA_CONF_DIR");
      if (chukwaConf == null) {
        chukwaConf = chukwaHome + "conf" + File.separator;
      }

      log.info("chukwaConf is " + chukwaConf);

      super.addResource(new Path(chukwaConf + "/chukwa-common.xml"));
      // Bug fix: this message previously claimed chukwa-agent-conf.xml was
      // the resource just added.
      log.debug("added chukwa-common.xml to ChukwaConfiguration");

      super.addResource(new Path(chukwaConf + "/chukwa-agent-conf.xml"));
      log.debug("added chukwa-agent-conf.xml to ChukwaConfiguration");

      super.addResource(new Path(chukwaConf + "/chukwa-collector-conf.xml"));
      log.debug("added chukwa-collector-conf.xml to ChukwaConfiguration");

      super.addResource(new Path(chukwaConf + "/chukwa-demux-conf.xml"));
      log.debug("added chukwa-demux-conf.xml to ChukwaConfiguration");
    }
  }
}
| 8,336 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/CHUKWA_CONSTANT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction;
/**
 * Shared configuration keys and directory names for the Chukwa extraction
 * pipeline (demux, archiving, post-processing).
 *
 * NOTE(review): this is a "constant interface" — implemented purely for
 * namespace convenience by pipeline classes; kept as-is for compatibility.
 */
public interface CHUKWA_CONSTANT {
  // Configuration property keys.
  public static final String HDFS_DEFAULT_NAME_FIELD = "fs.defaultFS";
  public static final String WRITER_HDFS_FILESYSTEM_FIELD = "writer.hdfs.filesystem";
  public static final String CHUKWA_ROOT_DIR_FIELD = "chukwaRootDir";
  public static final String CHUKWA_ROOT_REPOS_DIR_FIELD = "chukwaRootReposDir";
  //This is the INPUT directory for archiving; defaults to /chukwa/logs
  public static final String CHUKWA_ARCHIVE_DIR_FIELD = "chukwaArchiveDir";
  public static final String CHUKWA_POST_PROCESS_DIR_FIELD = "chukwaPostProcessDir";
  public static final String CHUKWA_POSTPROCESS_IN_ERROR_DIR_FIELD = "chukwaPostProcessInErrorDir";
  public static final String CHUKWA_DATA_SINK_DIR_FIELD = "chukwaDataSinkDir";
  public static final String CHUKWA_NAGIOS_HOST_FIELD = "demux.nagiosHost";
  public static final String CHUKWA_NAGIOS_PORT_FIELD = "demux.nagiosPort";
  public static final String CHUKWA_REPORTING_HOST_FIELD = "demux.reportingHost4Nagios";
  // Error-count thresholds that shut the corresponding stage down.
  public static final String CHUKWA_POSTPROCESS_MAX_ERROR_COUNT_FIELD = "post.process.max.error.count.before.shutdown";
  public static final String CHUKWA_ARCHIVE_MAX_ERROR_COUNT_FIELD = "archive.max.error.count.before.shutdown";
  public static final String CHUKWA_DEMUX_MAX_ERROR_COUNT_FIELD = "demux.max.error.count.before.shutdown";
  public static final String CHUKWA_DEMUX_REDUCER_COUNT_FIELD = "demux.reducerCount";

  // Default directory layout under the Chukwa root.
  public static final String DEFAULT_CHUKWA_ROOT_DIR_NAME = "/chukwa/";
  public static final String DEFAULT_REPOS_DIR_NAME = "repos/";
  public static final String DEFAULT_CHUKWA_POSTPROCESS_DIR_NAME = "postProcess/";
  public static final String DEFAULT_POSTPROCESS_IN_ERROR_DIR_NAME = "postProcessInError/";
  public static final String DEFAULT_CHUKWA_LOGS_DIR_NAME = "logs/";
  public static final String DEFAULT_DEMUX_PROCESSING_DIR_NAME = "demuxProcessing/";
  public static final String DEFAULT_DEMUX_MR_OUTPUT_DIR_NAME = "mrOutput/";
  public static final String DEFAULT_DEMUX_MR_INPUT_DIR_NAME = "mrInput/";
  public static final String DEFAULT_DEMUX_IN_ERROR_DIR_NAME = "inError/";
  public static final String DEFAULT_CHUKWA_DATASINK_DIR_NAME = "dataSinkArchives/";
  public static final String DEFAULT_FINAL_ARCHIVES = "finalArchives/";

  //These fields control the working dirs for the archive mapred job.
  //They are not configurable at runtime.
  public static final String ARCHIVES_PROCESSING_DIR_NAME = "archivesProcessing/";
  public static final String ARCHIVES_MR_OUTPUT_DIR_NAME = "mrOutput/";
  public static final String ARCHIVES_MR_INPUT_DIR_NAME = "mrInput/";
  public static final String ARCHIVES_IN_ERROR_DIR_NAME = "inError/";

  public static final String POST_DEMUX_DATA_LOADER = "chukwa.post.demux.data.loader";
  public static final String INCLUDE_KEY_IN_PARTITIONER = "_";

  //CHUKWA-648: Make Chukwa Reduce Type to support hierarchy format
  //To support Hierarchy datatype
  public static final String HIERARCHY_CONNECTOR = "-";

  public static final String POST_DEMUX_SUCCESS_ACTION = "chukwa.post.demux.success.action";
}
| 8,337 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDataTypePartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.extraction.engine.RecordUtil;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
 * Partitions chunks by data type and day (of the key's time partition),
 * optionally prefixed with the cluster name when
 * {@code archive.groupByClusterName} is set to "true".
 */
public class ChukwaArchiveDataTypePartitioner<K, V> implements
    Partitioner<ChukwaArchiveKey, ChunkImpl> {
  // One instance per task; SimpleDateFormat is not thread-safe but the
  // old mapred API calls this partitioner from a single thread.
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd");
  boolean useClusterID = false;

  public void configure(JobConf conf) {
    useClusterID = "true".equals(conf.get(ChukwaArchiveDataTypeOutputFormat.
        GROUP_BY_CLUSTER_OPTION_NAME));
  }

  public int getPartition(ChukwaArchiveKey key, ChunkImpl chunk,
      int numReduceTasks) {
    // Build the same "<dataType>[_<cluster>]_<yyyy_MM_dd>" bucket string as
    // before, then hash it into a non-negative reducer index.
    StringBuilder bucket = new StringBuilder(chunk.getDataType());
    if (useClusterID) {
      bucket.append('_').append(RecordUtil.getClusterName(chunk));
    }
    bucket.append('_').append(sdf.format(key.getTimePartition()));
    return (bucket.toString().hashCode() & Integer.MAX_VALUE) % numReduceTasks;
  }
}
| 8,338 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveStreamNamePartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
 * Partitions chunks by stream identity so that all chunks from the same
 * source/stream pair land on the same reducer.
 */
public class ChukwaArchiveStreamNamePartitioner<K, V> implements
    Partitioner<ChukwaArchiveKey, ChunkImpl> {
  public void configure(JobConf conf) {
    // no configuration needed
  }

  public int getPartition(ChukwaArchiveKey key, ChunkImpl chunk,
      int numReduceTasks) {
    String stream = chunk.getSource() + "/" + chunk.getStreamName();
    int nonNegativeHash = stream.hashCode() & Integer.MAX_VALUE;
    return nonNegativeHash % numReduceTasks;
  }
}
| 8,339 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveStreamNameOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
/**
 * Output format that prefixes every leaf file name produced by the default
 * MultipleSequenceFileOutputFormat naming scheme with "chukwaArchive-".
 */
public class ChukwaArchiveStreamNameOutputFormat extends
    MultipleSequenceFileOutputFormat<ChukwaArchiveKey, ChunkImpl> {
  @Override
  protected String generateLeafFileName(String name) {
    String defaultLeafName = super.generateLeafFileName(name);
    return "chukwaArchive-".concat(defaultLeafName);
  }
}
| 8,340 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDailyPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
 * Partitions chunks by calendar day of the key's time partition, so each
 * reducer receives the data for one (hashed) day bucket.
 */
public class ChukwaArchiveDailyPartitioner<K, V> implements
    Partitioner<ChukwaArchiveKey, ChunkImpl> {
  // One instance per task; the old mapred API invokes this single-threaded.
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd");

  public void configure(JobConf conf) {
    // no configuration needed
  }

  public int getPartition(ChukwaArchiveKey key, ChunkImpl chunk,
      int numReduceTasks) {
    String dayBucket = sdf.format(key.getTimePartition());
    int nonNegativeHash = dayBucket.hashCode() & Integer.MAX_VALUE;
    return nonNegativeHash % numReduceTasks;
  }
}
| 8,341 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
public class ChukwaArchiveManager implements CHUKWA_CONSTANT {
static Logger log = Logger.getLogger(ChukwaArchiveManager.class);
SimpleDateFormat day = new java.text.SimpleDateFormat("yyyyMMdd");
static final int ONE_HOUR = 60 * 60 * 1000;
static final int ONE_DAY = 24*ONE_HOUR;
static final int MAX_FILES = 500;
private static final int DEFAULT_MAX_ERROR_COUNT = 4;
protected ChukwaConfiguration conf = null;
protected FileSystem fs = null;
protected boolean isRunning = true;
public ChukwaArchiveManager() throws Exception {
conf = new ChukwaConfiguration();
init();
}
protected void init() throws IOException, URISyntaxException {
String fsName = conf.get(HDFS_DEFAULT_NAME_FIELD);
fs = FileSystem.get(new URI(fsName), conf);
}
public static void main(String[] args) throws Exception {
ChukwaArchiveManager manager = new ChukwaArchiveManager();
manager.start();
}
public void shutdown() {
this.isRunning = false;
}
public void start() throws Exception {
String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD, DEFAULT_CHUKWA_ROOT_DIR_NAME);
if ( ! chukwaRootDir.endsWith("/") ) {
chukwaRootDir += "/";
}
log.info("chukwaRootDir:" + chukwaRootDir);
String archiveRootDir = conf.get(CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir +DEFAULT_CHUKWA_DATASINK_DIR_NAME);
if ( ! archiveRootDir.endsWith("/") ) {
archiveRootDir += "/";
}
log.info("archiveDir:" + archiveRootDir);
Path pArchiveRootDir = new Path(archiveRootDir);
setup(pArchiveRootDir);
String archivesRootProcessingDir = chukwaRootDir + ARCHIVES_PROCESSING_DIR_NAME;
// String archivesErrorDir = archivesRootProcessingDir + DEFAULT_ARCHIVES_IN_ERROR_DIR_NAME;
String archivesMRInputDir = archivesRootProcessingDir + ARCHIVES_MR_INPUT_DIR_NAME;
String archivesMROutputDir = archivesRootProcessingDir+ ARCHIVES_MR_OUTPUT_DIR_NAME;
String finalArchiveOutput = chukwaRootDir + DEFAULT_FINAL_ARCHIVES;
int maxPermittedErrorCount = conf.getInt(CHUKWA_ARCHIVE_MAX_ERROR_COUNT_FIELD,
DEFAULT_MAX_ERROR_COUNT);
Path pDailyRawArchivesInput = new Path(archiveRootDir);
Path pArchivesMRInputDir = new Path(archivesMRInputDir);
Path pArchivesRootProcessingDir = new Path(archivesRootProcessingDir);
Path pFinalArchiveOutput = new Path(finalArchiveOutput);
if (!archivesMRInputDir.endsWith("/")) {
archivesMRInputDir +="/";
}
setup( pArchivesRootProcessingDir );
setup( pDailyRawArchivesInput );
setup( pFinalArchiveOutput );
int errorCount = 0;
long lastRun = 0l;
while (isRunning) {
try {
if (maxPermittedErrorCount != -1 && errorCount >= maxPermittedErrorCount) {
log.warn("==================\nToo many errors (" + errorCount +
"), Bail out!\n==================");
break;
}
// /chukwa/archives/<YYYYMMDD>/dataSinkDirXXX
// to
// /chukwa/archives/final/<YYYYMMDD>_<TS>
if (fs.exists(pArchivesMRInputDir)) {
FileStatus[] days = fs.listStatus(pArchivesMRInputDir);
if (days.length > 0) {
log.info("reprocessing current Archive input" + days[0].getPath());
runArchive(archivesMRInputDir + days[0].getPath().getName() + "/",archivesMROutputDir,finalArchiveOutput);
errorCount = 0;
continue;
}
}
log.info("Raw Archive dir:" + pDailyRawArchivesInput);
long now = System.currentTimeMillis();
int currentDay = Integer.parseInt(day.format(System.currentTimeMillis()));
FileStatus[] daysInRawArchiveDir = fs.listStatus(pDailyRawArchivesInput);
if (daysInRawArchiveDir.length == 0 ) {
log.debug( pDailyRawArchivesInput + " is empty, going to sleep for 1 minute");
Thread.sleep(1 * 60 * 1000);
continue;
}
// We don't want to process DataSink file more than once every 2 hours
// for current day
if (daysInRawArchiveDir.length == 1 ) {
int workingDay = Integer.parseInt(daysInRawArchiveDir[0].getPath().getName());
long nextRun = lastRun + (2*ONE_HOUR) - (1*60*1000);// 2h -1min
if (workingDay == currentDay && now < nextRun) {
log.info("lastRun < 2 hours so skip archive for now, going to sleep for 30 minutes, currentDate is:" + new java.util.Date());
Thread.sleep(30 * 60 * 1000);
continue;
}
}
String dayArchivesMRInputDir = null;
for (FileStatus fsDay : daysInRawArchiveDir) {
dayArchivesMRInputDir = archivesMRInputDir + fsDay.getPath().getName() + "/";
processDay(fsDay, dayArchivesMRInputDir,archivesMROutputDir, finalArchiveOutput);
lastRun = now;
}
}catch (Throwable e) {
errorCount ++;
e.printStackTrace();
log.warn(e);
}
}
}
public void processDay(FileStatus fsDay, String archivesMRInputDir,
String archivesMROutputDir,String finalArchiveOutput) throws Exception {
FileStatus[] dataSinkDirsInRawArchiveDir = fs.listStatus(fsDay.getPath());
long now = System.currentTimeMillis();
int currentDay = Integer.parseInt(day.format(System.currentTimeMillis()));
int workingDay = Integer.parseInt(fsDay.getPath().getName());
long oneHourAgo = now - ONE_HOUR;
if (dataSinkDirsInRawArchiveDir.length == 0 && workingDay < currentDay) {
fs.delete(fsDay.getPath(),false);
log.info("deleting raw dataSink dir for day:" + fsDay.getPath().getName());
return;
}
int fileCount = 0;
for (FileStatus fsDataSinkDir : dataSinkDirsInRawArchiveDir) {
long modificationDate = fsDataSinkDir.getModificationTime();
if (modificationDate < oneHourAgo || workingDay < currentDay) {
log.info("processDay,modificationDate:" + modificationDate +", adding: " + fsDataSinkDir.getPath() );
fileCount += fs.listStatus(fsDataSinkDir.getPath()).length;
moveDataSinkFilesToArchiveMrInput(fsDataSinkDir,archivesMRInputDir);
// process no more than MAX_FILES directories
if (fileCount >= MAX_FILES) {
log.info("processDay, reach capacity");
runArchive(archivesMRInputDir,archivesMROutputDir,finalArchiveOutput);
fileCount = 0;
} else {
log.info("processDay,modificationDate:" + modificationDate +", skipping: " + fsDataSinkDir.getPath() );
}
}
}
}
public void runArchive(String archivesMRInputDir,String archivesMROutputDir,
String finalArchiveOutput) throws Exception {
String[] args = new String[3];
args[0] = conf.get("archive.grouper","Stream");
args[1] = archivesMRInputDir + "*/*.done" ;
args[2] = archivesMROutputDir;
Path pArchivesMRInputDir = new Path(archivesMRInputDir);
Path pArchivesMROutputDir = new Path(archivesMROutputDir);
if (fs.exists(pArchivesMROutputDir)) {
log.warn("Deleteing mroutput dir for archive ...");
fs.delete(pArchivesMROutputDir, true);
}
log.info("ChukwaArchiveManager processing :" + args[1] + " going to output to " + args[2] );
int res = ToolRunner.run(this.conf, new ChukwaArchiveBuilder(),args);
log.info("Archive result: " + res);
if (res != 0) {
throw new Exception("Archive result != 0");
}
if (!finalArchiveOutput.endsWith("/")) {
finalArchiveOutput +="/";
}
String day = pArchivesMRInputDir.getName();
finalArchiveOutput += day;
Path pDay = new Path(finalArchiveOutput);
setup(pDay);
finalArchiveOutput += "/archive_" + System.currentTimeMillis();
Path pFinalArchiveOutput = new Path(finalArchiveOutput);
log.info("Final move: moving " + pArchivesMROutputDir + " to " + pFinalArchiveOutput);
if (fs.rename(pArchivesMROutputDir, pFinalArchiveOutput ) ) {
log.info("deleting " + pArchivesMRInputDir);
fs.delete(pArchivesMRInputDir, true);
} else {
log.warn("move to final archive folder failed!");
}
}
public void moveDataSinkFilesToArchiveMrInput(FileStatus fsDataSinkDir,
String archivesMRInputDir) throws IOException {
if (!archivesMRInputDir.endsWith("/")) {
archivesMRInputDir +="/";
}
Path pArchivesMRInputDir = new Path(archivesMRInputDir);
setup(pArchivesMRInputDir);
fs.rename(fsDataSinkDir.getPath(), pArchivesMRInputDir);
log.info("moving " + fsDataSinkDir.getPath() + " to " + pArchivesMRInputDir);
}
/**
* Create directory if !exists
* @param directory
* @throws IOException
*/
protected void setup(Path directory) throws IOException {
if ( ! fs.exists(directory)) {
fs.mkdirs(directory);
}
}
}
| 8,342 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
/**
* Main class for mapreduce job to do archiving of Chunks.
*
* Map class and reduce classes are both identity; actual logic is in
* Partitioner and OutputFormat classes. Those are selected by first argument.
*
*
*
*/
/**
 * Main class for mapreduce job to do archiving of Chunks.
 *
 * Map class and reduce classes are both (near-)identity; actual logic is in
 * Partitioner and OutputFormat classes. Those are selected by first argument
 * (Stream / DataType / Daily / Hourly).
 */
public class ChukwaArchiveBuilder extends Configured implements Tool {
  /**
   * Emits exactly one value per key, suppressing duplicate chunks and
   * reporting the number dropped via the "app"/"duplicate chunks" counter.
   */
  static class UniqueKeyReduce extends MapReduceBase implements
      Reducer<ChukwaArchiveKey, ChunkImpl, ChukwaArchiveKey, ChunkImpl> {

    /**
     * Outputs exactly one value for each key; this suppresses duplicates
     */
    @Override
    public void reduce(ChukwaArchiveKey key, Iterator<ChunkImpl> vals,
        OutputCollector<ChukwaArchiveKey, ChunkImpl> out, Reporter r)
        throws IOException {
      ChunkImpl i = vals.next();
      out.collect(key, i);
      int dups = 0;
      while(vals.hasNext()) {
        vals.next();
        dups ++;
      }
      r.incrCounter("app", "duplicate chunks", dups);
    }
  }

  static Logger log = Logger.getLogger(ChukwaArchiveBuilder.class);

  /** Prints CLI usage and returns -1 so callers can propagate the failure. */
  static int printUsage() {
    System.out
        .println("ChukwaArchiveBuilder <Stream/DataType/Daily/Hourly> <input> <output>");
    ToolRunner.printGenericCommandUsage(System.out);
    return -1;
  }

  /**
   * Configures and runs the archive job.
   *
   * @param args [0] grouping mode (Stream/DataType/Daily/Hourly),
   *             [1] input path glob, [2] output path
   * @return 0 on success, -1 on bad arguments
   */
  public int run(String[] args) throws Exception {
    // Make sure there are exactly 3 parameters left.
    if (args.length != 3) {
      System.out.println("ERROR: Wrong number of parameters: " + args.length
          + " instead of 3.");
      return printUsage();
    }

    JobConf jobConf = new JobConf(getConf(), ChukwaArchiveBuilder.class);
    jobConf.setInputFormat(SequenceFileInputFormat.class);
    jobConf.setMapperClass(IdentityMapper.class);
    jobConf.setReducerClass(UniqueKeyReduce.class);
    // jobConf.setReducerClass(IdentityReducer.class);

    // The grouping mode selects the partitioner/output-format pair that
    // determines how chunks are bucketed into archive files.
    if (args[0].equalsIgnoreCase("Daily")) {
      jobConf.setPartitionerClass(ChukwaArchiveDailyPartitioner.class);
      jobConf.setOutputFormat(ChukwaArchiveDailyOutputFormat.class);
      jobConf.setJobName("Chukwa-DailyArchiveBuilder");
    } else if (args[0].equalsIgnoreCase("Hourly")) {
      jobConf.setJobName("Chukwa-HourlyArchiveBuilder");
      jobConf.setPartitionerClass(ChukwaArchiveHourlyPartitioner.class);
      jobConf.setOutputFormat(ChukwaArchiveHourlyOutputFormat.class);
    } else if (args[0].equalsIgnoreCase("DataType")) {
      jobConf.setJobName("Chukwa-ArchiveBuilder-DataType");
      int reduceCount = jobConf.getInt("chukwaArchiveBuilder.reduceCount", 1);
      log.info("Reduce Count:" + reduceCount);
      jobConf.setNumReduceTasks(reduceCount);

      jobConf.setPartitionerClass(ChukwaArchiveDataTypePartitioner.class);
      jobConf.setOutputFormat(ChukwaArchiveDataTypeOutputFormat.class);
    } else if (args[0].equalsIgnoreCase("Stream")) {
      jobConf.setJobName("Chukwa-HourlyArchiveBuilder-Stream");
      int reduceCount = jobConf.getInt("chukwaArchiveBuilder.reduceCount", 1);
      log.info("Reduce Count:" + reduceCount);
      jobConf.setNumReduceTasks(reduceCount);

      jobConf.setPartitionerClass(ChukwaArchiveStreamNamePartitioner.class);
      jobConf.setOutputFormat(ChukwaArchiveStreamNameOutputFormat.class);
    } else {
      System.out.println("ERROR: Wrong Time partitioning: " + args[0]
          + " instead of [Stream/DataType/Hourly/Daily].");
      return printUsage();
    }

    jobConf.setOutputKeyClass(ChukwaArchiveKey.class);
    jobConf.setOutputValueClass(ChunkImpl.class);

    FileInputFormat.setInputPaths(jobConf, args[1]);
    FileOutputFormat.setOutputPath(jobConf, new Path(args[2]));

    JobClient.runJob(jobConf);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // BUG FIX: the job result was previously computed and then discarded
    // ("return;"), so the JVM always exited 0. Propagate it as the process
    // exit status so wrapping scripts can detect failures.
    int res = ToolRunner.run(new ChukwaConfiguration(), new ChukwaArchiveBuilder(),
        args);
    System.exit(res);
  }
}
| 8,343 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDailyOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
import org.apache.log4j.Logger;
/**
 * Output format that writes each chunk into a per-day sequence file named
 * "&lt;yyyy_MM_dd&gt;.arc" based on the key's time partition.
 */
public class ChukwaArchiveDailyOutputFormat extends
    MultipleSequenceFileOutputFormat<ChukwaArchiveKey, ChunkImpl> {
  static Logger log = Logger.getLogger(ChukwaArchiveDailyOutputFormat.class);
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd");

  @Override
  protected String generateFileNameForKeyValue(ChukwaArchiveKey key,
      ChunkImpl chunk, String name) {
    // Format once and reuse for both the debug log and the file name.
    String dayBucket = sdf.format(key.getTimePartition());
    if (log.isDebugEnabled()) {
      log.debug("ChukwaArchiveOutputFormat.fileName: " + dayBucket);
    }
    return dayBucket + ".arc";
  }
}
| 8,344 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveHourlyPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
 * Partitions chunks by hour of the key's time partition, so each reducer
 * receives the data for one (hashed) hourly bucket.
 */
public class ChukwaArchiveHourlyPartitioner<K, V> implements
    Partitioner<ChukwaArchiveKey, ChunkImpl> {
  // One instance per task; the old mapred API invokes this single-threaded.
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd_HH-00");

  public void configure(JobConf conf) {
    // no configuration needed
  }

  public int getPartition(ChukwaArchiveKey key, ChunkImpl chunk,
      int numReduceTasks) {
    String hourBucket = sdf.format(key.getTimePartition());
    int nonNegativeHash = hourBucket.hashCode() & Integer.MAX_VALUE;
    return nonNegativeHash % numReduceTasks;
  }
}
| 8,345 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/SinkArchiver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.log4j.Logger;
import java.io.IOException;
/**
* A lightweight tool for archiving, suitable for small-to-medium-size Chukwa
* deployments that don't use Demux.
* Grabs everything in the data sink, runs the Archiver MapReduce job,
* then promotes output to the archive dir.
*
* Input is determined by conf option chukwaArchiveDir; defaults to
* /chukwa/logs
*
* Uses /chukwa/archivesProcessing/mr[Input/Output] as tmp storage
*
* Outputs to /chukwa/archives
*
*/
/**
 * A lightweight tool for archiving, suitable for small-to-medium-size Chukwa
 * deployments that don't use Demux.
 * Grabs everything in the data sink, runs the Archiver MapReduce job,
 * then promotes output to the archive dir.
 *
 * Input is determined by conf option chukwaArchiveDir; defaults to
 * /chukwa/logs
 *
 * Uses /chukwa/archivesProcessing/mr[Input/Output] as tmp storage
 *
 * Outputs to /chukwa/archives
 */
public class SinkArchiver implements CHUKWA_CONSTANT {

  /** Accepts only closed data-sink files (suffix ".done"). */
  final public static PathFilter DATA_SINK_FILTER = new PathFilter() {
    public boolean accept(Path file) {
      return file.getName().endsWith(".done");
    }
  };

  static Logger log = Logger.getLogger(SinkArchiver.class);

  public static void main(String[] args) {
    try {
      Configuration conf = new ChukwaConfiguration();
      if(conf.get(ChukwaArchiveDataTypeOutputFormat.GROUP_BY_CLUSTER_OPTION_NAME) == null )
        conf.set(ChukwaArchiveDataTypeOutputFormat.GROUP_BY_CLUSTER_OPTION_NAME, "true");
      FileSystem fs = FileSystem.get(conf);
      SinkArchiver archiver = new SinkArchiver();
      archiver.exec(fs, conf);
    } catch(IOException e) {
      // Log with full stack trace instead of printStackTrace(), for
      // consistency with the rest of this class's log4j usage.
      log.error("SinkArchiver failed", e);
    }
  }

  /*
   * Pull most of the logic into instance methods so that we can
   * more easily unit-test, by altering passed-in configuration.
   */
  public void exec(FileSystem fs, Configuration conf) {
    try {
      String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD, DEFAULT_CHUKWA_ROOT_DIR_NAME);
      if ( ! chukwaRootDir.endsWith("/") ) {
        chukwaRootDir += "/";
      }
      String archiveSource = conf.get(CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir +DEFAULT_CHUKWA_LOGS_DIR_NAME);
      if ( ! archiveSource.endsWith("/") ) {
        archiveSource += "/";
      }
      String archivesRootProcessingDir = chukwaRootDir + ARCHIVES_PROCESSING_DIR_NAME;
      //String archivesErrorDir = archivesRootProcessingDir + ARCHIVES_IN_ERROR_DIR_NAME;
      String archivesMRInputDir = archivesRootProcessingDir + ARCHIVES_MR_INPUT_DIR_NAME;
      String archivesMROutputDir = archivesRootProcessingDir+ ARCHIVES_MR_OUTPUT_DIR_NAME;

      Path pSource = new Path(archiveSource);

      Path pMRInputDir = new Path(archivesMRInputDir);
      if(!fs.exists(pMRInputDir))
        fs.mkdirs(pMRInputDir);

      // The MR job requires its output dir not to exist; remove a leftover
      // empty one from a previous run.
      Path pOutputDir = new Path(archivesMROutputDir);
      if(!fs.exists(pOutputDir))
        fs.mkdirs(pOutputDir);
      if(fs.listStatus(pOutputDir).length == 0)
        fs.delete(pOutputDir, true);

      Path archive = new Path(chukwaRootDir + "archive");
      selectInputs(fs, pSource, pMRInputDir);

      int result = runMapRedJob(conf, archivesMRInputDir, archivesMROutputDir);
      if(result == 0) { //success, so empty input dir
        fs.delete(pMRInputDir, true);
      }

      if(!fs.exists(archive)) {
        fs.mkdirs(archive);
      }

      FileStatus[] files = fs.listStatus(pOutputDir);
      for(FileStatus f: files) {
        if(!f.getPath().getName().endsWith("_logs"))
          promoteAndMerge(fs, f.getPath(), archive);
      }

      fs.delete(pOutputDir, true);
    } catch (Exception e) {
      // Log with full stack trace instead of printStackTrace().
      log.error("Error archiving sink files", e);
    }
  }

  /** Moves every ".done" data-sink file from pSource into pMRInputDir. */
  private void selectInputs(FileSystem fs, Path pSource,
      Path pMRInputDir) throws IOException {

    FileStatus[] dataSinkFiles = fs.listStatus(pSource, DATA_SINK_FILTER);
    for(FileStatus fstatus: dataSinkFiles) {
      boolean rename = fs.rename(fstatus.getPath(),pMRInputDir);
      log.info("Moving " + fstatus.getPath() + " to " + pMRInputDir
          +", status is: " + rename);
    }

  }

  /** Runs the archive MR job; grouper defaults to "DataType". */
  public int runMapRedJob(Configuration conf, String in, String out)
    throws Exception {
    String grouper = conf.get("archive.grouper","DataType");
    String[] args = new String[] {grouper, in, out};
    int res = ToolRunner.run(conf, new ChukwaArchiveBuilder(),
        args);
    return res;
  }

  /**
   * Merges the contents of src into dest.
   * If src is a file, moves it to dest.
   *
   * @param fs the filesystem in question
   * @param src a file or directory to merge into dest
   * @param dest a directory to merge into
   * @throws IOException if error in promote and merge
   */
  public void promoteAndMerge(FileSystem fs, Path src, Path dest)
    throws IOException {
    FileStatus stat = fs.getFileStatus(src);
    String baseName = src.getName();
    Path target = new Path(dest, baseName);
    if(!fs.exists(target)) {
      fs.rename(src, dest);
      log.info("moving " + src + " to " + dest);
    } else if(stat.isDir()) {//recurse
      FileStatus[] files = fs.listStatus(src);
      for(FileStatus f: files) {
        promoteAndMerge(fs, f.getPath(), target);
      }
    } else { //append a number to unique-ify filename
      int i=0;
      do {
        //FIXME: can make this more generic
        String newName;
        if(baseName.endsWith(".arc")) {
          newName = baseName.substring(0, baseName.length() - 4) + "-"+i+".arc";
        }
        else
          newName = baseName+"-"+i;
        target = new Path(dest, newName);
        // BUG FIX: the counter was never incremented, so if "<name>-0"
        // already existed this loop spun forever on the same candidate.
        i++;
      } while(fs.exists(target));
      fs.rename(src, target);
      log.info("promoting " + src + " to " + target);
    }
  }
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveDataTypeOutputFormat.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.extraction.engine.RecordUtil;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.util.Progressable;
import org.apache.log4j.Logger;
/**
 * Output format that writes archived chunks into one sequence file per
 * data type per day (optionally prefixed by cluster name when
 * "archive.groupByClusterName" is set to "true" in the job config).
 */
public class ChukwaArchiveDataTypeOutputFormat extends
    MultipleSequenceFileOutputFormat<ChukwaArchiveKey, ChunkImpl> {

  static final String GROUP_BY_CLUSTER_OPTION_NAME = "archive.groupByClusterName";
  static Logger log = Logger.getLogger(ChukwaArchiveDataTypeOutputFormat.class);
  // Day-granularity stamp embedded in every generated file name.
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd");
  boolean useClusterID;

  /**
   * Reads the group-by-cluster option from the job before delegating to
   * the multi-file writer; the flag is consulted on every key below.
   */
  public RecordWriter<ChukwaArchiveKey, ChunkImpl> getRecordWriter(FileSystem fs,
      JobConf job, String name, Progressable arg3)
      throws java.io.IOException {
    log.info(GROUP_BY_CLUSTER_OPTION_NAME + " is " + job.get(GROUP_BY_CLUSTER_OPTION_NAME));
    useClusterID = "true".equals(job.get(GROUP_BY_CLUSTER_OPTION_NAME));
    return super.getRecordWriter(fs, job, name, arg3);
  }

  /** Builds "<dataType>_<yyyy_MM_dd>.arc", prefixed by "<cluster>/" when grouping by cluster. */
  @Override
  protected String generateFileNameForKeyValue(ChukwaArchiveKey key,
      ChunkImpl chunk, String name) {
    String day = sdf.format(key.getTimePartition());
    if (log.isDebugEnabled()) {
      log.debug("ChukwaArchiveOutputFormat.fileName: " + day);
    }
    String fileName = chunk.getDataType() + "_" + day + ".arc";
    if (useClusterID) {
      fileName = RecordUtil.getClusterName(chunk) + "/" + fileName;
    }
    return fileName;
  }
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/archive/ChukwaArchiveHourlyOutputFormat.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.text.SimpleDateFormat;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
import org.apache.log4j.Logger;
/**
 * Output format that buckets archived chunks into one sequence file per
 * hour, named "yyyy_MM_dd_HH-00.arc".
 */
public class ChukwaArchiveHourlyOutputFormat extends
    MultipleSequenceFileOutputFormat<ChukwaArchiveKey, ChunkImpl> {

  static Logger log = Logger.getLogger(ChukwaArchiveHourlyOutputFormat.class);
  // Hour-granularity stamp embedded in every generated file name.
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM_dd_HH-00");

  /** Names the target file after the key's hourly time partition. */
  @Override
  protected String generateFileNameForKeyValue(ChukwaArchiveKey key,
      ChunkImpl chunk, String name) {
    String hourStamp = sdf.format(key.getTimePartition());
    if (log.isDebugEnabled()) {
      log.debug("ChukwaArchiveOutputFormat.fileName: " + hourStamp);
    }
    return hourStamp + ".arc";
  }
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/TaggerPlugin.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.extraction.engine.Record;
/**
 * Pluggable hook used during demux to derive extra metadata from a raw
 * input line and attach it to the record being built.
 */
public interface TaggerPlugin {
  /**
   * Inspects one raw input line and annotates the given record in place.
   *
   * @param line the raw log line being processed
   * @param record the record to annotate with derived tags/fields
   */
  public void tag(String line, Record record);
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/Demux.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.demux.processor.ChukwaOutputCollector;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MapProcessor;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MapProcessorFactory;
import org.apache.hadoop.chukwa.extraction.demux.processor.reducer.ReduceProcessorFactory;
import org.apache.hadoop.chukwa.extraction.demux.processor.reducer.ReduceProcessor;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
/**
 * Demux is the MapReduce job that turns raw collected chunks into typed
 * {@link ChukwaRecord}s. Each chunk's data type selects a
 * {@link MapProcessor} via the job configuration, and each record key's
 * reduce type selects a {@link ReduceProcessor}.
 *
 * Fixes in this revision: main() now propagates the tool's exit status
 * (it previously discarded it), the stray System.out.println in the
 * reducer is now a guarded debug log, and redundant printStackTrace()
 * calls (duplicating log.warn(msg, e)) were removed.
 */
public class Demux extends Configured implements Tool {
  static Logger log = Logger.getLogger(Demux.class);
  // Shared job configuration. Kept public/static for backward
  // compatibility: other components read it through this field.
  public static Configuration jobConf = null;

  protected static void setJobConf(JobConf jobConf) {
    Demux.jobConf = jobConf;
  }

  protected Configuration getJobConf() {
    return Demux.jobConf;
  }

  /** Mapper: dispatches each chunk to the processor mapped to its data type. */
  public static class MapClass extends MapReduceBase implements
      Mapper<ChukwaArchiveKey, ChunkImpl, ChukwaRecordKey, ChukwaRecord> {

    private Configuration jobConf = null;

    @Override
    public void configure(JobConf jobConf) {
      super.configure(jobConf);
      setJobConf(jobConf);
    }

    private void setJobConf(JobConf jobConf) {
      this.jobConf = jobConf;
    }

    /**
     * Processes one chunk. The processor class is looked up in the job
     * config under the chunk's data type (first entry of a
     * comma-separated value); the special value "Drop" discards the
     * chunk. Processor failures are logged and the chunk skipped, so a
     * bad chunk cannot kill the whole job.
     */
    public void map(ChukwaArchiveKey key, ChunkImpl chunk,
        OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
        throws IOException {

      ChukwaOutputCollector chukwaOutputCollector = new ChukwaOutputCollector(
          "DemuxMapOutput", output, reporter);
      try {
        long duration = System.currentTimeMillis();
        if (log.isDebugEnabled()) {
          log.debug("Entry: [" + String.valueOf(chunk.getData()) + "] EventType: ["
              + chunk.getDataType() + "]");
        }

        String defaultProcessor = jobConf.get(
            "chukwa.demux.mapper.default.processor",
            "org.apache.hadoop.chukwa.extraction.demux.processor.mapper.DefaultProcessor");

        String processorClass_pri = jobConf.get(chunk.getDataType(),
            defaultProcessor);

        // Only the first comma-separated entry names the mapper class.
        String processorClass = processorClass_pri.split(",")[0];
        if (!processorClass.equalsIgnoreCase("Drop")) {
          reporter.incrCounter("DemuxMapInput", "total chunks", 1);
          reporter.incrCounter("DemuxMapInput",
              chunk.getDataType() + " chunks", 1);

          MapProcessor processor = MapProcessorFactory
              .getProcessor(processorClass);
          processor.process(key, chunk, chukwaOutputCollector, reporter);

          if (log.isDebugEnabled()) {
            duration = System.currentTimeMillis() - duration;
            log.debug("Demux:Map dataType:" + chunk.getDataType()
                + " duration:" + duration + " processor:" + processorClass
                + " recordCount:" + chunk.getRecordOffsets().length);
          }
        } else {
          log.info("action:Demux, dataType:" + chunk.getDataType()
              + " duration:0 processor:Drop recordCount:"
              + chunk.getRecordOffsets().length);
        }
      } catch (Exception e) {
        // log.warn(msg, e) already records the stack trace; the former
        // extra e.printStackTrace() only duplicated it on stderr.
        log.warn("Exception in Demux:MAP", e);
      }
    }
  }

  /** Reducer: dispatches each key group to the processor for its reduce type. */
  public static class ReduceClass extends MapReduceBase implements
      Reducer<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, ChukwaRecord> {

    private Configuration jobConf = null;

    @Override
    public void configure(JobConf jobConf) {
      super.configure(jobConf);
      this.jobConf = jobConf;
    }

    /**
     * Reduces all records for one key. The reducer class is the second
     * entry of the comma-separated processor value registered under the
     * key's reduce type; when absent, IdentityReducer is used.
     */
    public void reduce(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
        OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
        throws IOException {

      ChukwaOutputCollector chukwaOutputCollector = new ChukwaOutputCollector(
          "DemuxReduceOutput", output, reporter);
      try {
        long duration = System.currentTimeMillis();
        reporter.incrCounter("DemuxReduceInput", "total distinct keys", 1);
        reporter.incrCounter("DemuxReduceInput", key.getReduceType()
            + " total distinct keys", 1);

        String defaultProcessor_classname = "org.apache.hadoop.chukwa.extraction.demux.processor.reducer" +
            ".IdentityReducer";
        String defaultProcessor = jobConf.get("chukwa.demux.reducer.default.processor",
            "," + defaultProcessor_classname);

        String processClass_pri = jobConf.get(key.getReduceType(), defaultProcessor);
        String[] processClass_tmps = processClass_pri.split(",");
        String processClass = null;
        if (processClass_tmps.length != 2)
          processClass = defaultProcessor_classname;
        else
          processClass = processClass_tmps[1];

        ReduceProcessor processor = ReduceProcessorFactory.getProcessor(processClass);
        // Debug-only trace of the chosen processor (previously an
        // unconditional System.out.println in the reduce hot path).
        if (log.isDebugEnabled()) {
          log.debug("Using reduce processor: " + processor.getClass().getName());
        }

        processor.process(key, values, chukwaOutputCollector, reporter);

        if (log.isDebugEnabled()) {
          duration = System.currentTimeMillis() - duration;
          log.debug("Demux:Reduce, dataType:" + key.getReduceType()
              + " duration:" + duration);
        }
      } catch (Exception e) {
        // See MapClass.map: warn-with-throwable already logs the trace.
        log.warn("Exception in Demux:Reduce", e);
      }
    }
  }

  /** Prints command-line usage and returns the conventional error status. */
  static int printUsage() {
    System.out.println("Demux [-m <maps>] [-r <reduces>] <input> <output>");
    ToolRunner.printGenericCommandUsage(System.out);
    return -1;
  }

  /**
   * Adds every parser JAR found under &lt;chukwa.data.dir&gt;/demux to the
   * job's distributed-cache classpath so custom processors can be loaded.
   */
  public static void addParsers(Configuration conf) {
    String parserPath = conf.get("chukwa.data.dir") + File.separator + "demux";
    try {
      FileSystem fs = FileSystem.get(new Configuration());
      FileStatus[] fstatus = fs.listStatus(new Path(parserPath));
      if (fstatus != null) {
        String hdfsUrlPrefix = conf.get("fs.defaultFS");

        for (FileStatus parser : fstatus) {
          Path jarPath = new Path(parser.getPath().toString().replace(hdfsUrlPrefix, ""));
          log.debug("Adding parser JAR path " + jarPath);
          DistributedCache.addFileToClassPath(jarPath, conf);
        }
      }
    } catch (IOException e) {
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }

  /**
   * Configures and submits the demux job.
   * Command line: [-m &lt;maps&gt;] [-r &lt;reduces&gt;] &lt;input&gt; &lt;output&gt;
   *
   * @param args command-line arguments as above
   * @return 0 on success, -1 on bad arguments
   * @throws Exception if job submission fails
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(new ChukwaConfiguration(), Demux.class);

    SimpleDateFormat day = new java.text.SimpleDateFormat("yyyyMMdd_HH_mm");
    conf.setJobName("Chukwa-Demux_" + day.format(new Date()));
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setMapperClass(Demux.MapClass.class);
    conf.setPartitionerClass(ChukwaRecordPartitioner.class);
    conf.setReducerClass(Demux.ReduceClass.class);
    conf.setOutputKeyClass(ChukwaRecordKey.class);
    conf.setOutputValueClass(ChukwaRecord.class);
    conf.setOutputFormat(ChukwaRecordOutputFormat.class);
    conf.setJobPriority(JobPriority.VERY_HIGH);
    addParsers(conf);

    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
      try {
        if ("-m".equals(args[i])) {
          conf.setNumMapTasks(Integer.parseInt(args[++i]));
        } else if ("-r".equals(args[i])) {
          conf.setNumReduceTasks(Integer.parseInt(args[++i]));
        } else {
          other_args.add(args[i]);
        }
      } catch (NumberFormatException except) {
        System.out.println("ERROR: Integer expected instead of " + args[i]);
        return printUsage();
      } catch (ArrayIndexOutOfBoundsException except) {
        System.out.println("ERROR: Required parameter missing from "
            + args[i - 1]);
        return printUsage();
      }
    }
    // Make sure there are exactly 2 parameters left.
    if (other_args.size() != 2) {
      System.out.println("ERROR: Wrong number of parameters: "
          + other_args.size() + " instead of 2.");
      return printUsage();
    }

    FileInputFormat.setInputPaths(conf, other_args.get(0));
    FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));

    JobClient.runJob(conf);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new Demux(), args);
    // FIX: propagate the tool's status as the process exit code instead
    // of silently discarding it (the old code was "return;").
    System.exit(res);
  }
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/DemuxManager.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.chukwa.util.NagiosHelper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
/**
 * Daemon that drives the demux pipeline: it moves completed data-sink
 * files (*.done) into the demux MR input directory, runs the Demux
 * MapReduce job, promotes the output to the post-process directory and
 * archives the consumed input. Optionally reports status to Nagios.
 *
 * Fix in this revision: the interrupt status is restored when the error
 * back-off sleep is interrupted (it was previously swallowed, losing
 * shutdown interrupts).
 */
public class DemuxManager implements CHUKWA_CONSTANT {
  static Logger log = Logger.getLogger(DemuxManager.class);

  // Consecutive-failure bookkeeping; reset after a successful demux run.
  int globalErrorcounter = 0;
  Date firstErrorTime = null;

  // Back-off (seconds) after an error / when no data-sink files exist.
  protected int ERROR_SLEEP_TIME = 60;
  protected int NO_DATASINK_SLEEP_TIME = 20;

  protected int DEFAULT_MAX_ERROR_COUNT = 6;
  protected int DEFAULT_MAX_FILES_PER_DEMUX = 500;
  protected int DEFAULT_REDUCER_COUNT = 8;

  protected int maxPermittedErrorCount = DEFAULT_MAX_ERROR_COUNT;
  protected int demuxReducerCount = 0;

  protected ChukwaConfiguration conf = null;
  protected FileSystem fs = null;
  // How many times the current demux input has been re-attempted.
  protected int reprocess = 0;
  protected boolean sendAlert = true;

  // Day stamp for archive/error sub-directories. Only used from the
  // single daemon thread, so the non-thread-safe format is acceptable.
  protected SimpleDateFormat dayTextFormat = new java.text.SimpleDateFormat("yyyyMMdd");
  protected volatile boolean isRunning = true;

  // Only files whose name ends in ".done" are complete data-sink files.
  final private static PathFilter DATA_SINK_FILTER = new PathFilter() {
    public boolean accept(Path file) {
      return file.getName().endsWith(".done");
    }
  };

  public static void main(String[] args) throws Exception {
    DemuxManager manager = new DemuxManager();
    manager.start();
  }

  public DemuxManager() throws Exception {
    this.conf = new ChukwaConfiguration();
    init();
  }

  public DemuxManager(ChukwaConfiguration conf) throws Exception {
    this.conf = conf;
    init();
  }

  /** Connects to the filesystem named by the configuration. */
  protected void init() throws IOException, URISyntaxException {
    String fsName = conf.get(HDFS_DEFAULT_NAME_FIELD);
    fs = FileSystem.get(new URI(fsName), conf);
  }

  /** Asks the processing loop started by start() to stop. */
  public void shutdown() {
    this.isRunning = false;
  }

  /** @return how many times the current input has been reprocessed */
  public int getReprocess() {
    return reprocess;
  }

  /**
   * Start the Demux Manager daemon. Loops until shutdown() is called or
   * too many consecutive errors occur.
   * @throws Exception if error in processing data
   */
  public void start() throws Exception {

    String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD, DEFAULT_CHUKWA_ROOT_DIR_NAME);
    if ( ! chukwaRootDir.endsWith("/") ) {
      chukwaRootDir += "/";
    }
    log.info("chukwaRootDir:" + chukwaRootDir);

    String demuxRootDir = chukwaRootDir + DEFAULT_DEMUX_PROCESSING_DIR_NAME;
    String demuxErrorDir = demuxRootDir + DEFAULT_DEMUX_IN_ERROR_DIR_NAME;
    String demuxInputDir = demuxRootDir + DEFAULT_DEMUX_MR_INPUT_DIR_NAME;
    String demuxOutputDir = demuxRootDir + DEFAULT_DEMUX_MR_OUTPUT_DIR_NAME;

    String dataSinkDir = conf.get(CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + DEFAULT_CHUKWA_LOGS_DIR_NAME);
    if ( ! dataSinkDir.endsWith("/") ) {
      dataSinkDir += "/";
    }
    log.info("dataSinkDir:" + dataSinkDir);

    String postProcessDir = conf.get(CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + DEFAULT_CHUKWA_POSTPROCESS_DIR_NAME);
    if ( ! postProcessDir.endsWith("/") ) {
      postProcessDir += "/";
    }
    log.info("postProcessDir:" + postProcessDir);

    String archiveRootDir = conf.get(CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + DEFAULT_CHUKWA_DATASINK_DIR_NAME);
    if ( ! archiveRootDir.endsWith("/") ) {
      archiveRootDir += "/";
    }
    log.info("archiveRootDir:" + archiveRootDir);

    maxPermittedErrorCount = conf.getInt(CHUKWA_DEMUX_MAX_ERROR_COUNT_FIELD,
                                         DEFAULT_MAX_ERROR_COUNT);
    demuxReducerCount = conf.getInt(CHUKWA_DEMUX_REDUCER_COUNT_FIELD, DEFAULT_REDUCER_COUNT);
    log.info("demuxReducerCount:" + demuxReducerCount);

    String nagiosHost = conf.get(CHUKWA_NAGIOS_HOST_FIELD);
    int nagiosPort = conf.getInt(CHUKWA_NAGIOS_PORT_FIELD, 0);
    String reportingHost = conf.get(CHUKWA_REPORTING_HOST_FIELD);

    log.info("Nagios information: nagiosHost:" + nagiosHost + ", nagiosPort:"
        + nagiosPort + ", reportingHost:" + reportingHost);

    // Alerting requires a complete Nagios host/port/reporting-host triple.
    if (nagiosHost == null || nagiosHost.length() == 0 || nagiosPort == 0 || reportingHost == null || reportingHost.length() == 0) {
      sendAlert = false;
      log.warn("Alerting is OFF");
    }

    boolean demuxReady = false;

    while (isRunning) {
      try {
        demuxReady = false;

        if (maxPermittedErrorCount != -1 && globalErrorcounter >= maxPermittedErrorCount) {
          log.warn("==================\nToo many errors (" + globalErrorcounter +
                   "), Bail out!\n==================");
          break;
        }

        // Check for anomalies
        if (checkDemuxOutputDir(demuxOutputDir) == true) {
          // delete current demux output dir
          if ( deleteDemuxOutputDir(demuxOutputDir) == false ) {
            log.warn("Cannot delete an existing demux output directory!");
            throw new IOException("Cannot move demuxOutput to postProcess!");
          }
          continue;
        } else if (checkDemuxInputDir(demuxInputDir) == true) { // dataSink already there
          reprocess++;

          // Data has been processed more than 3 times ... move to InError directory
          if (reprocess > 3) {
            if (moveDataSinkFilesToDemuxErrorDirectory(demuxInputDir, demuxErrorDir) == false) {
              log.warn("Cannot move dataSink files to DemuxErrorDir!");
              throw new IOException("Cannot move dataSink files to DemuxErrorDir!");
            }
            reprocess = 0;
            continue;
          }

          log.error("Demux inputDir aready contains some dataSink files,"
              + " going to reprocess, reprocessCount=" + reprocess);
          demuxReady = true;
        } else { // standard code path
          reprocess = 0;
          // Move new dataSink Files
          if (moveDataSinkFilesToDemuxInputDirectory(dataSinkDir, demuxInputDir) == true) {
            demuxReady = true; // if any are available
          } else {
            demuxReady = false; // if none
          }
        }

        // start a new demux ?
        if (demuxReady == true) {
          boolean demuxStatus = processData(dataSinkDir, demuxInputDir, demuxOutputDir,
              postProcessDir, archiveRootDir);
          sendDemuxStatusToNagios(nagiosHost, nagiosPort, reportingHost, demuxErrorDir, demuxStatus, null);

          // if demux suceeds, then we reset these.
          if (demuxStatus) {
            globalErrorcounter = 0;
            firstErrorTime = null;
          }
        } else {
          log.info("Demux not ready so going to sleep ...");
          Thread.sleep(NO_DATASINK_SLEEP_TIME * 1000);
        }
      } catch(Throwable e) {
        globalErrorcounter++;
        if (firstErrorTime == null) firstErrorTime = new Date();

        log.warn("Consecutive error number " + globalErrorcounter +
            " encountered since " + firstErrorTime, e);
        sendDemuxStatusToNagios(nagiosHost, nagiosPort, reportingHost, demuxErrorDir, false, e.getMessage());
        try {
          Thread.sleep(ERROR_SLEEP_TIME * 1000);
        } catch (InterruptedException e1) {
          // FIX: restore the interrupt status instead of swallowing it,
          // so a shutdown interrupt is not silently lost.
          Thread.currentThread().interrupt();
        }
        init();
      }
    }
  }

  /**
   * Send NSCA status to Nagios
   * @param nagiosHost Nagios server host
   * @param nagiosPort Nagios server port
   * @param reportingHost host reporting the status
   * @param demuxInErrorDir directory holding demux-in-error data
   * @param demuxStatus true if the last demux run succeeded
   * @param demuxException message of the failure, or null on success
   */
  protected void sendDemuxStatusToNagios(String nagiosHost, int nagiosPort, String reportingHost,
      String demuxInErrorDir, boolean demuxStatus, String demuxException) {

    if (sendAlert == false) {
      return;
    }

    boolean demuxInErrorStatus = true;
    String demuxInErrorMsg = "";
    try {
      Path pDemuxInErrorDir = new Path(demuxInErrorDir);
      if ( fs.exists(pDemuxInErrorDir)) {
        FileStatus[] demuxInErrorDirs = fs.listStatus(pDemuxInErrorDir);
        if (demuxInErrorDirs.length == 0) {
          demuxInErrorStatus = false;
        }
      }
    } catch (Throwable e) {
      demuxInErrorMsg = e.getMessage();
      log.warn(e);
    }

    // send Demux status
    if (demuxStatus == true) {
      NagiosHelper.sendNsca("Demux OK", NagiosHelper.NAGIOS_OK);
    } else {
      NagiosHelper.sendNsca("Demux failed. " + demuxException, NagiosHelper.NAGIOS_CRITICAL);
    }

    // send DemuxInErrorStatus
    if (demuxInErrorStatus == false) {
      NagiosHelper.sendNsca("DemuxInError OK", NagiosHelper.NAGIOS_OK);
    } else {
      NagiosHelper.sendNsca("DemuxInError not empty -" + demuxInErrorMsg, NagiosHelper.NAGIOS_CRITICAL);
    }
  }

  /**
   * Process Data, i.e.
   * - run demux
   * - move demux output to postProcessDir
   * - move dataSink file to archiveDir
   *
   * @param dataSinkDir data-sink source directory
   * @param demuxInputDir demux MR input directory
   * @param demuxOutputDir demux MR output directory
   * @param postProcessDir destination for successful demux output
   * @param archiveDir destination for consumed data-sink files
   * @return True iff succeed
   * @throws IOException if output or input cannot be moved
   */
  protected boolean processData(String dataSinkDir, String demuxInputDir,
      String demuxOutputDir, String postProcessDir, String archiveDir) throws IOException {

    boolean demuxStatus = false;

    long startTime = System.currentTimeMillis();
    demuxStatus = runDemux(demuxInputDir, demuxOutputDir);
    log.info("Demux Duration: " + (System.currentTimeMillis() - startTime));

    if (demuxStatus == false) {
      log.warn("Demux failed!");
    } else {
      // Move demux output to postProcessDir
      if (checkDemuxOutputDir(demuxOutputDir)) {
        if (moveDemuxOutputDirToPostProcessDirectory(demuxOutputDir, postProcessDir) == false) {
          log.warn("Cannot move demuxOutput to postProcess! bail out!");
          throw new IOException("Cannot move demuxOutput to postProcess! bail out!");
        }
      } else {
        log.warn("Demux processing OK but no output");
      }

      // Move DataSink Files to archiveDir
      if (moveDataSinkFilesToArchiveDirectory(demuxInputDir, archiveDir) == false) {
        log.warn("Cannot move datasinkFile to archive! bail out!");
        throw new IOException("Cannot move datasinkFile to archive! bail out!");
      }
    }

    return demuxStatus;
  }

  /**
   * Submit and Run demux Job
   * @param demuxInputDir demux MR input directory
   * @param demuxOutputDir demux MR output directory
   * @return true if Demux succeeded
   */
  protected boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    // to reload the configuration, and demux's reduce number
    Configuration tempConf = new Configuration(conf);
    tempConf.reloadConfiguration();
    demuxReducerCount = tempConf.getInt(CHUKWA_DEMUX_REDUCER_COUNT_FIELD, DEFAULT_REDUCER_COUNT);

    String[] demuxParams;
    int i = 0;
    Demux.addParsers(tempConf);
    demuxParams = new String[4];
    demuxParams[i++] = "-r";
    demuxParams[i++] = "" + demuxReducerCount;
    demuxParams[i++] = demuxInputDir;
    demuxParams[i++] = demuxOutputDir;

    try {
      return ( 0 == ToolRunner.run(tempConf, new Demux(), demuxParams) );
    } catch (Throwable e) {
      e.printStackTrace();
      globalErrorcounter++;
      if (firstErrorTime == null) firstErrorTime = new Date();
      log.error("Failed to run demux. Consecutive error number " +
          globalErrorcounter + " encountered since " + firstErrorTime, e);
    }
    return false;
  }

  /**
   * Move dataSink files to Demux input directory
   * @param dataSinkDir source directory scanned for *.done files
   * @param demuxInputDir destination demux MR input directory
   * @return true if there's any dataSink files ready to be processed
   * @throws IOException if the listing or move fails
   */
  protected boolean moveDataSinkFilesToDemuxInputDirectory(
      String dataSinkDir, String demuxInputDir) throws IOException {
    Path pDataSinkDir = new Path(dataSinkDir);
    Path pDemuxInputDir = new Path(demuxInputDir);
    log.info("dataSinkDir: " + dataSinkDir);
    log.info("demuxInputDir: " + demuxInputDir);

    boolean containsFile = false;

    FileStatus[] dataSinkFiles = fs.listStatus(pDataSinkDir, DATA_SINK_FILTER);
    if (dataSinkFiles.length > 0) {
      setup(pDemuxInputDir);
    }

    // Cap the batch size so a single demux run stays bounded.
    int maxFilesPerDemux = 0;
    for (FileStatus fstatus : dataSinkFiles) {
      boolean rename = fs.rename(fstatus.getPath(), pDemuxInputDir);
      log.info("Moving " + fstatus.getPath() + " to " + pDemuxInputDir + ", status is:" + rename);
      maxFilesPerDemux++;
      containsFile = true;

      if (maxFilesPerDemux >= DEFAULT_MAX_FILES_PER_DEMUX) {
        log.info("Max File per Demux reached:" + maxFilesPerDemux);
        break;
      }
    }
    return containsFile;
  }

  /**
   * Move sourceFolder inside destFolder
   * @param dataSinkDir : ex chukwa/demux/inputDir
   * @param demuxErrorDir : ex /chukwa/demux/inError
   * @return true if able to move chukwa/demux/inputDir to /chukwa/demux/inError/&lt;YYYYMMDD&gt;/demuxInputDirXXX
   * @throws IOException if the move fails
   */
  protected boolean moveDataSinkFilesToDemuxErrorDirectory(
      String dataSinkDir, String demuxErrorDir) throws IOException {
    demuxErrorDir += "/" + dayTextFormat.format(System.currentTimeMillis());
    return moveFolder(dataSinkDir, demuxErrorDir, "demuxInputDir");
  }

  /**
   * Move sourceFolder inside destFolder
   * @param demuxInputDir : ex chukwa/demux/inputDir
   * @param archiveDirectory : ex /chukwa/archives
   * @return true if able to move chukwa/demux/inputDir to /chukwa/archives/raw/&lt;YYYYMMDD&gt;/dataSinkDirXXX
   * @throws IOException if the move fails
   */
  protected boolean moveDataSinkFilesToArchiveDirectory(
      String demuxInputDir, String archiveDirectory) throws IOException {
    archiveDirectory += "/" + dayTextFormat.format(System.currentTimeMillis());
    return moveFolder(demuxInputDir, archiveDirectory, "dataSinkDir");
  }

  /**
   * Move sourceFolder inside destFolder
   * @param demuxOutputDir : ex chukwa/demux/outputDir
   * @param postProcessDirectory : ex /chukwa/postProcess
   * @return true if able to move chukwa/demux/outputDir to /chukwa/postProcess/demuxOutputDirXXX
   * @throws IOException if the move fails
   */
  protected boolean moveDemuxOutputDirToPostProcessDirectory(
      String demuxOutputDir, String postProcessDirectory) throws IOException {
    return moveFolder(demuxOutputDir, postProcessDirectory, "demuxOutputDir");
  }

  /**
   * Test if demuxInputDir exists
   * @param demuxInputDir demux MR input directory
   * @return true if demuxInputDir exists
   * @throws IOException if the filesystem cannot be queried
   */
  protected boolean checkDemuxInputDir(String demuxInputDir)
      throws IOException {
    return dirExists(demuxInputDir);
  }

  /**
   * Test if demuxOutputDir exists
   * @param demuxOutputDir demux MR output directory
   * @return true if demuxOutputDir exists
   * @throws IOException if the filesystem cannot be queried
   */
  protected boolean checkDemuxOutputDir(String demuxOutputDir)
      throws IOException {
    return dirExists(demuxOutputDir);
  }

  /**
   * Delete DemuxOutput directory
   * @param demuxOutputDir demux MR output directory
   * @return true if succeed
   * @throws IOException if the delete fails
   */
  protected boolean deleteDemuxOutputDir(String demuxOutputDir) throws IOException {
    return fs.delete(new Path(demuxOutputDir), true);
  }

  /**
   * Create directory if !exists
   * @param directory directory to create
   * @throws IOException if the directory cannot be created
   */
  protected void setup(Path directory) throws IOException {
    if ( ! fs.exists(directory)) {
      fs.mkdirs(directory);
    }
  }

  /**
   * Check if source exists and if source is a directory
   * @param directory path to test
   * @return true iff the path exists and is a directory
   * @throws IOException if the filesystem cannot be queried
   */
  protected boolean dirExists(String directory) throws IOException {
    Path pDirectory = new Path(directory);
    return (fs.exists(pDirectory) && fs.getFileStatus(pDirectory).isDir());
  }

  /**
   * Move sourceFolder inside destFolder
   * @param srcDir source directory to move
   * @param destDir destination parent directory (created if missing)
   * @param prefix name prefix for the time-stamped destination folder
   * @return true if the rename succeeded
   * @throws IOException if the destination cannot be created
   */
  protected boolean moveFolder(String srcDir, String destDir, String prefix) throws IOException {
    if (!destDir.endsWith("/")) {
      destDir += "/";
    }
    Path pSrcDir = new Path(srcDir);
    Path pDestDir = new Path(destDir);
    setup(pDestDir);
    // Unique-ify the destination with a timestamp so repeated moves
    // never collide.
    destDir += prefix + "_" + System.currentTimeMillis();
    Path pFinalDestDir = new Path(destDir);

    return fs.rename(pSrcDir, pFinalDestDir);
  }
}
// ==== File: core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/DailyChukwaRecordRolling.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.util.HierarchyDataType;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.Tool;
import org.apache.log4j.Logger;
// TODO do an abstract class for all rolling
/**
 * Rolls Chukwa record data up to daily granularity: for every completed day
 * found under the rolling folder, merges all hourly .evt files of each
 * (cluster, data source) pair into a single daily file using an identity
 * map/reduce job.
 *
 * Usage:
 *   DailyChukwaRecordRolling rollInSequence <True/False> deleteRawdata <True/False>
 */
public class DailyChukwaRecordRolling extends Configured implements Tool {
  static Logger log = Logger.getLogger(DailyChukwaRecordRolling.class);
  // NOTE: SimpleDateFormat is not thread-safe; it is only used from main() here.
  static SimpleDateFormat sdf = new java.text.SimpleDateFormat("yyyyMMdd");
  static ChukwaConfiguration conf = null;
  static FileSystem fs = null;
  static final String HadoopLogDir = "_logs";
  static final String hadoopTempDir = "_temporary";
  static boolean rollInSequence = true;
  static boolean deleteRawdata = false;

  /** Prints command line usage to stderr. */
  public static void usage() {
    System.err
        .println("usage: java org.apache.hadoop.chukwa.extraction.demux.DailyChukwaRecordRolling rollInSequence <True/False> deleteRawdata <True/False>");
  }

  /**
   * Checks whether hourly rolling has completed for every hour directory
   * present under the given daily stream directory. An hour directory counts
   * as done when it contains a file whose name includes "_HourlyDone_".
   * Missing hour directories are tolerated.
   *
   * @param dailyStreamDirectory repository directory holding one day of data
   * @return true if every present hour is done; false if any hour is still
   *         pending or an error occurs while checking
   */
  public static boolean hourlyRolling(String dailyStreamDirectory) {
    Path pHour = null;
    try {
      log.info("Checking for HourlyRolling in " + dailyStreamDirectory);
      for (int i = 0; i < 24; i++) {
        pHour = new Path(dailyStreamDirectory + "/" + i);
        if (!fs.exists(pHour)) {
          log.info("HourlyData is missing for:" + pHour);
          continue;
        }
        FileStatus[] files = fs.listStatus(pHour);
        boolean containsHourly = false;
        for (FileStatus file : files) {
          log.info("Debug checking" + file.getPath());
          if (file.getPath().getName().indexOf("_HourlyDone_") > 0) {
            containsHourly = true;
            break;
          }
        }
        if (!containsHourly) {
          log.info("HourlyDone is missing for : " + pHour);
          return false;
        }
      }
      return true;
    } catch (Exception e) {
      // Fixed: was e.printStackTrace(); report through the logger instead.
      log.warn("Error while checking hourly rolling under " + dailyStreamDirectory, e);
      return false;
    }
  }

  /**
   * Merges hourly event files into daily files for every (cluster, data
   * source) found under rollingFolder/daily/<workingDay>. A data source is
   * only rolled once all of its hourly rolling is done; processed rolling
   * directories are deleted, and the day directory itself is deleted only
   * when every data source was rolled.
   *
   * @param chukwaMainRepository root of the main record repository
   * @param tempDir scratch directory for the merge M/R job
   * @param rollingFolder root of the rolling work area
   * @param workingDay day being rolled, as yyyyMMdd
   * @throws IOException if a filesystem operation fails
   */
  public static void buildDailyFiles(String chukwaMainRepository,
      String tempDir, String rollingFolder, int workingDay) throws IOException {
    // process
    boolean alldone = true;
    Path dayPath = new Path(rollingFolder + "/daily/" + workingDay);
    FileStatus[] clustersFS = fs.listStatus(dayPath);
    for (FileStatus clusterFs : clustersFS) {
      String cluster = clusterFs.getPath().getName();
      Path dataSourceClusterHourPaths = new Path(rollingFolder + "/daily/"
          + workingDay + "/" + cluster);
      FileStatus[] dataSourcesFS = fs.listStatus(dataSourceClusterHourPaths);
      for (FileStatus dataSourceFS : dataSourcesFS) {
        // CHUKWA-648: Make Chukwa Reduce Type to support hierarchy format
        for (FileStatus dataSourcePath : HierarchyDataType.globStatus(fs,
            dataSourceFS.getPath(), true)) {
          String dataSource = HierarchyDataType.getDataType(
              dataSourcePath.getPath(),
              fs.getFileStatus(dataSourceClusterHourPaths).getPath());
          // Repo path = reposRootDirectory/<cluster>/<day>/*/*.evt
          // put the rotate flag
          fs.mkdirs(new Path(chukwaMainRepository + "/" + cluster + "/"
              + dataSource + "/" + workingDay + "/rotateDone"));
          // Daily rolling must wait until every hourly roll is complete.
          if (!hourlyRolling(chukwaMainRepository + "/" + cluster + "/" + dataSource + "/" + workingDay)) {
            log.warn("Skipping this directory, hourly not done. " + chukwaMainRepository + "/" + cluster + "/"
                + dataSource + "/" + workingDay );
            alldone = false;
            continue;
          }
          log.info("Running Daily rolling for " + chukwaMainRepository + "/" + cluster + "/"
              + dataSource + "/" + workingDay + "/rotateDone");
          // rotate
          // Merge
          String[] mergeArgs = new String[5];
          // input
          mergeArgs[0] = chukwaMainRepository + "/" + cluster + "/" + dataSource
              + "/" + workingDay + "/[0-9]*/*.evt";
          // temp dir
          mergeArgs[1] = tempDir + "/" + cluster + "/" + dataSource + "/"
              + workingDay + "_" + System.currentTimeMillis();
          // final output dir
          mergeArgs[2] = chukwaMainRepository + "/" + cluster + "/" + dataSource
              + "/" + workingDay;
          // final output fileName
          mergeArgs[3] = dataSource + "_DailyDone_" + workingDay;
          // delete rolling directory
          mergeArgs[4] = rollingFolder + "/daily/" + workingDay + "/" + cluster
              + "/" + dataSource;
          log.info("DailyChukwaRecordRolling 0: " + mergeArgs[0]);
          log.info("DailyChukwaRecordRolling 1: " + mergeArgs[1]);
          log.info("DailyChukwaRecordRolling 2: " + mergeArgs[2]);
          log.info("DailyChukwaRecordRolling 3: " + mergeArgs[3]);
          log.info("DailyChukwaRecordRolling 4: " + mergeArgs[4]);
          RecordMerger merge = new RecordMerger(conf, fs,
              new DailyChukwaRecordRolling(), mergeArgs, deleteRawdata);
          // NOTE(review): allMerge is created per data source, so the join
          // loop below only ever waits on a single thread - confirm whether
          // it was meant to live outside this loop.
          List<RecordMerger> allMerge = new ArrayList<RecordMerger>();
          if (rollInSequence) {
            merge.mergeRecords();
          } else {
            allMerge.add(merge);
            merge.start();
          }
          // join all Threads
          if (!rollInSequence) {
            while (allMerge.size() > 0) {
              RecordMerger m = allMerge.remove(0);
              try {
                m.join();
              } catch (InterruptedException e) {
                // Fixed: interruption was silently swallowed. We still keep
                // joining the remaining merge threads, but log the event.
                log.warn("Interrupted while waiting for merge thread", e);
              }
            }
          } // End if (!rollInSequence)
          // Delete the processed dataSourceFS
          FileUtil.fullyDelete(fs, dataSourceFS.getPath());
        } // End for(FileStatus dataSourcePath ...)
        // Delete the processed clusterFs
        if (alldone) {
          FileUtil.fullyDelete(fs, clusterFs.getPath());
        }
      } // End for(FileStatus dataSourceFS : dataSourcesFS)
    } // End for(FileStatus clusterFs : clustersFS)
    // Delete the processed dayPath
    if (alldone) {
      FileUtil.fullyDelete(fs, dayPath);
    }
  }

  /**
   * @param args is command line parameters
   * @throws Exception if unable to process data
   */
  public static void main(String[] args) throws Exception {
    conf = new ChukwaConfiguration();
    String fsName = conf.get("writer.hdfs.filesystem");
    fs = FileSystem.get(new URI(fsName), conf);
    // TODO read from config
    String rollingFolder = "/chukwa/rolling/";
    String chukwaMainRepository = "/chukwa/repos/";
    String tempDir = "/chukwa/temp/dailyRolling/";
    // TODO do a real parameter parsing
    if (args.length != 4) {
      usage();
      return;
    }
    if (!args[0].equalsIgnoreCase("rollInSequence")) {
      usage();
      return;
    }
    if (!args[2].equalsIgnoreCase("deleteRawdata")) {
      usage();
      return;
    }
    // Boolean.parseBoolean is a case-insensitive "true" test - identical to
    // the previous equalsIgnoreCase("true") if/else branches.
    rollInSequence = Boolean.parseBoolean(args[1]);
    deleteRawdata = Boolean.parseBoolean(args[3]);
    log.info("rollInSequence: " + rollInSequence);
    log.info("deleteRawdata: " + deleteRawdata);
    Calendar calendar = Calendar.getInstance();
    int currentDay = Integer.parseInt(sdf.format(calendar.getTime()));
    int currentHour = calendar.get(Calendar.HOUR_OF_DAY);
    log.info("CurrentDay: " + currentDay);
    log.info("currentHour" + currentHour);
    Path rootFolder = new Path(rollingFolder + "/daily/");
    FileStatus[] daysFS = fs.listStatus(rootFolder);
    for (FileStatus dayFS : daysFS) {
      try {
        int workingDay = Integer.parseInt(dayFS.getPath().getName());
        log.info("Daily working on :" + workingDay);
        // Only roll fully elapsed days.
        if (workingDay < currentDay) {
          try {
            buildDailyFiles(chukwaMainRepository, tempDir, rollingFolder,
                workingDay);
          } catch (Throwable e) {
            // Fixed: was e.printStackTrace(); keep the cause in the log.
            log.warn("Daily rolling failed on :" + rollingFolder + "/" + workingDay, e);
          }
        } // End if ( workingDay < currentDay)
      } catch (NumberFormatException e) {
        /* Not a standard Day directory, skip */
        log.debug(ExceptionUtil.getStackTrace(e));
      }
    } // for(FileStatus dayFS : daysFS)
  }

  /**
   * Identity M/R job that concatenates the matched record files.
   * args[0] = input glob, args[1] = output directory.
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(new ChukwaConfiguration(), DailyChukwaRecordRolling.class);
    conf.setJobName("DailyChukwa-Rolling");
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    conf.setOutputKeyClass(ChukwaRecordKey.class);
    conf.setOutputValueClass(ChukwaRecord.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    log.info("DailyChukwaRecordRolling input: " + args[0]);
    log.info("DailyChukwaRecordRolling output: " + args[1]);
    FileInputFormat.setInputPaths(conf, args[0]);
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    conf.setJobPriority(JobPriority.LOW);
    // A single reducer yields one merged daily output file.
    conf.setNumReduceTasks(1);
    JobClient.runJob(conf);
    return 0;
  }
}
| 8,352 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/ChukwaRecordOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.extraction.demux.processor.Util;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.RecordUtil;
import org.apache.hadoop.chukwa.util.HierarchyDataType;
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
import org.apache.log4j.Logger;
/**
 * Output format that routes each record to a file named by its cluster,
 * reduce type (expanded into a hierarchy directory per CHUKWA-648) and
 * record timestamp.
 */
public class ChukwaRecordOutputFormat extends
    MultipleSequenceFileOutputFormat<ChukwaRecordKey, ChukwaRecord> {
  static Logger log = Logger.getLogger(ChukwaRecordOutputFormat.class);

  /**
   * Builds the output path for a record:
   * <cluster>/<reduceType>/<hierarchy dirs>/<time-based name>.
   * A hierarchical reduce type "a-b-c" is expanded to directories "a/b/c".
   */
  @Override
  protected String generateFileNameForKeyValue(ChukwaRecordKey key,
      ChukwaRecord record, String name) {
    String reduceType = key.getReduceType();
    StringBuilder path = new StringBuilder();
    path.append(RecordUtil.getClusterName(record)).append('/')
        .append(reduceType).append('/')
        .append(HierarchyDataType.getHierarchyDataTypeDirectory(reduceType))
        .append(Util.generateTimeOutput(record.getTime()));
    return path.toString();
  }
}
| 8,353 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/ChukwaRecordPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.log4j.Logger;
/**
 * Partitions Chukwa records among reducers by the hash of their reduce type.
 * Keys starting with CHUKWA_CONSTANT.INCLUDE_KEY_IN_PARTITIONER also mix the
 * record key itself into the hash.
 */
public class ChukwaRecordPartitioner<K, V> implements
    Partitioner<ChukwaRecordKey, ChukwaRecord> {
  static Logger log = Logger.getLogger(ChukwaRecordPartitioner.class);

  /** No job configuration is needed by this partitioner. */
  public void configure(JobConf arg0) {
  }

  /**
   * @return a partition in [0, numReduceTasks); the Integer.MAX_VALUE mask
   *         keeps the hash code non-negative before the modulo.
   */
  public int getPartition(
      org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey key,
      org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord record,
      int numReduceTasks) {
    if (log.isDebugEnabled()) {
      log.debug("Partitioner key: ["
          + key.getReduceType()
          + "] - Reducer:"
          + ((key.getReduceType().hashCode() & Integer.MAX_VALUE) % numReduceTasks));
    }
    String hashkey;
    if (key.getKey().startsWith(CHUKWA_CONSTANT.INCLUDE_KEY_IN_PARTITIONER)) {
      hashkey = key.getReduceType() + "#" + key.getKey();
    } else {
      hashkey = key.getReduceType();
    }
    return (hashkey.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
  }
}
| 8,354 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/RecordMerger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
 * Runs a merge map/reduce job (the supplied {@link Tool}) over a set of
 * record files and promotes the merged output into its final repository
 * location. May be invoked synchronously via mergeRecords() or
 * asynchronously as a Thread.
 *
 * mergeArgs layout:
 *   [0] input file glob
 *   [1] M/R temporary output directory
 *   [2] final output directory
 *   [3] final output file name (also the rolling tag deleted on success)
 *   [4] rolling directory to delete
 */
public class RecordMerger extends Thread {
  static Logger log = Logger.getLogger(RecordMerger.class);
  ChukwaConfiguration conf = null;
  FileSystem fs = null;
  String[] mergeArgs = null;
  Tool tool = null;
  boolean deleteRawData = false;

  /**
   * @param conf job configuration
   * @param fs filesystem holding the record files
   * @param tool the M/R job to run for the merge
   * @param mergeArgs argument vector (see class comment); defensively copied
   * @param deleteRawData whether to delete the inputs after a successful merge
   */
  public RecordMerger(ChukwaConfiguration conf, FileSystem fs, Tool tool,
      String[] mergeArgs, boolean deleteRawData) {
    this.conf = conf;
    this.fs = fs;
    this.tool = tool;
    this.mergeArgs = mergeArgs.clone();
    this.deleteRawData = deleteRawData;
  }

  @Override
  public void run() {
    mergeRecords();
  }

  /**
   * Runs the merge job; on success moves part-00000 into the final directory
   * and cleans up the inputs, the rolling tag and the temp directory.
   *
   * @throws RuntimeException (with cause) if the job fails or cleanup errors
   */
  void mergeRecords() {
    // Fixed: was System.out.println; report job progress through the logger.
    log.info("\t Running Merge! : output [" + mergeArgs[1] + "]");
    int res;
    try {
      res = ToolRunner.run(conf, tool, mergeArgs);
      log.info("MR exit status: " + res);
      if (res == 0) {
        writeRecordFile(mergeArgs[1] + "/part-00000", mergeArgs[2],
            mergeArgs[3]);
        // delete input
        if (deleteRawData) {
          FileUtil.fullyDelete(fs, new Path(mergeArgs[0]));
          Path hours = new Path(mergeArgs[2]);
          FileStatus[] hoursOrMinutesFS = fs.listStatus(hours);
          for (FileStatus hourOrMinuteFS : hoursOrMinutesFS) {
            String dirName = hourOrMinuteFS.getPath().getName();
            try {
              // Purely numeric directory names are hour/minute directories.
              Integer.parseInt(dirName);
              FileUtil.fullyDelete(fs, new Path(mergeArgs[2] + "/" + dirName));
              if (log.isDebugEnabled()) {
                log.debug("Deleting Hour directory: " + mergeArgs[2] + "/"
                    + dirName);
              }
            } catch (NumberFormatException e) {
              // Not an hour or minutes directory - leave it alone.
              log.debug(ExceptionUtil.getStackTrace(e));
            }
          }
        }
        // delete rolling tag
        FileUtil.fullyDelete(fs, new Path(mergeArgs[3]));
        // delete M/R temp directory
        FileUtil.fullyDelete(fs, new Path(mergeArgs[1]));
      } else {
        throw new RuntimeException("Error in M/R merge operation!");
      }
    } catch (Exception e) {
      // Fixed: dropped the redundant printStackTrace; the cause is preserved
      // in the rethrown exception.
      throw new RuntimeException("Error in M/R merge operation!", e);
    }
  }

  /**
   * Moves the merged record file into outputDir as <fileName>.<count>.evt,
   * incrementing count until an unused name is found.
   *
   * @param input path of the merged part file
   * @param outputDir final destination directory (created if absent)
   * @param fileName base name of the destination file
   * @throws IOException if more than 1000 numbered files already exist
   */
  void writeRecordFile(String input, String outputDir, String fileName)
      throws IOException {
    boolean done = false;
    int count = 1;
    Path recordFile = new Path(input);
    do {
      Path destDirPath = new Path(outputDir);
      Path destFilePath = new Path(outputDir + "/" + fileName + "." + count
          + ".evt");
      if (!fs.exists(destDirPath)) {
        fs.mkdirs(destDirPath);
        log.info(">>>>>>>>>>>> create Dir" + destDirPath);
      }
      if (!fs.exists(destFilePath)) {
        if (!fs.rename(recordFile, destFilePath)) {
          // Rename can fail (e.g. across volumes); fall back to a copy.
          log.info(">>>>>>>>>>>> Use standard copy, rename failed");
          FileUtil.copy(fs, recordFile, fs, destFilePath, false, false, conf);
        }
        done = true;
      } else {
        // Fixed: previous message ("Start MoveToRepository main()") was a
        // copy-paste artifact and did not describe this situation.
        log.info("Destination file already exists: " + destFilePath);
      }
      count++;
      // Just put a limit here
      // TODO read from config
      if (count > 1000) {
        throw new IOException("too many files in this directory: "
            + destDirPath);
      }
    } while (!done);
  }
}
| 8,355 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/MoveToRepository.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.util.HierarchyDataType;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
// TODO
// First version of the Spill
// need some polishing
/**
 * Moves demux output ("spill") files into the main Chukwa repository,
 * fanning records out by cluster, data source, day, hour and minute, and
 * marking directories for subsequent hourly/daily rolling.
 *
 * All state is static and the shared SimpleDateFormat/Calendar fields are
 * unsynchronized, so this class is not thread-safe; it is intended to be
 * driven through doMove()/main().
 */
public class MoveToRepository {
  static Logger log = Logger.getLogger(MoveToRepository.class);
  static ChukwaConfiguration conf = null;
  static FileSystem fs = null;
  static SimpleDateFormat sdf = new java.text.SimpleDateFormat("yyyyMMdd");
  static Calendar calendar = Calendar.getInstance();

  /**
   * Processes one cluster directory of demux output: for every datasource
   * directory beneath it, moves all .evt files to destDir, honoring
   * hierarchical data types (CHUKWA-648).
   *
   * @param srcDir cluster directory produced by demux
   * @param destDir repository destination for this cluster
   * @return set of destination paths; NOTE(review): currently always empty -
   *         the paths returned by processDatasourceDirectory are discarded.
   * @throws IOException if srcDir or a datasource entry is not a directory
   */
  static Collection<Path> processClusterDirectory(Path srcDir, String destDir)
      throws Exception {
    log.info("processClusterDirectory (" + srcDir.getName() + "," + destDir
        + ")");
    FileStatus fstat = fs.getFileStatus(srcDir);
    Collection<Path> destFiles = new HashSet<Path>();
    if (!fstat.isDir()) {
      throw new IOException(srcDir + " is not a directory!");
    } else {
      FileStatus[] datasourceDirectories = fs.listStatus(srcDir);
      for (FileStatus datasourceDirectory : datasourceDirectories) {
        log.info(datasourceDirectory.getPath() + " isDir?"
            + datasourceDirectory.isDir());
        if (!datasourceDirectory.isDir()) {
          throw new IOException(
              "Top level datasource directory should be a directory :"
                  + datasourceDirectory.getPath());
        }
        // Only event files (*.evt) are moved.
        PathFilter filter = new PathFilter()
        {public boolean accept(Path file) {
          return file.getName().endsWith(".evt");
        } };
        //CHUKWA-648: Make Chukwa Reduce Type to support hierarchy format
        // to processDataSourceDirectory according to hierarchy data type format
        List<FileStatus> eventfiles = HierarchyDataType.globStatus(fs, datasourceDirectory.getPath(),filter,true);
        for (FileStatus eventfile : eventfiles){
          // Derive the (possibly nested) data type name from the event
          // file's parent directory relative to the cluster directory.
          Path datatypeDir = eventfile.getPath().getParent();
          String dirName = HierarchyDataType.getDataType(datatypeDir, srcDir);
          Path destPath = new Path(destDir + "/" + dirName);
          log.info("dest directory path: " + destPath);
          log.info("processClusterDirectory processing Datasource: (" + dirName
              + ")");
          StringBuilder dtDir = new StringBuilder(srcDir.toString()).append("/").append(dirName);
          log.debug("srcDir: " + dtDir.toString());
          processDatasourceDirectory(srcDir.toString(), new Path(dtDir.toString()), destDir + "/" + dirName);
        }
      }
    }
    return destFiles;
  }

  /**
   * Moves every record file in srcDir to its destination, deriving day, hour
   * and minute from the file name suffix:
   *   *.D.evt - daily file  (<name>_<yyyyMMdd>.D.evt)
   *   *.H.evt - hourly file (<name>_<yyyyMMdd>_<H or HH>.H.evt)
   *   *.R.evt - raw file    (<name>_<yyyyMMdd>_<H or HH>_<mm>.R.evt)
   * The hour field may be one or two digits; the charAt('_') probes below
   * pick the matching set of substring offsets. Hourly/raw moves also mark
   * the day (and hour) for rolling.
   *
   * @param clusterpath cluster directory path (its last component is the cluster name)
   * @param srcDir datasource directory being drained
   * @param destDir destination directory in the repository
   * @return destination paths of the moved files
   * @throws RuntimeException if a file name matches none of the known formats
   */
  static Collection<Path> processDatasourceDirectory(String clusterpath, Path srcDir,
      String destDir) throws Exception {
    Path cPath = new Path(clusterpath);
    String cluster = cPath.getName();
    Collection<Path> destFiles = new HashSet<Path>();
    String fileName = null;
    int fileDay = 0;
    int fileHour = 0;
    int fileMin = 0;
    FileStatus[] recordFiles = fs.listStatus(srcDir);
    for (FileStatus recordFile : recordFiles) {
      // dataSource_20080915_18_15.1.evt
      // <datasource>_<yyyyMMdd_HH_mm>.1.evt
      fileName = recordFile.getPath().getName();
      log.info("processDatasourceDirectory processing RecordFile: (" + fileName
          + ")");
      log.info("fileName: " + fileName);
      // All offsets below are computed from the end of the file name.
      int l = fileName.length();
      String dataSource = HierarchyDataType.getDataType(srcDir, cPath);
      log.info("Datasource: " + dataSource);
      if (fileName.endsWith(".D.evt")) {
        // Hadoop_dfs_datanode_20080919.D.evt
        fileDay = Integer.parseInt(fileName.substring(l - 14, l - 6));
        Path destFile = writeRecordFile(destDir + "/" + fileDay + "/",
            recordFile.getPath(),
            HierarchyDataType.getHierarchyDataTypeFileName(dataSource) + "_"
                + fileDay);
        if (destFile != null) {
          destFiles.add(destFile);
        }
      } else if (fileName.endsWith(".H.evt")) {
        // Hadoop_dfs_datanode_20080925_1.H.evt
        // Hadoop_dfs_datanode_20080925_12.H.evt
        String day = null;
        String hour = null;
        // '_' at l-8 means a single-digit hour; otherwise two digits.
        if (fileName.charAt(l - 8) == '_') {
          day = fileName.substring(l - 16, l - 8);
          log.info("day->" + day);
          hour = "" + fileName.charAt(l - 7);
          log.info("hour->" + hour);
        } else {
          day = fileName.substring(l - 17, l - 9);
          log.info("day->" + day);
          hour = fileName.substring(l - 8, l - 6);
          log.info("hour->" + hour);
        }
        fileDay = Integer.parseInt(day);
        fileHour = Integer.parseInt(hour);
        // rotate there so spill
        Path destFile = writeRecordFile(destDir + "/" + fileDay + "/"
            + fileHour + "/", recordFile.getPath(),
            HierarchyDataType.getHierarchyDataTypeFileName(dataSource) + "_"
                + fileDay + "_" + fileHour);
        if (destFile != null) {
          destFiles.add(destFile);
        }
        // mark this directory for daily rotate
        addDirectory4Rolling(true, fileDay, fileHour, cluster, dataSource);
      } else if (fileName.endsWith(".R.evt")) {
        // '_' at l-11 means a single-digit hour; otherwise two digits.
        if (fileName.charAt(l - 11) == '_') {
          fileDay = Integer.parseInt(fileName.substring(l - 19, l - 11));
          fileHour = Integer.parseInt("" + fileName.charAt(l - 10));
          fileMin = Integer.parseInt(fileName.substring(l - 8, l - 6));
        } else {
          fileDay = Integer.parseInt(fileName.substring(l - 20, l - 12));
          fileHour = Integer.parseInt(fileName.substring(l - 11, l - 9));
          fileMin = Integer.parseInt(fileName.substring(l - 8, l - 6));
        }
        log.info("fileDay: " + fileDay);
        log.info("fileHour: " + fileHour);
        log.info("fileMin: " + fileMin);
        Path destFile = writeRecordFile(
            destDir + "/" + fileDay + "/" + fileHour + "/" + fileMin,
            recordFile.getPath(),
            HierarchyDataType.getHierarchyDataTypeFileName(HierarchyDataType.trimSlash(dataSource))
                + "_" + fileDay + "_" + fileHour + "_" + fileMin);
        if (destFile != null) {
          destFiles.add(destFile);
        }
        // mark this directory for hourly rotate
        addDirectory4Rolling(false, fileDay, fileHour, cluster, dataSource);
      } else {
        throw new RuntimeException("Wrong fileName format! [" + fileName + "]");
      }
    }
    return destFiles;
  }

  /**
   * Marks the day (and, unless isDailyOnly, also the hour) of the given
   * cluster/datasource for rolling by creating the corresponding directory
   * under the rolling root.
   */
  static void addDirectory4Rolling(boolean isDailyOnly, int day, int hour,
      String cluster, String dataSource) throws IOException {
    // TODO get root directory from config
    String rollingDirectory = "/chukwa/rolling/";
    Path path = new Path(rollingDirectory + "/daily/" + day + "/" + cluster
        + "/" + dataSource);
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
    if (!isDailyOnly) {
      path = new Path(rollingDirectory + "/hourly/" + day + "/" + hour + "/"
          + cluster + "/" + dataSource);
      if (!fs.exists(path)) {
        fs.mkdirs(path);
      }
    }
  }

  /**
   * Renames recordFile into destDir as <fileName>.<count>.evt, probing
   * count = 1, 2, ... until a free slot is found.
   *
   * @return the destination path, or null if no free name was found.
   *         NOTE(review): past 1000 attempts this only warns and keeps
   *         looping; RecordMerger.writeRecordFile throws instead - confirm
   *         which behavior is intended.
   */
  static Path writeRecordFile(String destDir, Path recordFile, String fileName)
      throws IOException {
    boolean done = false;
    int count = 1;
    do {
      Path destDirPath = new Path(destDir);
      Path destFilePath = new Path(destDir + "/" + fileName + "." + count
          + ".evt");
      if (!fs.exists(destDirPath)) {
        fs.mkdirs(destDirPath);
        log.info(">>>>>>>>>>>> create Dir" + destDirPath);
      }
      if (!fs.exists(destFilePath)) {
        log.info(">>>>>>>>>>>> Before Rename" + recordFile + " -- "
            + destFilePath);
        boolean rename = fs.rename(recordFile,destFilePath);
        done = true;
        log.info(">>>>>>>>>>>> after Rename" + destFilePath + " , rename:"+rename);
        return destFilePath;
      }
      count++;
      if (count > 1000) {
        log.warn("too many files in this directory: " + destDir);
      }
    } while (!done);
    return null;
  }

  /**
   * Returns whether the given directory has been marked with a rotateDone
   * flag, optionally creating the directory when it does not exist yet.
   */
  static boolean checkRotate(String directoryAsString,
      boolean createDirectoryIfNotExist) throws IOException {
    Path directory = new Path(directoryAsString);
    boolean exist = fs.exists(directory);
    if (!exist) {
      if (createDirectoryIfNotExist == true) {
        fs.mkdirs(directory);
      }
      return false;
    } else {
      return fs.exists(new Path(directoryAsString + "/rotateDone"));
    }
  }

  /**
   * Entry point used by the post processor: moves every cluster directory
   * under srcDir into destDir and deletes each processed cluster directory.
   * Directories whose names start with "_" (Hadoop M/R artifacts) are
   * skipped.
   *
   * @param srcDir demux output directory (one subdirectory per cluster)
   * @param destDir repository root
   * @return destination paths of all moved files (see the NOTE on
   *         processClusterDirectory - may be empty)
   * @throws IOException if srcDir is not a directory
   */
  public static Path[] doMove(Path srcDir, String destDir) throws Exception {
    conf = new ChukwaConfiguration();
    String fsName = conf.get("writer.hdfs.filesystem");
    fs = FileSystem.get(new URI(fsName), conf);
    log.info("Start MoveToRepository doMove()");
    FileStatus fstat = fs.getFileStatus(srcDir);
    Collection<Path> destinationFiles = new HashSet<Path>();
    if (!fstat.isDir()) {
      throw new IOException(srcDir + " is not a directory!");
    } else {
      FileStatus[] clusters = fs.listStatus(srcDir);
      // Run a moveOrMerge on all clusters
      String name = null;
      for (FileStatus cluster : clusters) {
        name = cluster.getPath().getName();
        // Skip hadoop M/R outputDir
        if (name.startsWith("_")) {
          continue;
        }
        log.info("main procesing Cluster (" + cluster.getPath().getName() + ")");
        destinationFiles.addAll(processClusterDirectory(cluster.getPath(),
            destDir + "/" + cluster.getPath().getName()));
        // Delete the demux's cluster dir
        FileUtil.fullyDelete(fs, cluster.getPath());
      }
    }
    log.info("Done with MoveToRepository doMove()");
    return destinationFiles.toArray(new Path[destinationFiles.size()]);
  }

  /**
   * @param args is command line parameter
   * @throws Exception if error in processing data
   */
  public static void main(String[] args) throws Exception {
    Path srcDir = new Path(args[0]);
    String destDir = args[1];
    doMove(srcDir, destDir);
  }
}
| 8,356 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/PostProcessorManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.dataloader.DataLoaderFactory;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.DEFAULT_CHUKWA_POSTPROCESS_DIR_NAME;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.CHUKWA_ROOT_REPOS_DIR_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.DEFAULT_REPOS_DIR_NAME;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.CHUKWA_POSTPROCESS_IN_ERROR_DIR_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.DEFAULT_POSTPROCESS_IN_ERROR_DIR_NAME;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.CHUKWA_POSTPROCESS_MAX_ERROR_COUNT_FIELD;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.POST_DEMUX_DATA_LOADER;
import static org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT.POST_DEMUX_SUCCESS_ACTION;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.util.HierarchyDataType;
import org.apache.hadoop.chukwa.datatrigger.TriggerAction;
import org.apache.hadoop.chukwa.datatrigger.TriggerEvent;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.log4j.Logger;
public class PostProcessorManager {
static Logger log = Logger.getLogger(PostProcessorManager.class);
protected HashMap<String, String> dataSources = new HashMap<String, String>();
protected int errorCount = 0;
protected int ERROR_SLEEP_TIME = 60;
protected ChukwaConfiguration conf = null;
protected FileSystem fs = null;
protected volatile boolean isRunning = true;
private static final int DEFAULT_MAX_ERROR_COUNT = 4;
final private static PathFilter POST_PROCESS_DEMUX_DIR_FILTER = new PathFilter() {
public boolean accept(Path file) {
return ( file.getName().startsWith("demuxOutputDir") || file.getName().startsWith("pigOutputDir"));
}
};
/**
 * Creates a post processor manager backed by a fresh ChukwaConfiguration.
 *
 * @throws Exception if the HDFS connection cannot be established
 */
public PostProcessorManager() throws Exception {
  this.conf = new ChukwaConfiguration();
  init();
}
/**
 * Creates a post processor manager using the supplied configuration.
 *
 * @param conf configuration to use (must name the HDFS filesystem)
 * @throws Exception if the HDFS connection cannot be established
 */
public PostProcessorManager(ChukwaConfiguration conf) throws Exception {
  this.conf = conf;
  init();
}
/**
 * Connects to the filesystem named by HDFS_DEFAULT_NAME_FIELD in conf.
 *
 * @throws IOException if the filesystem cannot be opened
 * @throws URISyntaxException if the configured name is not a valid URI
 */
protected void init() throws IOException, URISyntaxException {
  String fsName = conf.get(HDFS_DEFAULT_NAME_FIELD);
  fs = FileSystem.get(new URI(fsName), conf);
}
/**
 * Command line entry point: builds a manager and runs its processing loop.
 *
 * @param args unused
 * @throws Exception if initialization fails
 */
public static void main(String[] args) throws Exception {
  PostProcessorManager manager = new PostProcessorManager();
  manager.start();
}
/**
 * Asks the start() loop to stop after finishing its current iteration.
 */
public void shutdown() {
  this.isRunning = false;
}
/**
 * Main processing loop: repeatedly scans the post-process directory for
 * demux/pig output directories, runs the configured data loaders over each,
 * then moves the records into the main repository and fires post-move
 * triggers. Failed directories are retried and eventually moved to the
 * in-error directory. Runs until shutdown() is called, or throws once the
 * accumulated error count reaches the configured maximum.
 *
 * @throws RuntimeException when errorCount reaches maxPermittedErrorCount
 */
public void start() {
  // Resolve and normalize all working directories (trailing slash required
  // because paths are built by string concatenation below).
  String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD, "/chukwa/");
  if ( ! chukwaRootDir.endsWith("/") ) {
    chukwaRootDir += "/";
  }
  log.info("chukwaRootDir:" + chukwaRootDir);
  String postProcessDir = conf.get(CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir +DEFAULT_CHUKWA_POSTPROCESS_DIR_NAME);
  if ( ! postProcessDir.endsWith("/") ) {
    postProcessDir += "/";
  }
  String chukwaRootReposDir = conf.get(CHUKWA_ROOT_REPOS_DIR_FIELD, chukwaRootDir +DEFAULT_REPOS_DIR_NAME);
  if ( ! chukwaRootReposDir.endsWith("/") ) {
    chukwaRootReposDir += "/";
  }
  String chukwaPostProcessInErrorDir = conf.get(CHUKWA_POSTPROCESS_IN_ERROR_DIR_FIELD, chukwaRootDir +DEFAULT_POSTPROCESS_IN_ERROR_DIR_NAME);
  if ( ! chukwaPostProcessInErrorDir.endsWith("/") ) {
    chukwaPostProcessInErrorDir += "/";
  }
  int maxPermittedErrorCount = conf.getInt(CHUKWA_POSTPROCESS_MAX_ERROR_COUNT_FIELD,
      DEFAULT_MAX_ERROR_COUNT);
  dataSources = new HashMap<String, String>();
  Path postProcessDirectory = new Path(postProcessDir);
  while (isRunning) {
    // Bail out permanently once too many errors accumulated
    // (maxPermittedErrorCount == -1 disables the limit).
    if (maxPermittedErrorCount != -1 && errorCount >= maxPermittedErrorCount) {
      log.warn("==================\nToo many errors (" + errorCount +
          "), Bail out!\n==================");
      throw new RuntimeException("Bail out!");
    }
    try {
      FileStatus[] demuxOutputDirs = fs.listStatus(postProcessDirectory,POST_PROCESS_DEMUX_DIR_FILTER);
      List<String> directories = new ArrayList<String>();
      for (FileStatus fstatus : demuxOutputDirs) {
        directories.add(fstatus.getPath().getName());
      }
      // Nothing to process yet: sleep briefly and poll again.
      if (demuxOutputDirs.length == 0) {
        try { Thread.sleep(10*1000);} catch(InterruptedException e) { /* do nothing*/}
        continue;
      }
      // Process directories in name order (oldest first).
      Collections.sort(directories);
      String directoryToBeProcessed = null;
      long start = 0;
      for(String directory : directories) {
        directoryToBeProcessed = postProcessDirectory + "/"+ directory;
        log.info("PostProcess Start, directory:" + directoryToBeProcessed);
        start = System.currentTimeMillis();
        try {
          if ( processDataLoaders(directoryToBeProcessed) == true) {
            // Loaders succeeded: promote records to the repository, clean
            // up, and notify post-move triggers.
            Path[] destFiles = movetoMainRepository(
                directoryToBeProcessed,chukwaRootReposDir);
            if (destFiles != null && destFiles.length > 0) {
              deleteDirectory(directoryToBeProcessed);
              log.info("PostProcess Stop, directory:" + directoryToBeProcessed);
              log.info("processDemuxOutput Duration:" + (System.currentTimeMillis() - start));
              processPostMoveTriggers(destFiles);
              continue;
            }
          } else {
            // NOTE(review): this branch compares errorCount against a
            // hard-coded 3 rather than maxPermittedErrorCount - confirm
            // whether that is intentional.
            log.warn("Error in processDemuxOutput for :" + directoryToBeProcessed + ". Will try again.");
            if (errorCount > 3)
              moveToInErrorDirectory(directoryToBeProcessed,directory,chukwaPostProcessInErrorDir);
            else
              errorCount++;
            continue;
          }
          // if we are here it's because something bad happen during processing
          log.warn("Error in processDemuxOutput for :" + directoryToBeProcessed);
          moveToInErrorDirectory(directoryToBeProcessed,directory,chukwaPostProcessInErrorDir);
        } catch (Throwable e) {
          log.warn("Error in processDemuxOutput:" ,e);
        }
      }
    } catch (Throwable e) {
      // Count the failure and back off before the next scan.
      errorCount ++;
      log.warn(e);
      try { Thread.sleep(ERROR_SLEEP_TIME * 1000); }
      catch (InterruptedException e1) {/*do nothing*/ }
    }
  }
}
  /**
   * Runs every configured post-demux data loader over the ".evt" event files
   * found under the given demux output directory.
   *
   * @param directory demux output directory to load data from
   * @return true if every loader completed, false if any loader threw
   * @throws IOException declared for callers; failures are caught and
   *         reported via the false return value
   */
  public boolean processDataLoaders(String directory) throws IOException {
    long start = System.currentTimeMillis();
    try {
      // Comma-separated list of DataLoaderFactory class names; defaults to
      // the metric and FSM loaders shipped with Chukwa.
      String[] classes = conf.get(POST_DEMUX_DATA_LOADER,"org.apache.hadoop.chukwa.dataloader.MetricDataLoaderPool,org.apache.hadoop.chukwa.dataloader.FSMDataLoader").split(",");
      for(String dataLoaderName : classes) {
        // Instantiate each loader reflectively through its no-arg constructor.
        Class<? extends DataLoaderFactory> dl = (Class<? extends DataLoaderFactory>) Class.forName(dataLoaderName);
        java.lang.reflect.Constructor<? extends DataLoaderFactory> c =
            dl.getConstructor();
        DataLoaderFactory dataloader = c.newInstance();
        //DataLoaderFactory dataLoader = (DataLoaderFactory) Class.
        //  forName(dataLoaderName).getConstructor().newInstance();
        log.info(dataLoaderName+" processing: "+directory);
        // Glob two levels below the directory: <directory>/<cluster>/<datatype>
        StringBuilder dirSearch = new StringBuilder();
        dirSearch.append(directory);
        dirSearch.append("/*/*");
        log.debug("dirSearch: " + dirSearch);
        Path demuxDir = new Path(dirSearch.toString());
        // CHUKWA-648: Make Chukwa Reduce Type to support hierarchy format
        // List all event files under the hierarchy data-type directory
        PathFilter filter = new PathFilter()
        {public boolean accept(Path file) {
          return file.getName().endsWith(".evt");
        } };
        List<FileStatus> eventfiles = HierarchyDataType.globStatus(fs, demuxDir,filter,true);
        FileStatus[] events = eventfiles.toArray(new FileStatus[eventfiles.size()]);
        dataloader.load(conf, fs, events);
      }
    } catch(Exception e) {
      // Any loader failure aborts the whole pass; the caller decides whether
      // to retry or move the directory to the in-error area.
      log.error(ExceptionUtil.getStackTrace(e));
      return false;
    }
    log.info("loadData Duration:" + (System.currentTimeMillis() - start));
    return true;
  }
  /**
   * Fires every configured post-move trigger action against the files that
   * were just moved into the main repository. A no-op when no actions are
   * configured.
   *
   * @param files repository paths that were just written by the move step
   * @return true if all triggers ran (or none are configured), false if any
   *         trigger threw
   * @throws IOException declared for callers; failures are caught and
   *         reported via the false return value
   */
  public boolean processPostMoveTriggers(Path[] files) throws IOException {
    long start = System.currentTimeMillis();
    try {
      String actions = conf.get(POST_DEMUX_SUCCESS_ACTION, null);
      if (actions == null || actions.trim().length() == 0) {
        return true;
      }
      log.info("PostProcess firing postMoveTriggers");
      String[] classes = actions.trim().split(",");
      for(String actionName : classes) {
        // Instantiate each trigger action reflectively via its no-arg
        // constructor.
        Class<? extends TriggerAction> actionClass =
            (Class<? extends TriggerAction>) Class.forName(actionName);
        java.lang.reflect.Constructor<? extends TriggerAction> c =
            actionClass.getConstructor();
        TriggerAction action = c.newInstance();
        log.info(actionName + " handling " + files.length + " events");
        //send the files that were just added beneath the repos/ dir.
        FileStatus[] events = fs.listStatus(files);
        action.execute(conf, fs, events, TriggerEvent.POST_DEMUX_SUCCESS);
      }
    } catch(Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
      return false;
    }
    log.info("postMoveTriggers Duration:" + (System.currentTimeMillis() - start));
    return true;
  }
  /**
   * Moves the processed demux output into the main repository by delegating
   * to {@link MoveToRepository#doMove}.
   *
   * @param sourceDirectory demux output directory to move
   * @param repoRootDirectory root of the main repository
   * @return the destination files created by the move, as reported by
   *         MoveToRepository
   * @throws Exception if the underlying move fails
   */
  public Path[] movetoMainRepository(String sourceDirectory,String repoRootDirectory) throws Exception {
    long start = System.currentTimeMillis();
    Path[] destFiles = MoveToRepository.doMove(new Path(sourceDirectory),repoRootDirectory);
    log.info("movetoMainRepository Duration:" + (System.currentTimeMillis() - start));
    return destFiles;
  }
public boolean moveToInErrorDirectory(String sourceDirectory,String dirName,String inErrorDirectory) throws Exception {
Path inErrorDir = new Path(inErrorDirectory);
if (!fs.exists(inErrorDir)) {
fs.mkdirs(inErrorDir);
}
if (inErrorDirectory.endsWith("/")) {
inErrorDirectory += "/";
}
String finalInErrorDirectory = inErrorDirectory + dirName + "_" + System.currentTimeMillis();
fs.rename(new Path(sourceDirectory), new Path(finalInErrorDirectory));
log.warn("Error in postProcess :" + sourceDirectory + " has been moved to:" + finalInErrorDirectory);
return true;
}
public boolean deleteDirectory(String directory) throws IOException {
return fs.delete(new Path(directory), true);
}
}
| 8,357 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/MoveOrMergeRecordFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Moves demux event files for each cluster into a destination repository,
 * merging (via an identity map/reduce job) whenever a file with the same
 * name already exists at the destination.
 */
public class MoveOrMergeRecordFile extends Configured implements Tool {
  static ChukwaConfiguration conf = null;
  static FileSystem fs = null;
  static final String HadoopLogDir = "_logs";
  static final String hadoopTempDir = "_temporary";

  /**
   * Identity map/reduce job that merges the sequence files under args[0]
   * into args[1].
   *
   * @param args args[0] is the input path, args[1] the output directory
   * @return 0 on success
   * @throws Exception if the job fails
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), MoveOrMergeRecordFile.class);
    conf.setJobName("Chukwa-MoveOrMergeLogFile");
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    // conf.setPartitionerClass(ChukwaPartitioner.class);
    // conf.setOutputFormat(ChukwaOutputFormat.class);
    conf.setOutputKeyClass(ChukwaRecordKey.class);
    conf.setOutputValueClass(ChukwaRecord.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    FileInputFormat.setInputPaths(conf, args[0]);
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
    return 0;
  }

  /**
   * Copies every ".evt" file from one cluster's source directory into the
   * destination. When a file of the same name already exists at the
   * destination, both copies are staged into a temporary MR directory and an
   * asynchronous merge job is started instead.
   *
   * @param srcDir cluster directory containing datasource subdirectories
   * @param destDir destination directory for this cluster
   * @throws Exception if either directory level is not a directory, or on
   *         file system errors
   */
  static void moveOrMergeOneCluster(Path srcDir, String destDir)
      throws Exception {
    System.out.println("moveOrMergeOneCluster (" + srcDir.getName() + ","
        + destDir + ")");
    FileStatus fstat = fs.getFileStatus(srcDir);
    if (!fstat.isDir()) {
      throw new IOException(srcDir + " is not a directory!");
    } else {
      FileStatus[] datasourceDirectories = fs.listStatus(srcDir);
      for (FileStatus datasourceDirectory : datasourceDirectories) {
        System.out.println(datasourceDirectory.getPath() + " isDir?"
            + datasourceDirectory.isDir());
        if (!datasourceDirectory.isDir()) {
          throw new IOException("Top level should just contains directories :"
              + datasourceDirectory.getPath());
        }
        String dirName = datasourceDirectory.getPath().getName();
        Path destPath = new Path(destDir + "/" + dirName);
        System.out.println("dest directory path: " + destPath);
        if (!fs.exists(destPath)) {
          System.out.println("create datasource directory [" + destDir + "/"
              + dirName + "]");
          fs.mkdirs(destPath);
        }
        FileStatus[] evts = fs.listStatus(datasourceDirectory.getPath(),
            new EventFileFilter());
        for (FileStatus eventFile : evts) {
          Path eventFilePath = eventFile.getPath();
          String filename = eventFilePath.getName();
          System.out.println("src dir File: [" + filename + "]");
          Path destFilePath = new Path(destDir + "/" + dirName + "/" + filename);
          if (!fs.exists(destFilePath)) {
            System.out.println("Moving File: [" + destFilePath + "]");
            // Copy to final Location
            FileUtil.copy(fs, eventFilePath, fs, destFilePath, false, false,
                conf);
          } else {
            System.out.println("Need to merge! : [" + destFilePath + "]");
            String strMrPath = datasourceDirectory.getPath().toString() + "/"
                + "MR_" + System.currentTimeMillis();
            Path mrPath = new Path(strMrPath);
            System.out.println("\t New MR directory : [" + mrPath + "]");
            // Create MR input Dir
            fs.mkdirs(mrPath);
            // Move Input files: the new file is copied, the existing
            // destination file is renamed, and the merge job's output later
            // replaces the destination.
            FileUtil.copy(fs, eventFilePath, fs,
                new Path(strMrPath + "/1.evt"), false, false, conf);
            fs.rename(destFilePath, new Path(strMrPath + "/2.evt"));
            // Merge
            String[] mergeArgs = new String[2];
            mergeArgs[0] = strMrPath;
            mergeArgs[1] = strMrPath + "/mrOutput";
            DoMerge merge = new DoMerge(conf, fs, eventFilePath, destFilePath,
                mergeArgs);
            merge.start();
          }
        }
      }
    }
  }

  /**
   * @param args is command line parameters
   * @throws Exception if unable to process data
   */
  public static void main(String[] args) throws Exception {
    conf = new ChukwaConfiguration();
    String fsName = conf.get("writer.hdfs.filesystem");
    fs = FileSystem.get(new URI(fsName), conf);
    Path srcDir = new Path(args[0]);
    String destDir = args[1];
    FileStatus fstat = fs.getFileStatus(srcDir);
    if (!fstat.isDir()) {
      throw new IOException(srcDir + " is not a directory!");
    } else {
      FileStatus[] clusters = fs.listStatus(srcDir);
      // Run a moveOrMerge on all clusters
      String name = null;
      for (FileStatus cluster : clusters) {
        name = cluster.getPath().getName();
        // Skip hadoop outDir
        // BUG FIX: compare with equals(); the original used == on intern()ed
        // strings, relying on reference identity for string comparison.
        if (HadoopLogDir.equals(name) || hadoopTempDir.equals(name)) {
          continue;
        }
        moveOrMergeOneCluster(cluster.getPath(), destDir + "/"
            + cluster.getPath().getName());
      }
    }
    System.out.println("Done with moveOrMerge main()");
  }
}
/**
 * Background thread that runs a MoveOrMergeRecordFile merge job and, on
 * success, copies the merged output over the destination file and renames it
 * back to the original event-file path.
 */
class DoMerge extends Thread {
  ChukwaConfiguration conf = null;
  FileSystem fs = null;
  // mergeArgs[0] = MR input directory, mergeArgs[1] = MR output directory
  String[] mergeArgs = new String[2];
  Path destFilePath = null;
  Path eventFilePath = null;

  /**
   * @param conf Chukwa configuration used to run the merge job
   * @param fs file system holding the files to merge
   * @param eventFilePath original source event file path
   * @param destFilePath final destination for the merged output
   * @param mergeArgs MR input/output directories (see field comment)
   */
  public DoMerge(ChukwaConfiguration conf, FileSystem fs, Path eventFilePath,
                 Path destFilePath, String[] mergeArgs) {
    this.conf = conf;
    this.fs = fs;
    this.eventFilePath = eventFilePath;
    this.destFilePath = destFilePath;
    this.mergeArgs = mergeArgs;
  }

  @Override
  public void run() {
    System.out.println("\t Running Merge! : output [" + mergeArgs[1] + "]");
    int res;
    try {
      res = ToolRunner.run(new ChukwaConfiguration(),
          new MoveOrMergeRecordFile(), mergeArgs);
      System.out.println("MR exit status: " + res);
      if (res == 0) {
        System.out.println("\t Moving output file : to [" + destFilePath + "]");
        // Copy part-00000 to the destination first, then rename it onto the
        // original event-file path -- order matters here.
        FileUtil.copy(fs, new Path(mergeArgs[1] + "/part-00000"), fs,
            destFilePath, false, false, conf);
        fs.rename(new Path(mergeArgs[1] + "/part-00000"), eventFilePath);
      } else {
        throw new RuntimeException("Error in M/R merge operation!");
      }
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException("Error in M/R merge operation!", e);
    }
  }
}
/**
 * Path filter that accepts only Chukwa event files, i.e. paths whose string
 * form ends in ".evt".
 */
class EventFileFilter implements PathFilter {
  public boolean accept(Path path) {
    String asText = path.toString();
    return asText.endsWith(".evt");
  }
}
| 8,358 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/HourlyChukwaRecordRolling.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.HierarchyDataType;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.Tool;
import org.apache.log4j.Logger;
// TODO do an abstract class for all rolling
/**
 * Rolls hourly Chukwa record files: for every (day, hour) older than the
 * current hour, merges the 5-minute event files in the rolling area into a
 * single hourly file in the main repository, then deletes the consumed
 * rolling directories.
 */
public class HourlyChukwaRecordRolling extends Configured implements Tool {
  static Logger log = Logger.getLogger(HourlyChukwaRecordRolling.class);
  // NOTE(review): SimpleDateFormat is not thread-safe; this appears to be
  // used only from the single-threaded main() path -- confirm before adding
  // concurrent callers.
  static SimpleDateFormat sdf = new java.text.SimpleDateFormat("yyyyMMdd");
  static ChukwaConfiguration conf = null;
  static FileSystem fs = null;
  static final String HadoopLogDir = "_logs";
  static final String hadoopTempDir = "_temporary";
  static boolean rollInSequence = true;
  static boolean deleteRawdata = false;

  /** Prints command-line usage to stderr. */
  public static void usage() {
    System.err
        .println("usage: java org.apache.hadoop.chukwa.extraction.demux.HourlyChukwaRecordRolling rollInSequence <True/False> deleteRawdata <True/False>");
  }

  /**
   * Merges all event files for one (day, hour) into the main repository, one
   * merge per cluster/data-source, then deletes the processed rolling
   * directories.
   *
   * @param chukwaMainRepository root of the final repository
   * @param tempDir scratch directory for merge-job output
   * @param rollingFolder root of the rolling area being consumed
   * @param workingDay day being rolled, formatted yyyyMMdd
   * @param workingHour hour of the day being rolled
   * @throws IOException on file system errors
   */
  public static void buildHourlyFiles(String chukwaMainRepository,
      String tempDir, String rollingFolder, int workingDay, int workingHour)
      throws IOException {
    // process
    Path hourPath = new Path(rollingFolder + "/hourly/" + workingDay + "/"
        + workingHour);
    FileStatus[] clustersFS = fs.listStatus(hourPath);
    for (FileStatus clusterFs : clustersFS) {
      String cluster = clusterFs.getPath().getName();
      Path dataSourceClusterHourPaths = new Path(rollingFolder + "/hourly/"
          + workingDay + "/" + workingHour + "/" + cluster);
      FileStatus[] dataSourcesFS = fs.listStatus(dataSourceClusterHourPaths);
      for (FileStatus dataSourceFS : dataSourcesFS) {
        //CHUKWA-648: Make Chukwa Reduce Type to support hierarchy format
        for (FileStatus dataSourcePath : HierarchyDataType.globStatus(fs,
            dataSourceFS.getPath(), true)) {
          String dataSource = HierarchyDataType.getDataType(
              dataSourcePath.getPath(),
              fs.getFileStatus(dataSourceClusterHourPaths).getPath());
          // Repo path = reposRootDirectory/<cluster>/<datasource>/<day>/<hour>/*/*.evt
          // put the rotate flag
          fs.mkdirs(new Path(chukwaMainRepository + "/" + cluster + "/"
              + dataSource + "/" + workingDay + "/" + workingHour
              + "/rotateDone"));
          // rotate
          // Merge
          String[] mergeArgs = new String[5];
          // input
          mergeArgs[0] = chukwaMainRepository + "/" + cluster + "/" + dataSource
              + "/" + workingDay + "/" + workingHour + "/[0-5]*/*.evt";
          // temp dir
          mergeArgs[1] = tempDir + "/" + cluster + "/" + dataSource + "/"
              + workingDay + "/" + workingHour + "_" + System.currentTimeMillis();
          // final output dir
          mergeArgs[2] = chukwaMainRepository + "/" + cluster + "/" + dataSource
              + "/" + workingDay + "/" + workingHour;
          // final output fileName
          mergeArgs[3] = dataSource + "_HourlyDone_" + workingDay + "_" + workingHour;
          // delete rolling directory
          mergeArgs[4] = rollingFolder + "/hourly/" + workingDay + "/"
              + workingHour + "/" + cluster + "/" + dataSource;
          log.info("HourlyChukwaRecordRolling 0: " + mergeArgs[0]);
          log.info("HourlyChukwaRecordRolling 1: " + mergeArgs[1]);
          log.info("HourlyChukwaRecordRolling 2: " + mergeArgs[2]);
          log.info("HourlyChukwaRecordRolling 3: " + mergeArgs[3]);
          log.info("HourlyChukwaRecordRolling 4: " + mergeArgs[4]);
          RecordMerger merge = new RecordMerger(conf, fs,
              new HourlyChukwaRecordRolling(), mergeArgs, deleteRawdata);
          List<RecordMerger> allMerge = new ArrayList<RecordMerger>();
          if (rollInSequence) {
            merge.mergeRecords();
          } else {
            allMerge.add(merge);
            merge.start();
          }
          // join all Threads
          if (!rollInSequence) {
            while (allMerge.size() > 0) {
              RecordMerger m = allMerge.remove(0);
              try {
                m.join();
              } catch (InterruptedException e) {
                // BUG FIX: restore the interrupt status instead of silently
                // swallowing the interruption.
                Thread.currentThread().interrupt();
              }
            }
          } // End if (!rollInSequence)
        }
        // Delete the processed dataSourceFS
        FileUtil.fullyDelete(fs, dataSourceFS.getPath());
      } // End for(FileStatus dataSourceFS : dataSourcesFS)
      // Delete the processed clusterFs
      FileUtil.fullyDelete(fs, clusterFs.getPath());
    } // End for(FileStatus clusterFs : clustersFS)
    // Delete the processed hour
    FileUtil.fullyDelete(fs, hourPath);
  }

  /**
   * Entry point. Expects: rollInSequence &lt;True/False&gt; deleteRawdata
   * &lt;True/False&gt;. Rolls every (day, hour) strictly older than the
   * current hour.
   *
   * @param args is command line parameters
   * @throws Exception if error in processing data
   */
  public static void main(String[] args) throws Exception {
    conf = new ChukwaConfiguration();
    String fsName = conf.get("writer.hdfs.filesystem");
    fs = FileSystem.get(new URI(fsName), conf);
    // TODO read from config
    String rollingFolder = "/chukwa/rolling/";
    String chukwaMainRepository = "/chukwa/repos/";
    String tempDir = "/chukwa/temp/hourlyRolling/";
    // TODO do a real parameter parsing
    if (args.length != 4) {
      usage();
      return;
    }
    if (!args[0].equalsIgnoreCase("rollInSequence")) {
      usage();
      return;
    }
    if (!args[2].equalsIgnoreCase("deleteRawdata")) {
      usage();
      return;
    }
    rollInSequence = args[1].equalsIgnoreCase("true");
    deleteRawdata = args[3].equalsIgnoreCase("true");
    Calendar calendar = Calendar.getInstance();
    int currentDay = Integer.parseInt(sdf.format(calendar.getTime()));
    int currentHour = calendar.get(Calendar.HOUR_OF_DAY);
    log.info("CurrentDay: " + currentDay);
    log.info("currentHour" + currentHour);
    Path rootFolder = new Path(rollingFolder + "/hourly/");
    FileStatus[] daysFS = fs.listStatus(rootFolder);
    for (FileStatus dayFS : daysFS) {
      try {
        log.info("dayFs:" + dayFS.getPath().getName());
        int workingDay = Integer.parseInt(dayFS.getPath().getName());
        Path hourlySrc = new Path(rollingFolder + "/hourly/" + workingDay);
        FileStatus[] hoursFS = fs.listStatus(hourlySrc);
        for (FileStatus hourFS : hoursFS) {
          String workinhHourStr = hourFS.getPath().getName();
          int workingHour = Integer.parseInt(workinhHourStr);
          // Roll all previous days, and earlier hours of the current day.
          if ((workingDay < currentDay)
              || ((workingDay == currentDay) && (workingHour < currentHour))) {
            try {
              buildHourlyFiles(chukwaMainRepository, tempDir, rollingFolder,
                  workingDay, workingHour);
            } catch(Throwable e) {
              // BUG FIX: log the throwable with its stack trace instead of
              // printing to stderr.
              log.warn("Hourly rolling failed on :" + rollingFolder +"/" + workingDay +"/" + workingHour, e) ;
            }
          }
        } // End for(FileStatus hourFS : hoursFS)
      } catch (NumberFormatException e) { /* Not a standard Day directory skip */
        log.warn("Exception in hourlyRolling:", e);
      }
    } // for(FileStatus dayFS : daysFS)
  }

  /**
   * Identity map/reduce job that merges the sequence files under args[0]
   * into args[1], using a single reducer at low priority.
   *
   * @param args args[0] is the input path glob, args[1] the output directory
   * @return 0 on success
   * @throws Exception if the job fails
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(new ChukwaConfiguration(), HourlyChukwaRecordRolling.class);
    conf.setJobName("HourlyChukwa-Rolling");
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    conf.setOutputKeyClass(ChukwaRecordKey.class);
    conf.setOutputValueClass(ChukwaRecord.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    log.info("HourlyChukwaRecordRolling input: " + args[0]);
    log.info("HourlyChukwaRecordRolling output: " + args[1]);
    FileInputFormat.setInputPaths(conf, args[0]);
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    conf.setJobPriority(JobPriority.LOW);
    conf.setNumReduceTasks(1);
    JobClient.runJob(conf);
    return 0;
  }
}
| 8,359 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/Util.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor;
import java.text.SimpleDateFormat;
import java.util.Calendar;
/**
 * Helper for building time-based output-file suffixes. Timestamps on a day
 * (or hour) other than the one captured at class-load time get a daily
 * (".D.evt") or hourly (".H.evt") suffix; timestamps in the current hour get
 * a 5-minute rolling suffix (".R.evt").
 */
public class Util {
  static SimpleDateFormat day = new java.text.SimpleDateFormat("yyyyMMdd");
  static Calendar calendar = Calendar.getInstance();
  static int currentDay = 0;
  static int currentHour = 0;

  // Snapshot "now" once at class load; later comparisons are made against
  // this snapshot, not the wall clock at call time.
  static {
    synchronized (calendar) {
      calendar.setTimeInMillis(System.currentTimeMillis());
      currentDay = Integer.parseInt(day.format(calendar.getTime()));
      currentHour = calendar.get(Calendar.HOUR_OF_DAY);
    }
  }

  /**
   * Builds the file-name suffix for the given timestamp.
   *
   * @param timestamp epoch milliseconds of the record
   * @return "_yyyyMMdd.D.evt" for a different day, "_yyyyMMdd_H.H.evt" for a
   *         different hour of the current day, or "_yyyyMMdd_H_mX.R.evt"
   *         (X in {0,5}) for the current hour, bucketed to 5 minutes
   */
  public static String generateTimeOutput(long timestamp) {
    int workingDay;
    int workingHour;
    int minutes;
    // The shared Calendar and SimpleDateFormat are not thread-safe, so all
    // access is serialized on the calendar.
    synchronized (calendar) {
      calendar.setTimeInMillis(timestamp);
      workingDay = Integer.parseInt(day.format(calendar.getTime()));
      workingHour = calendar.get(Calendar.HOUR_OF_DAY);
      minutes = calendar.get(Calendar.MINUTE);
    }
    StringBuilder suffix = new StringBuilder("_").append(workingDay);
    if (workingDay != currentDay) {
      suffix.append(".D.evt");
    } else if (workingHour != currentHour) {
      suffix.append("_").append(workingHour).append(".H.evt");
    } else {
      int tenMinuteBucket = minutes / 10;
      boolean lowerHalf = (minutes % 10) < 5;
      suffix.append("_").append(workingHour).append("_").append(tenMinuteBucket)
          .append(lowerHalf ? "0.R.evt" : "5.R.evt");
    }
    return suffix.toString();
  }
}
| 8,360 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/ChukwaOutputCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor;
import java.io.IOException;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * OutputCollector decorator that forwards every (key, record) pair to a
 * wrapped collector while incrementing per-reduce-type counters on the
 * supplied Reporter under the given counter group.
 */
public class ChukwaOutputCollector implements
    OutputCollector<ChukwaRecordKey, ChukwaRecord> {
  private final OutputCollector<ChukwaRecordKey, ChukwaRecord> delegate;
  private final Reporter reporter;
  private final String groupName;

  /**
   * @param groupName counter group to report under
   * @param outputCollector collector that receives the forwarded records
   * @param reporter reporter whose counters are incremented per record
   */
  public ChukwaOutputCollector(
      String groupName,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> outputCollector,
      Reporter reporter) {
    this.delegate = outputCollector;
    this.reporter = reporter;
    this.groupName = groupName;
  }

  @Override
  public void collect(ChukwaRecordKey key, ChukwaRecord value)
      throws IOException {
    // Forward first, then count: one total counter plus one per reduce type.
    delegate.collect(key, value);
    reporter.incrCounter(groupName, "total records", 1);
    reporter.incrCounter(groupName, key.getReduceType() + " records", 1);
  }
}
| 8,361 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChunkProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * Contract for processors that parse a raw {@link Chunk} into
 * {@link ChukwaRecord}s emitted through the supplied collector.
 */
public interface ChunkProcessor {
  /**
   * @return the Chukwa data type this processor handles
   */
  public String getDataType();

  /**
   * Parses the chunk and emits the resulting records.
   *
   * @param chunk raw chunk to process
   * @param output collector receiving (key, record) pairs
   * @param reporter reporter for progress and counters
   */
  public void process(Chunk chunk, OutputCollector<Text, ChukwaRecord> output,
      Reporter reporter);
}
| 8,362 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/AbstractProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.nio.charset.Charset;
import java.util.Calendar;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.chukwa.util.RecordConstants;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
/**
 * Base class for demux map processors. Iterates over the records of a Chunk
 * (delimited by the chunk's record offsets), handing each record line to the
 * subclass's {@link #parse} method; any Throwable from parse causes the whole
 * chunk to be saved once as a "chunk in error".
 */
public abstract class AbstractProcessor implements MapProcessor {
  static Logger log = Logger.getLogger(AbstractProcessor.class);

  Calendar calendar = Calendar.getInstance();
  // Raw bytes of the chunk currently being processed.
  byte[] bytes;
  // Inclusive end offset of each record within bytes.
  int[] recordOffsets;
  // Index into recordOffsets of the next record to return.
  protected int currentPos = 0;
  // Byte offset where the next record starts.
  protected int startOffset = 0;
  protected ChukwaArchiveKey archiveKey = null;
  protected ChukwaRecordKey key = new ChukwaRecordKey();
  protected Chunk chunk = null;
  // Ensures a failing chunk is saved at most once per process() call.
  boolean chunkInErrorSaved = false;
  OutputCollector<ChukwaRecordKey, ChukwaRecord> output = null;
  Reporter reporter = null;

  public AbstractProcessor() {
  }

  /**
   * Parses one record line and emits records to the collector.
   *
   * @param recordEntry one record's text, with record separators restored
   * @param output collector receiving parsed records
   * @param reporter reporter for progress and counters
   * @throws Throwable on any parse failure; triggers saveChunkInError
   */
  protected abstract void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable;

  /**
   * Saves the current chunk as a chunk-in-error, at most once per chunk.
   *
   * @param throwable the failure that triggered the save
   */
  protected void saveChunkInError(Throwable throwable) {
    if (chunkInErrorSaved == false) {
      try {
        ChunkSaver.saveChunk(chunk, throwable, output, reporter);
        chunkInErrorSaved = true;
      } catch (Exception e) {
        // BUG FIX: use the class logger so the failure lands in the task
        // logs (was e.printStackTrace()).
        log.warn("Unable to save chunk in error", e);
      }
    }
  }

  /**
   * Processes every record of the chunk, delegating each line to parse().
   * Errors are captured via saveChunkInError instead of aborting the task.
   *
   * @param archiveKey archive key of the chunk being processed
   * @param chunk chunk whose records are parsed
   * @param output collector receiving parsed records
   * @param reporter reporter for progress and counters
   */
  public void process(ChukwaArchiveKey archiveKey, Chunk chunk,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    chunkInErrorSaved = false;
    this.archiveKey = archiveKey;
    this.output = output;
    this.reporter = reporter;
    reset(chunk);
    while (hasNext()) {
      try {
        parse(nextLine(), output, reporter);
      } catch (Throwable e) {
        saveChunkInError(e);
      }
    }
  }

  /**
   * Populates a record and the shared key with the standard Chukwa fields.
   * The key's time component is the timestamp truncated to the hour.
   *
   * @param record record to fill in
   * @param body record body, skipped when null
   * @param timestamp record timestamp in epoch milliseconds
   * @param dataSource reduce type to set on the key
   */
  protected void buildGenericRecord(ChukwaRecord record, String body,
      long timestamp, String dataSource) {
    calendar.setTimeInMillis(timestamp);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MILLISECOND, 0);
    key.setKey("" + calendar.getTimeInMillis() + "/" + chunk.getSource() + "/"
        + timestamp);
    key.setReduceType(dataSource);
    if (body != null) {
      record.add(Record.bodyField, body);
    }
    record.setTime(timestamp);
    record.add(Record.tagsField, chunk.getTags());
    record.add(Record.sourceField, chunk.getSource());
    record.add(Record.applicationField, chunk.getStreamName());
  }

  /** Rewinds the iteration state to the start of the given chunk. */
  protected void reset(Chunk chunk) {
    this.chunk = chunk;
    this.bytes = chunk.getData();
    this.recordOffsets = chunk.getRecordOffsets();
    currentPos = 0;
    startOffset = 0;
  }

  /** @return true while unread records remain in the current chunk */
  protected boolean hasNext() {
    return (currentPos < recordOffsets.length);
  }

  /**
   * Returns the next record as a UTF-8 string with embedded record
   * separators restored, advancing the iteration state.
   *
   * @return the next record's text
   */
  protected String nextLine() {
    // Renamed from 'log' to stop shadowing the class logger.
    String record = new String(bytes, startOffset, (recordOffsets[currentPos]
        - startOffset + 1), Charset.forName("UTF-8"));
    startOffset = recordOffsets[currentPos] + 1;
    currentPos++;
    return RecordConstants.recoverRecordSeparators("\n", record);
  }

  public int getCurrentPos() {
    return currentPos;
  }

  public void setCurrentPos(int currentPos) {
    this.currentPos = currentPos;
  }

  public int getStartOffset() {
    return startOffset;
  }

  public void setStartOffset(int startOffset) {
    this.startOffset = startOffset;
  }
}
| 8,363 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChukwaMetricsProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.log4j.Logger;
/**
 * Demux processor for Chukwa agent metrics. Inherits all parsing behavior
 * from {@link HadoopMetricsProcessor}; this subclass only contributes the
 * HBase table annotations for the chukwaAgent column families.
 */
@Tables(annotations={
@Table(name="chukwa",columnFamily="chukwaAgent_chunkQueue"),
@Table(name="chukwa",columnFamily="chukwaAgent_metrics"),
@Table(name="chukwa",columnFamily="chukwaAgent_httpSender")
})
public class ChukwaMetricsProcessor extends HadoopMetricsProcessor {
  static Logger log = Logger.getLogger(ChukwaMetricsProcessor.class);
}
| 8,364 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ZookeeperProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "Zookeeper", columnFamily = "zk") })
public class ZookeeperProcessor extends AbstractProcessor {

  // Fix: the original created a brand-new Logger on every parse() call;
  // loggers are cached singletons and belong in a static field.
  private static final Logger log = Logger.getLogger(ZookeeperProcessor.class);

  // NOTE(review): rateMap is initialized here but never read or updated in
  // this class; it looks like a leftover from rate computation. Kept because
  // it is package-visible and may be referenced elsewhere -- TODO confirm.
  static Map<String, Long> rateMap = new ConcurrentHashMap<String, Long>();
  static {
    long zero = 0L;
    rateMap.put("PacketsSent", zero);
    rateMap.put("PacketsReceived", zero);
  }

  // Whitelist of metric names copied from the zookeeper JMX JSON into the
  // emitted record. Replaces the per-call anonymous HashMap subclass that
  // mapped every key to the same record instance (it was only ever used for
  // containsKey checks). Built once; read-only afterwards.
  private static final Map<String, String> METRICS =
      new HashMap<String, String>();
  static {
    String[] names = { "MinRequestLatency", "AvgRequestLatency",
        "MaxRequestLatency", "PacketsReceived", "PacketsSent",
        "OutstandingRequests", "NodeCount", "WatchCount" };
    for (String name : names) {
      METRICS.put(name, name);
    }
  }

  /**
   * Parses one JSON record produced by the zookeeper JMX adaptor and emits a
   * single ChukwaRecord containing the whitelisted metrics.
   *
   * @param recordEntry JSON text for one sample
   * @param output collector receiving the (key, record) pair
   * @param reporter unused
   * @throws Throwable never in practice; malformed input is logged and dropped
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    // Fallback timestamp (now, UTC) if the adaptor did not tag one.
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
        .getTimeInMillis();
    ChukwaRecord record = new ChukwaRecord();
    try {
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for zookeeper");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> entries = obj.entrySet().iterator();
      while (entries.hasNext()) {
        Map.Entry<String, ?> entry = entries.next();
        String name = entry.getKey();
        if (METRICS.containsKey(name)) {
          Object value = entry.getValue();
          record.add(name, value == null ? "" : value.toString());
        }
      }
      buildGenericRecord(record, null, timeStamp, "zk");
      output.collect(key, record);
    } catch (Exception e) {
      // Malformed input (including unparseable JSON, where obj is null and
      // entrySet() throws NPE) is logged and dropped rather than failing
      // the task.
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,365 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/MapProcessorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.HashMap;
import org.apache.log4j.Logger;
public class MapProcessorFactory {
  static Logger log = Logger.getLogger(MapProcessorFactory.class);

  // Registry of already-constructed processors keyed by class name.
  // Fix: the original performed an unsynchronized check-then-act on this
  // shared static HashMap, so concurrent callers could both miss the cache
  // and race on put(); all access now goes through the synchronized
  // getProcessor() below.
  private static HashMap<String, MapProcessor> processors =
      new HashMap<String, MapProcessor>(); // registry

  public MapProcessorFactory() {
  }

  /**
   * Returns the cached {@link MapProcessor} for the given class name,
   * reflectively constructing and caching it on first use.
   *
   * @param parserClass fully-qualified name of a MapProcessor implementation
   * @return the shared processor instance for that class
   * @throws UnknownRecordTypeException if the class cannot be found or
   *         constructed via its no-arg constructor
   */
  public static synchronized MapProcessor getProcessor(String parserClass)
      throws UnknownRecordTypeException {
    MapProcessor cached = processors.get(parserClass);
    if (cached != null) {
      return cached;
    }
    MapProcessor processor = null;
    try {
      processor = (MapProcessor) Class.forName(parserClass).getConstructor()
          .newInstance();
    } catch (ClassNotFoundException e) {
      throw new UnknownRecordTypeException("Unknown parserClass:"
          + parserClass, e);
    } catch (Exception e) {
      throw new UnknownRecordTypeException("error constructing processor", e);
    }
    // TODO using a ThreadSafe/reuse flag to actually decide if we want
    // to reuse the same processor again and again
    processors.put(parserClass, processor);
    return processor;
  }
}
| 8,366 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ClientTraceProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.net.InetAddress;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
@Table(name="Hadoop",columnFamily="ClientTrace")
public class ClientTraceProcessor extends AbstractProcessor {

  private static final String recordType = "ClientTrace";

  // NOTE(review): SimpleDateFormat and Matcher are not thread-safe; this is
  // only correct if each processor instance is confined to a single thread
  // -- TODO confirm against the demux processor lifecycle.
  private final SimpleDateFormat sdf =
    new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
  private final Matcher kvMatcher;
  private final Matcher idMatcher;
  private final Matcher ipMatcher;

  // extract date, source
  private final Pattern idPattern =
    Pattern.compile("^(.{23}).*clienttrace.*");
  // extract "key: value" pairs
  private final Pattern kvPattern =
    Pattern.compile("\\s+(\\w+):\\s+([^,]+)");
  private final Pattern ipPattern =
    Pattern.compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+");

  public ClientTraceProcessor() {
    super();
    // Matchers are allocated once and reset per input line.
    kvMatcher = kvPattern.matcher("");
    idMatcher = idPattern.matcher("");
    ipMatcher = ipPattern.matcher("");
  }

  /** Network locality of a transfer, derived from its IPv4 endpoints. */
  public enum Locality {
    LOCAL("local"), INTRA("intra_rack"), INTER("inter_rack");
    String lbl;
    Locality(String lbl) {
      this.lbl = lbl;
    }
    public String getLabel() {
      return lbl;
    }
  };

  /**
   * Classifies the transfer between two endpoints.
   *
   * @param src string containing the source IPv4 address
   * @param dst string containing the destination IPv4 address
   * @return LOCAL if the addresses are identical; on the first differing
   *         octet, INTRA if it is the last octet and both share the same top
   *         two bits (0xC0 mask), otherwise INTER
   * @throws Exception (IOException) if either address is missing or not found
   */
  protected Locality getLocality(String src, String dst) throws Exception {
    if (null == src || null == dst) {
      throw new IOException("Missing src/dst");
    }
    ipMatcher.reset(src);
    if (!ipMatcher.find()) {
      throw new IOException("Could not find src");
    }
    byte[] srcIP = InetAddress.getByName(ipMatcher.group(0)).getAddress();
    ipMatcher.reset(dst);
    if (!ipMatcher.find()) {
      throw new IOException("Could not find dst");
    }
    byte[] dstIP = InetAddress.getByName(ipMatcher.group(0)).getAddress();
    for (int i = 0; i < 4; ++i) {
      if (srcIP[i] != dstIP[i]) {
        return (3 == i && (srcIP[i] & 0xC0) == (dstIP[i] & 0xC0))
          ? Locality.INTRA
          : Locality.INTER;
      }
    }
    return Locality.LOCAL;
  }

  /**
   * Parses one clienttrace log line into a ChukwaRecord keyed by
   * minute-truncated time, locality and operation.
   *
   * @param recordEntry one raw log line
   * @param output collector receiving the (key, record) pair
   * @param reporter unused
   * @throws Throwable ParseException on a bad date, IOException on
   *         missing fields or collection failure
   */
  @Override
  public void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey,ChukwaRecord> output, Reporter reporter)
    throws Throwable {
    try {
      idMatcher.reset(recordEntry);
      long ms;
      long ms_fullresolution;
      if (idMatcher.find()) {
        ms = sdf.parse(idMatcher.group(1)).getTime();
        ms_fullresolution = ms;
      } else {
        throw new IOException("Could not find date/source");
      }
      kvMatcher.reset(recordEntry);
      if (!kvMatcher.find()) {
        throw new IOException("Failed to find record");
      }
      ChukwaRecord rec = new ChukwaRecord();
      do {
        rec.add(kvMatcher.group(1), kvMatcher.group(2));
      } while (kvMatcher.find());
      Locality loc = getLocality(rec.getValue("src"), rec.getValue("dest"));
      rec.add("locality", loc.getLabel());

      // Truncate to the minute for the record time, to the hour for the key.
      calendar.setTimeInMillis(ms);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      ms = calendar.getTimeInMillis();
      calendar.set(Calendar.MINUTE, 0);
      // Fix: use a fixed locale so the key is stable regardless of the JVM
      // default locale (e.g. Turkish dotless-i).
      key.setKey(calendar.getTimeInMillis() + "/" + loc.getLabel() + "/" +
        rec.getValue("op").toLowerCase(java.util.Locale.ENGLISH) + "/" + ms);
      key.setReduceType("ClientTrace");
      rec.setTime(ms);
      rec.add(Record.tagsField, chunk.getTags());
      rec.add(Record.sourceField, chunk.getSource());
      rec.add(Record.applicationField, chunk.getStreamName());
      rec.add("actual_time",Long.toString(ms_fullresolution));
      output.collect(key, rec);
    } catch (ParseException e) {
      // Fix: message previously named DefaultProcessor; the exception is
      // already logged with its stack trace, so no printStackTrace().
      log.warn("Unable to parse the date in ClientTraceProcessor ["
          + recordEntry + "]", e);
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in ClientTraceProcessor ["
          + recordEntry + "]", e);
      throw e;
    }
  }

  public String getDataType() {
    return recordType;
  }
}
| 8,367 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/MapProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * Map-side demux processor: consumes one raw {@link Chunk} (plus its archive
 * key) and emits zero or more (ChukwaRecordKey, ChukwaRecord) pairs to the
 * collector. Implementations are obtained via MapProcessorFactory, which
 * requires a public no-arg constructor.
 */
public interface MapProcessor {
  /**
   * Processes a single chunk.
   *
   * @param archiveKey archive key associated with the chunk
   * @param chunk raw data to parse
   * @param output collector receiving the parsed records
   * @param reporter Hadoop progress/status reporter
   */
  public void process(ChukwaArchiveKey archiveKey, Chunk chunk,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter);
}
| 8,368 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HadoopMetricsProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations={
@Table(name="Hadoop",columnFamily="jvm_metrics"),
@Table(name="Hadoop",columnFamily="mapred_metrics"),
@Table(name="Hadoop",columnFamily="dfs_metrics"),
@Table(name="Hadoop",columnFamily="dfs_namenode"),
@Table(name="Hadoop",columnFamily="dfs_FSNamesystem"),
@Table(name="Hadoop",columnFamily="dfs_datanode"),
@Table(name="Hadoop",columnFamily="mapred_jobtracker"),
@Table(name="Hadoop",columnFamily="mapred_shuffleInput"),
@Table(name="Hadoop",columnFamily="mapred_shuffleOutput"),
@Table(name="Hadoop",columnFamily="mapred_tasktracker"),
@Table(name="Hadoop",columnFamily="rpc_metrics")
})
public class HadoopMetricsProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(HadoopMetricsProcessor.class);
  // JSON field names with special handling in parse().
  static final String chukwaTimestampField = "timestamp";
  static final String contextNameField = "contextName";
  static final String recordNameField = "recordName";

  // NOTE(review): SimpleDateFormat is not thread-safe; assumes one processor
  // instance per thread -- TODO confirm.
  private SimpleDateFormat sdf = null;

  public HadoopMetricsProcessor() {
    // TODO move that to config
    sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm");
  }

  /**
   * Parses a syslog-framed log line whose body carries a JSON object of
   * Hadoop metrics, and emits one ChukwaRecord named
   * "&lt;contextName&gt;_&lt;recordName&gt;".
   *
   * @param recordEntry one raw log line
   * @param output collector receiving the (key, record) pair
   * @param reporter unused
   * @throws Throwable ParseException on a bad date, IOException on collection
   *         failure, or any other parsing error (logged, then rethrown)
   */
  @SuppressWarnings("unchecked")
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    try {
      // Look for syslog PRI; if PRI is not found, indexOf returns -1 and
      // the offsets below effectively start from 0.
      int idx = recordEntry.indexOf('>', 0);
      String dStr = recordEntry.substring(idx + 1, idx + 23);
      int start = idx + 25;
      idx = recordEntry.indexOf(' ', start);
      start = idx + 1; // skip the log level token
      idx = recordEntry.indexOf(' ', start);
      // skip the class-name token; the remainder is the message body
      String body = recordEntry.substring(idx + 1);
      body = body.replaceAll("\n", "");
      // Fallback time from the log line; usually overwritten by the JSON
      // "timestamp" field below.
      Date d = sdf.parse(dStr);
      start = body.indexOf('{');
      JSONObject json = (JSONObject) JSONValue.parse(body.substring(start));
      ChukwaRecord record = new ChukwaRecord();
      StringBuilder datasource = new StringBuilder();
      String contextName = null;
      String recordName = null;
      Iterator<Map.Entry<String, ?>> ki = json.entrySet().iterator();
      while (ki.hasNext()) {
        Map.Entry<String, ?> entry = ki.next();
        String keyName = entry.getKey();
        Object value = entry.getValue();
        // Fix: the original compared strings with intern() and ==; use
        // equals() for the same result without the intern-table cost.
        if (chukwaTimestampField.equals(keyName)) {
          // Truncate the JSON timestamp to the minute.
          d = new Date((Long) value);
          Calendar cal = Calendar.getInstance();
          cal.setTimeInMillis(d.getTime());
          cal.set(Calendar.SECOND, 0);
          cal.set(Calendar.MILLISECOND, 0);
          d.setTime(cal.getTimeInMillis());
        } else if (contextNameField.equals(keyName)) {
          contextName = (String) value;
        } else if (recordNameField.equals(keyName)) {
          recordName = (String) value;
          // Fix: guard against a null JSON value (the original NPE'd here).
          if (value != null) {
            record.add(keyName, value.toString());
          }
        } else if (value != null) {
          // Fix: the original re-queried json.get(keyName); checking the
          // already-fetched value is equivalent and cheaper.
          record.add(keyName, value.toString());
        }
      }
      if (contextName != null) {
        datasource.append(contextName);
        datasource.append("_");
      }
      datasource.append(recordName);
      record.add("cluster", chunk.getTag("cluster"));
      if ("jvm".equals(contextName)) {
        buildJVMRecord(record, d.getTime(), datasource.toString());
      } else {
        buildGenericRecord(record, null, d.getTime(), datasource.toString());
      }
      output.collect(key, record);
    } catch (ParseException e) {
      log.warn("Wrong format in HadoopMetricsProcessor [" + recordEntry + "]",
          e);
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in HadoopMetricsProcessor ["
          + recordEntry + "]", e);
      throw e;
    } catch (Exception e) {
      log.warn("Wrong format in HadoopMetricsProcessor [" + recordEntry + "]",
          e);
      throw e;
    }
  }

  /**
   * Builds the output key for a JVM metrics record: hour-truncated time,
   * source host and process name.
   *
   * @param record record to tag and timestamp
   * @param timestamp record time in ms
   * @param dataSource reduce type (e.g. "jvm_metrics")
   */
  protected void buildJVMRecord(ChukwaRecord record, long timestamp, String dataSource) {
    calendar.setTimeInMillis(timestamp);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MILLISECOND, 0);
    key.setKey("" + calendar.getTimeInMillis() + "/" + chunk.getSource() + ":" +
        record.getValue("processName") + "/" + timestamp);
    key.setReduceType(dataSource);
    record.setTime(timestamp);
    record.add(Record.tagsField, chunk.getTags());
    record.add(Record.sourceField, chunk.getSource());
    record.add(Record.applicationField, chunk.getStreamName());
  }

  public String getDataType() {
    return HadoopMetricsProcessor.class.getName();
  }
}
| 8,369 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ProcessorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.HashMap;
import org.apache.log4j.Logger;
public class ProcessorFactory {
  static Logger log = Logger.getLogger(ProcessorFactory.class);
  // TODO
  // add new mapper package at the end.
  // We should have a more generic way to do this.
  // Ex: read from config
  // list of alias
  // and
  // alias -> processor class
  // Registry of constructed processors; guarded by the class lock (both
  // getProcessor and register are static synchronized).
  private static HashMap<String, ChunkProcessor> processors = new HashMap<String, ChunkProcessor>(); // registry

  private ProcessorFactory() {
  }

  /**
   * Returns the cached {@link ChunkProcessor} for the given record type,
   * reflectively constructing it from the mapper package on first use.
   *
   * Fix 1: the original concatenated the package prefix and the record type
   * without a '.' separator, producing e.g. "...processor.mapperSysLog",
   * which Class.forName can never resolve. A leading '.' on recordType is
   * stripped so callers that compensated for the old behavior keep working.
   * Fix 2: the method is now synchronized; the old unsynchronized
   * check-then-act let two threads miss the cache simultaneously, and the
   * second register() call then threw DuplicateProcessorException.
   *
   * @param recordType simple class name of the processor (data type)
   * @return the shared processor instance for that type
   * @throws UnknownRecordTypeException if the class cannot be found or
   *         constructed via its no-arg constructor
   */
  public static synchronized ChunkProcessor getProcessor(String recordType)
      throws UnknownRecordTypeException {
    ChunkProcessor cached = processors.get(recordType);
    if (cached != null) {
      return cached;
    }
    String simpleName = recordType.startsWith(".")
        ? recordType.substring(1)
        : recordType;
    String path = "org.apache.hadoop.chukwa.extraction.demux.processor.mapper."
        + simpleName;
    ChunkProcessor processor = null;
    try {
      processor = (ChunkProcessor) Class.forName(path).getConstructor()
          .newInstance();
    } catch (ClassNotFoundException e) {
      throw new UnknownRecordTypeException(
          "Unknown recordType:" + recordType, e);
    } catch (Exception e) {
      throw new UnknownRecordTypeException("error constructing processor", e);
    }
    // TODO using a ThreadSafe/reuse flag to actually decide if we want
    // to reuse the same processor again and again
    register(recordType, processor);
    return processor;
  }

  /**
   * Register a specific parser for a {@link ChunkProcessor} implementation.
   * @param recordType is data type assigned during adaptor creation
   * @param processor is parser class to process data
   */
  public static synchronized void register(String recordType,
      ChunkProcessor processor) {
    log.info("register " + processor.getClass().getName()
        + " for this recordType :" + recordType);
    if (processors.containsKey(recordType)) {
      throw new DuplicateProcessorException(
          "Duplicate processor for recordType:" + recordType);
    }
    ProcessorFactory.processors.put(recordType, processor);
  }
}
| 8,370 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/LogEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class LogEntry {
  // Log4j-style timestamp "yyyy-MM-dd HH:mm:ss,SSS" is exactly the 23
  // characters that the constructor extracts below. Fix: the previous
  // pattern ("yyyy-MM-dd HH:mm") silently discarded the seconds and
  // milliseconds it had just extracted, truncating every date to the minute.
  private SimpleDateFormat sdf = new SimpleDateFormat(
      "yyyy-MM-dd HH:mm:ss,SSS");
  private Date date;
  private String logLevel;
  private String className;
  private String body;

  /**
   * Parses a log4j-formatted line of the shape
   * "yyyy-MM-dd HH:mm:ss,SSS LEVEL class.Name: message body".
   *
   * @param recordEntry one raw log line
   * @throws ParseException if the leading 23 characters are not a valid date
   */
  public LogEntry(String recordEntry) throws ParseException {
    String dStr = recordEntry.substring(0, 23);
    date = sdf.parse(dStr);
    int start = 24;
    int idx = recordEntry.indexOf(' ', start);
    logLevel = recordEntry.substring(start, idx);
    start = idx + 1;
    idx = recordEntry.indexOf(' ', start);
    // idx - 1 drops the trailing ':' after the class name.
    className = recordEntry.substring(start, idx - 1);
    body = recordEntry.substring(idx + 1);
  }

  /** @return a defensive copy of the parsed timestamp */
  public Date getDate() {
    return (Date) date.clone();
  }

  public void setDate(Date date) {
    this.date = (Date) date.clone();
  }

  public String getLogLevel() {
    return logLevel;
  }

  public String getClassName() {
    return className;
  }

  public String getBody() {
    return body;
  }
}
| 8,371 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobLogHistoryProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
@Table(name="Mapreduce",columnFamily="JobLogHistory")
public class JobLogHistoryProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(JobLogHistoryProcessor.class);
  // Reduce-side type tag for all records emitted by this processor.
  private static final String recordType = "JobLogHistory";
  // Matches one KEY="value" pair; group(3) captures the rest of the line so
  // the matcher can be re-run on the tail to walk all pairs.
  private static final String internalRegex = "(.*?)=\"(.*?)\"(.*)([\\n])?";
  private Pattern ip = null;
  private Matcher internalMatcher = null;

  public JobLogHistoryProcessor() {
    // Compile once; the matcher is reset per input line in parse().
    ip = Pattern.compile(internalRegex);
    internalMatcher = ip.matcher("-");
  }
@Override
protected void parse(String recordEntry,
OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
throws Throwable {
// log.info("JobLogHistoryProcessor record: [" + recordEntry + "] type["
// + chunk.getDataType() + "]");
try {
HashMap<String, String> keys = new HashMap<String, String>();
ChukwaRecord record = null;
int firstSep = recordEntry.indexOf(" ");
keys.put("RECORD_TYPE", recordEntry.substring(0, firstSep));
// log.info("JobLogHistoryProcessor Add field: [RECORD_TYPE]["
// + keys.get("RECORD_TYPE") + "]");
String body = recordEntry.substring(firstSep);
internalMatcher.reset(body);
// String fieldName = null;
// String fieldValue = null;
while (internalMatcher.matches()) {
keys.put(internalMatcher.group(1).trim(), internalMatcher.group(2)
.trim());
// TODO Remove debug info before production
// fieldName = internalMatcher.group(1).trim();
// fieldValue = internalMatcher.group(2).trim();
// log.info("JobLogHistoryProcessor Add field: [" + fieldName +
// "][" + fieldValue +"]" );
// log.info("EOL : [" + internalMatcher.group(3) + "]" );
internalMatcher.reset(internalMatcher.group(3));
}
if (!keys.containsKey("JOBID")) {
// Extract JobID from taskID
// JOBID = "job_200804210403_0005"
// TASKID = "tip_200804210403_0005_m_000018"
String jobId = keys.get("TASKID");
int idx1 = jobId.indexOf('_', 0);
int idx2 = jobId.indexOf('_', idx1 + 1);
idx2 = jobId.indexOf('_', idx2 + 1);
keys.put("JOBID", jobId.substring(idx1 + 1, idx2));
// log.info("JobLogHistoryProcessor Add field: [JOBID]["
// + keys.get("JOBID") + "]");
} else {
String jobId = keys.get("JOBID").replace("_", "").substring(3);
keys.put("JOBID", jobId);
}
// if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("SUBMIT_TIME"))
// {
// // Job JOBID="job_200804210403_0005" JOBNAME="MY_JOB"
// USER="userxxx"
// // SUBMIT_TIME="1208760436751"
// JOBCONF="/mapredsystem/xxx.yyy.com/job_200804210403_0005/job.xml"
//
//
// }
// else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("LAUNCH_TIME"))
// {
// // Job JOBID="job_200804210403_0005" LAUNCH_TIME="1208760437110"
// TOTAL_MAPS="5912" TOTAL_REDUCES="739"
//
// }
// else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("FINISH_TIME"))
// {
// // Job JOBID="job_200804210403_0005" FINISH_TIME="1208760906816"
// JOB_STATUS="SUCCESS" FINISHED_MAPS="5912" FINISHED_REDUCES="739"
// FAILED_MAPS="0" FAILED_REDUCES="0"
// // COUNTERS="File Systems.Local bytes read:1735053407244,File
// Systems.Local bytes written:2610106384012,File Systems.HDFS bytes
// read:801605644910,File Systems.HDFS bytes written:44135800,
// // Job Counters .Launched map tasks:5912,Job Counters .Launched
// reduce tasks:739,Job Counters .Data-local map tasks:5573,Job
// Counters .Rack-local map tasks:316,Map-Reduce Framework.
// // Map input records:9410696067,Map-Reduce Framework.Map output
// records:9410696067,Map-Reduce Framework.Map input
// bytes:801599188816,Map-Reduce Framework.Map output
// bytes:784427968116,
// // Map-Reduce Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0,Map-Reduce Framework.Reduce
// input groups:477265,Map-Reduce Framework.Reduce input
// records:739000,
// // Map-Reduce Framework.Reduce output records:739000"
//
// }
// else
if (keys.get("RECORD_TYPE").equalsIgnoreCase("MapAttempt")
&& keys.containsKey("START_TIME")) {
// MapAttempt TASK_TYPE="MAP"
// TASKID="tip_200804210403_0005_m_000018"
// TASK_ATTEMPT_ID="task_200804210403_0005_m_000018_0"
// START_TIME="1208760437531"
// HOSTNAME="tracker_xxx.yyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:53734"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/Map/" + keys.get("JOBID") + "/"
+ keys.get("START_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("START_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("START_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/Map/S");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("MapAttempt")
&& keys.containsKey("FINISH_TIME")) {
// MapAttempt TASK_TYPE="MAP"
// TASKID="tip_200804210403_0005_m_005494"
// TASK_ATTEMPT_ID="task_200804210403_0005_m_005494_0"
// TASK_STATUS="SUCCESS"
// FINISH_TIME="1208760624124"
// HOSTNAME="tracker_xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:55491"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/Map/" + keys.get("JOBID") + "/"
+ keys.get("FINISH_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("FINISH_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("FINISH_TIME", keys.get("FINISH_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/Map/E");
output.collect(key, record);
}
else if (keys.get("RECORD_TYPE").equalsIgnoreCase("ReduceAttempt")
&& keys.containsKey("START_TIME")) {
// ReduceAttempt TASK_TYPE="REDUCE"
// TASKID="tip_200804210403_0005_r_000138"
// TASK_ATTEMPT_ID="task_200804210403_0005_r_000138_0"
// START_TIME="1208760454885"
// HOSTNAME="tracker_xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:51947"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SHUFFLE/" + keys.get("JOBID") + "/"
+ keys.get("START_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("START_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("START_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SHUFFLE/S");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("ReduceAttempt")
&& keys.containsKey("FINISH_TIME")) {
// ReduceAttempt TASK_TYPE="REDUCE"
// TASKID="tip_200804210403_0005_r_000138"
// TASK_ATTEMPT_ID="task_200804210403_0005_r_000138_0"
// TASK_STATUS="SUCCESS" SHUFFLE_FINISHED="1208760787167"
// SORT_FINISHED="1208760787354" FINISH_TIME="1208760802395"
// HOSTNAME="tracker__xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:51947"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SHUFFLE/" + keys.get("JOBID") + "/"
+ keys.get("SHUFFLE_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SHUFFLE_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("SHUFFLE_FINISHED", keys.get("SHUFFLE_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SHUFFLE/E");
output.collect(key, record);
// SORT
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SORT/" + keys.get("JOBID") + "/"
+ keys.get("SHUFFLE_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SHUFFLE_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("SHUFFLE_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SORT/S");
output.collect(key, record);
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SORT/" + keys.get("JOBID") + "/"
+ keys.get("SORT_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("SORT_FINISHED", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SORT/E");
output.collect(key, record);
// Reduce
key = new ChukwaRecordKey();
key.setKey("JobLogHist/REDUCE/" + keys.get("JOBID") + "/"
+ keys.get("SORT_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/REDUCE/S");
output.collect(key, record);
key = new ChukwaRecordKey();
key.setKey("JobLogHist/REDUCE/" + keys.get("JOBID") + "/"
+ keys.get("FINISH_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("FINISH_TIME", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/REDUCE/E");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job")) {
// 1
// Job JOBID="job_200809062051_0001" JOBNAME="wordcount" USER="xxx"
// SUBMIT_TIME="1208760906812"
// JOBCONF=
// "/user/xxx/mapredsystem/563976.yyy.zzz.com/job_200809062051_0001/job.xml"
// 2
// Job JOBID="job_200809062051_0001" LAUNCH_TIME="1208760906816"
// TOTAL_MAPS="3" TOTAL_REDUCES="7"
// 3
// Job JOBID="job_200804210403_0005" FINISH_TIME="1208760906826"
// JOB_STATUS="SUCCESS" FINISHED_MAPS="5912"
// FINISHED_REDUCES="739" FAILED_MAPS="0" FAILED_REDUCES="0"
// COUNTERS="File Systems.Local bytes read:1735053407244,File
// Systems.Local bytes written:2610106384012,File Systems.HDFS
// bytes read:801605644910,File Systems.HDFS bytes
// written:44135800,
// Job Counters .Launched map tasks:5912,Job Counters .Launched
// reduce tasks:739,Job Counters .Data-local map tasks:5573,Job
// Counters .Rack-local map tasks:316,Map-Reduce Framework.
// Map input records:9410696067,Map-Reduce Framework.Map output
// records:9410696067,Map-Reduce Framework.Map input
// bytes:801599188816,Map-Reduce Framework.Map output
// bytes:784427968116,
// Map-Reduce Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0,Map-Reduce
// Framework.Reduce input groups:477265,Map-Reduce
// Framework.Reduce input records:739000,
// Map-Reduce Framework.Reduce output records:739000"
record = new ChukwaRecord();
key = new ChukwaRecordKey();
buildGenericRecord(record, null, Long
.parseLong(keys.get("FINISH_TIME")), "MRJob");
if (keys.containsKey("COUNTERS")) {
extractCounters(record, keys.get("COUNTERS"));
}
key = new ChukwaRecordKey();
key.setKey("MRJob/" + keys.get("JOBID"));
key.setReduceType("MRJobReduceProcessor");
record = new ChukwaRecord();
record.add(Record.tagsField, chunk.getTags());
if (keys.containsKey("SUBMIT_TIME")) {
record.setTime(Long.parseLong(keys.get("SUBMIT_TIME")));
} else if (keys.containsKey("LAUNCH_TIME")) {
record.setTime(Long.parseLong(keys.get("LAUNCH_TIME")));
} else if (keys.containsKey("FINISH_TIME")) {
record.setTime(Long.parseLong(keys.get("FINISH_TIME")));
}
for(Entry<String, String> entry : keys.entrySet()) {
record.add(entry.getKey(), entry.getValue());
}
output.collect(key, record);
}
if (keys.containsKey("TASK_TYPE")
&& keys.containsKey("COUNTERS")
&& (keys.get("TASK_TYPE").equalsIgnoreCase("REDUCE") || keys.get(
"TASK_TYPE").equalsIgnoreCase("MAP"))) {
// MAP
// Task TASKID="tip_200804210403_0005_m_000154" TASK_TYPE="MAP"
// TASK_STATUS="SUCCESS" FINISH_TIME="1208760463883"
// COUNTERS="File Systems.Local bytes read:159265655,File
// Systems.Local bytes written:318531310,
// File Systems.HDFS bytes read:145882417,Map-Reduce
// Framework.Map input records:1706604,
// Map-Reduce Framework.Map output records:1706604,Map-Reduce
// Framework.Map input bytes:145882057,
// Map-Reduce Framework.Map output bytes:142763253,Map-Reduce
// Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0"
// REDUCE
// Task TASKID="tip_200804210403_0005_r_000524"
// TASK_TYPE="REDUCE" TASK_STATUS="SUCCESS"
// FINISH_TIME="1208760877072"
// COUNTERS="File Systems.Local bytes read:1179319677,File
// Systems.Local bytes written:1184474889,File Systems.HDFS
// bytes written:59021,
// Map-Reduce Framework.Reduce input groups:684,Map-Reduce
// Framework.Reduce input records:1000,Map-Reduce
// Framework.Reduce output records:1000"
record = new ChukwaRecord();
key = new ChukwaRecordKey();
buildGenericRecord(record, null, Long
.parseLong(keys.get("FINISH_TIME")), "SizeVsFinishTime");
extractCounters(record, keys.get("COUNTERS"));
record.add("JOBID", keys.get("JOBID"));
record.add("TASKID", keys.get("TASKID"));
record.add("TASK_TYPE", keys.get("TASK_TYPE"));
record.add(Record.tagsField, chunk.getTags());
// log.info("MR_Graph +1");
output.collect(key, record);
}
} catch (IOException e) {
log.warn("Unable to collect output in JobLogHistoryProcessor ["
+ recordEntry + "]", e);
e.printStackTrace();
throw e;
}
}
protected void extractCounters(ChukwaRecord record, String input) {
String[] data = null;
String[] counters = input.split(",");
for (String counter : counters) {
data = counter.split(":");
record.add(data[0].replaceAll(" ", "_").replaceAll("\\.", "_")
.toUpperCase(), data[1]);
}
}
  /**
   * Returns the Chukwa data type this processor handles.
   *
   * @return the record type constant declared by JobLogHistoryProcessor
   */
  public String getDataType() {
    return JobLogHistoryProcessor.recordType;
  }
}
| 8,372 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
@Tables(annotations={
@Table(name="Jobs",columnFamily="summary")
})
public class JobSummary extends AbstractProcessor {
  static Logger log = Logger.getLogger(JobSummary.class);
  static final String chukwaTimestampField = "timestamp";
  static final String contextNameField = "contextName";
  static final String recordNameField = "recordName";

  // SimpleDateFormat is not thread-safe; this is safe only because each
  // processor instance is used by a single mapper thread.
  private SimpleDateFormat sdf = null;

  public JobSummary() {
    // TODO move the date format to config
    sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm");
  }

  /**
   * Parses one syslog-style job summary line. After an optional syslog PRI,
   * the line contains a 22-character timestamp, a log level, a class name,
   * and a body that is a comma-separated list of key=value pairs.
   *
   * @throws Throwable if the date cannot be parsed or output collection fails
   */
  @SuppressWarnings("unchecked")
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    try {
      // Look for syslog PRI; if PRI is not found, indexOf returns -1 and the
      // date is read starting at offset 0.
      int idx = recordEntry.indexOf('>', 0);
      String dStr = recordEntry.substring(idx + 1, idx + 23);
      int start = idx + 25;
      idx = recordEntry.indexOf(' ', start);
      // String level = recordEntry.substring(start, idx);
      start = idx + 1;
      idx = recordEntry.indexOf(' ', start);
      // String className = recordEntry.substring(start, idx-1);
      String body = recordEntry.substring(idx + 1);
      body = body.replaceAll("\n", "");
      Date d = sdf.parse(dStr);
      ChukwaRecord record = new ChukwaRecord();
      String[] list = body.split(",");
      for (String pair : list) {
        String[] kv = pair.split("=");
        // Skip malformed pairs (no '=') instead of failing the whole record
        // with an ArrayIndexOutOfBoundsException.
        if (kv.length < 2) {
          log.warn("Skipping malformed key/value pair [" + pair + "]");
          continue;
        }
        record.add(kv[0], kv[1]);
      }
      record.add("cluster", chunk.getTag("cluster"));
      buildGenericRecord(record, d.getTime(), "summary");
      output.collect(key, record);
    } catch (ParseException e) {
      log.warn("Wrong format in JobSummary [" + recordEntry + "]",
          e);
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in JobSummary ["
          + recordEntry + "]", e);
      throw e;
    } catch (Exception e) {
      log.warn("Wrong format in JobSummary [" + recordEntry + "]",
          e);
      throw e;
    }
  }

  /**
   * Builds the record key as {@code hourTimestamp/jobId/timestamp} and stamps
   * the record with the standard tags/source/application fields.
   *
   * @param record     record to finalize
   * @param timestamp  event time in milliseconds since the epoch
   * @param dataSource reduce type to set on the key
   */
  protected void buildGenericRecord(ChukwaRecord record,
      long timestamp, String dataSource) {
    // Truncate to the hour so records of the same job land in one partition.
    calendar.setTimeInMillis(timestamp);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MILLISECOND, 0);
    key.setKey("" + calendar.getTimeInMillis() + "/" + record.getValue("jobId") + "/"
        + timestamp);
    key.setReduceType(dataSource);
    record.setTime(timestamp);
    record.add(Record.tagsField, chunk.getTags());
    record.add(Record.sourceField, chunk.getSource());
    record.add(Record.applicationField, chunk.getStreamName());
  }

  public String getDataType() {
    return JobSummary.class.getName();
  }
}
| 8,373 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TsProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.HashMap;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.demux.Demux;
import org.apache.hadoop.chukwa.util.RegexUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* TsProcessor is a generic processor that can be configured to find the timestamp
* in the text of a record. By default, this class expects that a record
* starts with a date in this format: <code>yyyy-MM-dd HH:mm:ss,SSS</code>
* <P>
* This format can be changed with the following configurations.
* <UL>
* <LI><code>TsProcessor.default.time.format</code> - Changes the default time
* format used by all data types.</LI>
* <LI><code>TsProcessor.time.format.[some_data_type]</code> - Overrides the default
* format for a specific data type.</LI>
* </UL>
* If the time string is not at the beginning of the record you can configure a
* regular expression to locate the timestamp text with either of the following
* configurations. The text found in group 1 of the regular expression match
* will be used with the configured date format.
* <UL>
* <LI><code>TsProcessor.default.time.regex</code> - Changes the default time
* location regex of the time text for all data types.</LI>
* <LI><code>TsProcessor.time.regex.[some_data_type]</code> - Overrides the
* default time location regex for a specific data type.</LI>
* </UL>
*
*/
@Table(name="TsProcessor",columnFamily="log")
public class TsProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(TsProcessor.class);

  public static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss,SSS";
  public static final String DEFAULT_TIME_REGEX = "TsProcessor.default.time.regex";
  public static final String TIME_REGEX = "TsProcessor.time.regex.";
  // Property names for the default and per-datatype date format overrides;
  // previously hard-coded inline, inconsistent with the regex constants above.
  private static final String DEFAULT_TIME_FORMAT = "TsProcessor.default.time.format";
  private static final String TIME_FORMAT = "TsProcessor.time.format.";

  // Per-datatype caches, populated lazily on first use.
  private Map<String, Pattern> datePatternMap;
  private Map<String, SimpleDateFormat> dateFormatMap;

  public TsProcessor() {
    datePatternMap = new HashMap<String, Pattern>();
    dateFormatMap = new HashMap<String, SimpleDateFormat>();
  }

  /**
   * Extracts the timestamp from the record (optionally locating it with a
   * configured regex), then emits the whole record entry keyed by that time.
   *
   * @throws Throwable on an unparseable date or a collection failure
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    String dStr = null;
    try {
      SimpleDateFormat sdf = fetchDateFormat(chunk.getDataType());
      Pattern datePattern = fetchDateLocationPattern(chunk.getDataType());
      // Fetch the part of the record that contains the date: group 1 of the
      // configured regex, or the whole record when no regex is configured.
      if (datePattern != null) {
        Matcher m = datePattern.matcher(recordEntry);
        if (!m.matches() || m.groupCount() < 1) {
          throw new ParseException("Regex " + datePattern +
              " couldn't extract date string from record: " + recordEntry, 0);
        }
        else {
          dStr = m.group(1);
        }
      }
      else {
        dStr = recordEntry;
      }
      Date d = sdf.parse(dStr);
      ChukwaRecord record = new ChukwaRecord();
      this.buildGenericRecord(record, recordEntry, d.getTime(), chunk
          .getDataType());
      output.collect(key, record);
    } catch (ParseException e) {
      log.warn("Unable to parse the date in DefaultProcessor [" + recordEntry
          + "], date string='" + dStr + "'", e);
      e.printStackTrace();
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in DefaultProcessor [" + recordEntry
          + "]", e);
      e.printStackTrace();
      throw e;
    }
  }

  /**
   * For a given dataType, returns the SimpleDateFormat to use. The result is
   * cached per dataType; configuration overrides are read on the first call.
   *
   * @param dataType the chunk's data type (also the cache key)
   * @return the date format for this data type
   */
  private SimpleDateFormat fetchDateFormat(String dataType) {
    if (dateFormatMap.get(dataType) != null) {
      return dateFormatMap.get(dataType);
    }
    Configuration jobConf = Demux.jobConf;
    String dateFormat = DEFAULT_DATE_FORMAT;
    if (jobConf != null) {
      // Use the dataType argument (not chunk.getDataType()) so the config
      // lookup always matches the cache key.
      dateFormat = jobConf.get(DEFAULT_TIME_FORMAT, dateFormat);
      dateFormat = jobConf.get(TIME_FORMAT + dataType, dateFormat);
    }
    log.info("dataType: " + dataType + ", dateFormat="+ dateFormat);
    SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
    dateFormatMap.put(dataType, sdf);
    return sdf;
  }

  /**
   * For a given dataType, returns a Pattern whose group 1 is the date portion
   * of the record, or null when no regex is configured (or it is invalid).
   * The result (including null) is cached per dataType.
   *
   * @param dataType the chunk's data type (also the cache key)
   * @return the compiled pattern, or null
   */
  private Pattern fetchDateLocationPattern(String dataType) {
    if (datePatternMap.containsKey(dataType)) {
      return datePatternMap.get(dataType);
    }
    Configuration jobConf = Demux.jobConf;
    String datePattern = null;
    Pattern pattern = null;
    if (jobConf != null) {
      // Use the dataType argument (not chunk.getDataType()) so the config
      // lookup always matches the cache key.
      String timeRegexProperty = TIME_REGEX + dataType;
      datePattern = jobConf.get(DEFAULT_TIME_REGEX, null);
      datePattern = jobConf.get(timeRegexProperty, datePattern);
      if (datePattern != null) {
        if (!RegexUtil.isRegex(datePattern, 1)) {
          log.warn("Error parsing '" + DEFAULT_TIME_REGEX + "' or '"
              + timeRegexProperty + "' properties as a regex: "
              + RegexUtil.regexError(datePattern, 1)
              + ". This date pattern will be skipped.");
          return null;
        }
        pattern = Pattern.compile(datePattern);
      }
    }
    datePatternMap.put(dataType, pattern);
    return pattern;
  }
}
| 8,374 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DefaultProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class DefaultProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(DefaultProcessor.class);

  /**
   * Fallback processor: wraps the raw record entry into a ChukwaRecord keyed
   * by the chunk's time partition and data type, without any parsing.
   *
   * Collection failures are logged and swallowed so that one bad record does
   * not abort the map task.
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    try {
      ChukwaRecord record = new ChukwaRecord();
      this.buildGenericRecord(record, recordEntry, archiveKey
          .getTimePartition(), chunk.getDataType());
      output.collect(key, record);
    } catch (IOException e) {
      // log.warn(..., e) already records the stack trace; the former
      // e.printStackTrace() duplicated it on stderr.
      log.warn("Unable to collect output in DefaultProcessor [" + recordEntry
          + "]", e);
    }
  }
}
| 8,375 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/NamenodeProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "Namenode", columnFamily = "summary"),
    @Table(name = "Namenode", columnFamily = "hdfs"),
    @Table(name = "Namenode", columnFamily = "rpc"),
    @Table(name = "Namenode", columnFamily = "jvm") })
public class NamenodeProcessor extends AbstractProcessor {
  // Class-level logger, consistent with the sibling processors. Previously a
  // local Logger declared inside the try block shadowed the inherited one, so
  // the catch block logged under a different logger name.
  static Logger log = Logger.getLogger(NamenodeProcessor.class);

  // Previous values of monotonically increasing counters, used to emit
  // per-interval deltas. NOTE(review): static, so shared by every instance in
  // this JVM — appears to assume metrics from a single namenode; confirm.
  static Map<String, Long> rateMap = new ConcurrentHashMap<String, Long>();

  static {
    long zero = 0L;
    rateMap.put("AddBlockOps", zero);
    rateMap.put("CreateFileOps", zero);
    rateMap.put("DeleteFileOps", zero);
    rateMap.put("FileInfoOps", zero);
    rateMap.put("FilesAppended", zero);
    rateMap.put("FilesCreated", zero);
    rateMap.put("FilesDeleted", zero);
    rateMap.put("FileInGetListingOps", zero);
    rateMap.put("FilesRenamed", zero);
    rateMap.put("GetBlockLocations", zero);
    rateMap.put("GetListingOps", zero);
    rateMap.put("SentBytes", zero);
    rateMap.put("ReceivedBytes", zero);
    rateMap.put("rpcAuthorizationSuccesses", zero);
    rateMap.put("rpcAuthorizationFailures", zero);
    rateMap.put("RpcQueueTime_num_ops", zero);
    rateMap.put("RpcProcessingTime_num_ops", zero);
    rateMap.put("gcCount", zero);
  }

  /**
   * Parses a JSON metrics snapshot from the namenode JMX adaptor and routes
   * each metric into one of four records (summary, hdfs, jvm, rpc). Counter
   * metrics listed in rateMap are converted to per-interval deltas. All
   * failures are logged and swallowed (best-effort processing).
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    try {
      long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
          .getTimeInMillis();
      final ChukwaRecord hdfs_overview = new ChukwaRecord();
      final ChukwaRecord hdfs_namenode = new ChukwaRecord();
      final ChukwaRecord namenode_jvm = new ChukwaRecord();
      final ChukwaRecord namenode_rpc = new ChukwaRecord();

      // Routing table: metric name -> destination record. Plain puts instead
      // of the former double-brace anonymous-subclass initializer.
      Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>();
      metricsMap.put("BlockCapacity", hdfs_overview);
      metricsMap.put("BlocksTotal", hdfs_overview);
      metricsMap.put("CapacityTotalGB", hdfs_overview);
      metricsMap.put("CapacityUsedGB", hdfs_overview);
      metricsMap.put("CapacityRemainingGB", hdfs_overview);
      metricsMap.put("CorruptBlocks", hdfs_overview);
      metricsMap.put("ExcessBlocks", hdfs_overview);
      metricsMap.put("FilesTotal", hdfs_overview);
      metricsMap.put("MissingBlocks", hdfs_overview);
      metricsMap.put("PendingDeletionBlocks", hdfs_overview);
      metricsMap.put("PendingReplicationBlocks", hdfs_overview);
      metricsMap.put("ScheduledReplicationBlocks", hdfs_overview);
      metricsMap.put("TotalLoad", hdfs_overview);
      metricsMap.put("UnderReplicatedBlocks", hdfs_overview);
      metricsMap.put("gcCount", namenode_jvm);
      metricsMap.put("gcTimeMillis", namenode_jvm);
      metricsMap.put("logError", namenode_jvm);
      metricsMap.put("logFatal", namenode_jvm);
      metricsMap.put("logInfo", namenode_jvm);
      metricsMap.put("logWarn", namenode_jvm);
      metricsMap.put("memHeapCommittedM", namenode_jvm);
      metricsMap.put("memHeapUsedM", namenode_jvm);
      metricsMap.put("threadsBlocked", namenode_jvm);
      metricsMap.put("threadsNew", namenode_jvm);
      metricsMap.put("threadsRunnable", namenode_jvm);
      metricsMap.put("threadsTerminated", namenode_jvm);
      metricsMap.put("threadsTimedWaiting", namenode_jvm);
      metricsMap.put("threadsWaiting", namenode_jvm);
      metricsMap.put("ReceivedBytes", namenode_rpc);
      metricsMap.put("RpcProcessingTime_avg_time", namenode_rpc);
      metricsMap.put("RpcProcessingTime_num_ops", namenode_rpc);
      metricsMap.put("RpcQueueTime_avg_time", namenode_rpc);
      metricsMap.put("RpcQueueTime_num_ops", namenode_rpc);
      metricsMap.put("SentBytes", namenode_rpc);
      metricsMap.put("rpcAuthorizationSuccesses", namenode_rpc);
      metricsMap.put("rpcAuthenticationFailures", namenode_rpc);
      metricsMap.put("rpcAuthenticationSuccesses", namenode_rpc);

      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for namenode");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = obj.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        String valueString = (value == null) ? "" : value.toString();

        // These metrics are string types with JSON structure; parse them and
        // report the entry count instead of the raw JSON.
        if (key.equals("LiveNodes") || key.equals("DeadNodes")
            || key.equals("DecomNodes") || key.equals("NameDirStatuses")) {
          JSONObject jobj = (JSONObject) JSONValue.parse(valueString);
          valueString = Integer.toString(jobj.size());
        }

        // Convert known counters to deltas against the previous snapshot.
        if (rateMap.containsKey(key)) {
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if (newValue < 0) {
            // A negative delta means the namenode (or this JVM) restarted.
            log.error("NamenodeProcessor's rateMap might be reset or corrupted for metric "
                + key);
            newValue = 0L;
          }
          valueString = Long.toString(newValue);
        }

        // Route to a category record, or default to the hdfs namenode record.
        if (metricsMap.containsKey(key)) {
          metricsMap.get(key).add(key, valueString);
        } else {
          hdfs_namenode.add(key, valueString);
        }
      }
      buildGenericRecord(hdfs_overview, null, timeStamp, "summary");
      output.collect(key, hdfs_overview);
      buildGenericRecord(hdfs_namenode, null, timeStamp, "hdfs");
      output.collect(key, hdfs_namenode);
      buildGenericRecord(namenode_jvm, null, timeStamp, "jvm");
      output.collect(key, namenode_jvm);
      buildGenericRecord(namenode_rpc, null, timeStamp, "rpc");
      output.collect(key, namenode_rpc);
    } catch (Exception e) {
      // Best-effort: a bad snapshot is logged and dropped, not rethrown.
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,376 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobConfProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
@Tables(annotations={
@Table(name="Mapreduce",columnFamily="JobData"),
@Table(name="Mapreduce",columnFamily="JobConfData")
})
public class JobConfProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(JobConfProcessor.class);
  private static final String jobData = "JobData";
  private static final String jobConfData = "JobConfData";
  static Pattern timePattern = Pattern.compile("(.*)?time=\"(.*?)\"(.*)?");
  static Pattern jobPattern = Pattern.compile("(.*?)job_(.*?)_conf\\.xml(.*?)");

  /**
   * Parses a Hadoop job configuration XML document (the record entry) and
   * emits two records: one with the whole conf as JSON plus the queue name,
   * and one with every property as an individual "job_conf.*" field.
   *
   * The job id is taken from the stream name and the event time from the
   * chunk tags. The XML is staged through a temporary file (deleted on all
   * paths) because DocumentBuilder is driven off a file here.
   *
   * @throws Throwable on XML parse errors or output collection failures
   */
  @SuppressWarnings("unchecked")
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter)
      throws Throwable
  {
    Long time = 0L;
    String tags = this.chunk.getTags();
    Matcher matcher = timePattern.matcher(tags);
    if (matcher.matches()) {
      time = Long.parseLong(matcher.group(2));
    }
    String capp = this.chunk.getStreamName();
    String jobID = "";
    matcher = jobPattern.matcher(capp);
    if (matcher.matches()) {
      jobID = matcher.group(2);
    }
    ChukwaRecord record = new ChukwaRecord();
    ChukwaRecord jobConfRecord = new ChukwaRecord();
    DocumentBuilderFactory docBuilderFactory
      = DocumentBuilderFactory.newInstance();
    // Ignore all comments inside the xml file.
    docBuilderFactory.setIgnoringComments(true);
    // Unique temp file instead of "test_<random int>" in the working
    // directory, which was collision-prone.
    File tmp = File.createTempFile("jobconf", ".xml");
    try {
      DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
      FileOutputStream out = new FileOutputStream(tmp);
      try {
        out.write(recordEntry.getBytes(Charset.forName("UTF-8")));
      } finally {
        // Close in a finally so the stream never leaks, even when the
        // write fails.
        out.close();
      }
      Document doc = builder.parse(tmp);
      Element root = doc.getDocumentElement();
      if (!"configuration".equals(root.getTagName()))
        log.fatal("bad conf file: top-level element not <configuration>");
      NodeList props = root.getChildNodes();
      JSONObject json = new JSONObject();
      String queue = "default";
      for (int i = 0; i < props.getLength(); i++) {
        Node propNode = props.item(i);
        if (!(propNode instanceof Element))
          continue;
        Element prop = (Element) propNode;
        if (!"property".equals(prop.getTagName()))
          log.warn("bad conf file: element not <property>");
        NodeList fields = prop.getChildNodes();
        String attr = null;
        String value = null;
        for (int j = 0; j < fields.getLength(); j++) {
          Node fieldNode = fields.item(j);
          if (!(fieldNode instanceof Element))
            continue;
          Element field = (Element) fieldNode;
          if ("name".equals(field.getTagName()) && field.hasChildNodes())
            attr = ((Text) field.getFirstChild()).getData().trim();
          if ("value".equals(field.getTagName()) && field.hasChildNodes())
            value = ((Text) field.getFirstChild()).getData();
        }
        if (attr != null && value != null) {
          json.put(attr, value);
          // String equality via equals(), not interned reference comparison.
          if ("mapred.job.queue.name".equals(attr)) {
            queue = value;
          }
          jobConfRecord.add("job_conf." + attr, value);
        }
      }
      record.add("JOBCONF-JSON", json.toString());
      record.add("mapred.job.queue.name", queue);
      record.add("JOBID", "job_" + jobID);
      buildGenericRecord(record, null, time, jobData);
      // Truncate to the hour so all records for a job share a partition.
      calendar.setTimeInMillis(time);
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      key.setKey("" + calendar.getTimeInMillis() + "/job_" + jobID + "/" + time);
      output.collect(key, record);
      jobConfRecord.add("JOBID", "job_" + jobID);
      buildGenericRecord(jobConfRecord, null, time, jobConfData);
      output.collect(key, jobConfRecord);
    } catch (IOException e) {
      e.printStackTrace();
      throw e;
    } finally {
      // Delete the temp file on every path; previously it leaked whenever
      // parsing threw.
      if (!tmp.delete()) {
        log.warn(tmp.getAbsolutePath() + " cannot be deleted.");
      }
    }
  }

  public String getDataType() {
    return JobConfProcessor.class.getName();
  }
}
| 8,377 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DatanodeProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "Datanode", columnFamily = "dn"),
@Table(name = "Datanode", columnFamily = "jvm"),
@Table(name = "Datanode", columnFamily = "rpc") })
public class DatanodeProcessor extends AbstractProcessor {

  /** Class-level logger; previously re-created on every parse() call. */
  private static final Logger log = Logger.getLogger(DatanodeProcessor.class);

  /**
   * Last observed sample of each monotonically increasing JMX counter, used
   * to emit per-interval deltas ("rates") instead of raw counters. Shared by
   * all parse() invocations, hence the concurrent map.
   */
  static Map<String, Long> rateMap = new ConcurrentHashMap<String, Long>();
  static {
    long zero = 0L;
    rateMap.put("blocks_verified", zero);
    rateMap.put("blocks_written", zero);
    rateMap.put("blocks_read", zero);
    rateMap.put("bytes_written", zero);
    rateMap.put("bytes_read", zero);
    rateMap.put("heartBeats_num_ops", zero);
    rateMap.put("SentBytes", zero);
    rateMap.put("ReceivedBytes", zero);
    rateMap.put("rpcAuthorizationSuccesses", zero);
    rateMap.put("rpcAuthorizationFailures", zero);
    rateMap.put("RpcQueueTime_num_ops", zero);
    rateMap.put("RpcProcessingTime_num_ops", zero);
    rateMap.put("gcCount", zero);
  }

  /**
   * Parses a JSON-encoded datanode JMX dump and emits three records:
   * HDFS datanode metrics ("dn"), JVM metrics ("jvm") and RPC metrics
   * ("rpc"). Counter metrics listed in {@link #rateMap} are converted to
   * deltas against the previous sample before being emitted.
   *
   * @param recordEntry JSON object produced by the JMX adaptor
   * @param output collector receiving the three metric records
   * @param reporter unused
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    // Fallback timestamp when the chunk carries no "timeStamp" tag.
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
        .getTimeInMillis();
    ChukwaRecord hdfs_datanode = new ChukwaRecord();
    ChukwaRecord datanode_jvm = new ChukwaRecord();
    ChukwaRecord datanode_rpc = new ChukwaRecord();

    // Routing table: metric name -> record (column family) it belongs to.
    // (Plain map instead of the previous double-brace anonymous subclass,
    // which pinned a reference to the enclosing instance.)
    Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>();
    metricsMap.put("blocks_verified", hdfs_datanode);
    metricsMap.put("blocks_written", hdfs_datanode);
    metricsMap.put("blocks_read", hdfs_datanode);
    metricsMap.put("blocks_replicated", hdfs_datanode);
    metricsMap.put("blocks_removed", hdfs_datanode);
    metricsMap.put("bytes_written", hdfs_datanode);
    metricsMap.put("bytes_read", hdfs_datanode);
    metricsMap.put("heartBeats_avg_time", hdfs_datanode);
    metricsMap.put("heartBeats_num_ops", hdfs_datanode);

    metricsMap.put("gcCount", datanode_jvm);
    metricsMap.put("gcTimeMillis", datanode_jvm);
    metricsMap.put("logError", datanode_jvm);
    metricsMap.put("logFatal", datanode_jvm);
    metricsMap.put("logInfo", datanode_jvm);
    metricsMap.put("logWarn", datanode_jvm);
    metricsMap.put("memHeapCommittedM", datanode_jvm);
    metricsMap.put("memHeapUsedM", datanode_jvm);
    metricsMap.put("threadsBlocked", datanode_jvm);
    metricsMap.put("threadsNew", datanode_jvm);
    metricsMap.put("threadsRunnable", datanode_jvm);
    metricsMap.put("threadsTerminated", datanode_jvm);
    metricsMap.put("threadsTimedWaiting", datanode_jvm);
    metricsMap.put("threadsWaiting", datanode_jvm);

    metricsMap.put("ReceivedBytes", datanode_rpc);
    metricsMap.put("RpcProcessingTime_avg_time", datanode_rpc);
    metricsMap.put("RpcProcessingTime_num_ops", datanode_rpc);
    metricsMap.put("RpcQueueTime_avg_time", datanode_rpc);
    metricsMap.put("RpcQueueTime_num_ops", datanode_rpc);
    metricsMap.put("SentBytes", datanode_rpc);
    metricsMap.put("rpcAuthorizationSuccesses", datanode_rpc);
    // Fix: rateMap computed a delta for rpcAuthorizationFailures but the
    // metric was never routed to a record, so its rate was silently dropped.
    metricsMap.put("rpcAuthorizationFailures", datanode_rpc);

    try {
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for datanode");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = obj.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
        // Convert monotonically increasing counters to per-interval deltas.
        if (rateMap.containsKey(key)) {
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if (newValue < 0) {
            // A negative delta means the datanode restarted (counters reset)
            // or the map is corrupted; report zero rather than nonsense.
            log.error("DatanodeProcessor's rateMap might be reset or corrupted for metric "
                + key);
            newValue = 0L;
          }
          valueString = Long.toString(newValue);
        }
        if (metricsMap.containsKey(key)) {
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        }
      }
      buildGenericRecord(hdfs_datanode, null, timeStamp, "dn");
      output.collect(key, hdfs_datanode);
      buildGenericRecord(datanode_jvm, null, timeStamp, "jvm");
      output.collect(key, datanode_jvm);
      buildGenericRecord(datanode_rpc, null, timeStamp, "rpc");
      output.collect(key, datanode_rpc);
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,378 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/SysLog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
@Table(name="SystemMetrics",columnFamily="SysLog")
public class SysLog extends AbstractProcessor {
  static Logger log = Logger.getLogger(SysLog.class);
  private static final String reduceType = "SysLog";
  // Parses the year-less syslog timestamp prefix, e.g. "Mar  1 12:34:56".
  // NOTE(review): SimpleDateFormat is not thread-safe; assumes each mapper
  // owns its own SysLog instance — confirm before sharing instances.
  private SimpleDateFormat sdf = null;

  public SysLog() {
    sdf = new SimpleDateFormat("MMM d HH:mm:ss");
  }

  /**
   * Parses one syslog line into a ChukwaRecord keyed by its timestamp.
   * Syslog timestamps carry no year, so the current year is assumed; if
   * that puts the entry in the future (e.g. a Dec 31 line parsed on Jan 1),
   * the previous year is used instead.
   *
   * @param recordEntry raw syslog line; the first 15 characters are the date
   * @param output collector receiving the parsed record
   * @param reporter unused
   * @throws ParseException if the line does not start with a syslog date
   * @throws IOException if the collector fails
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    try {
      String dStr = recordEntry.substring(0, 15);
      Calendar convertDate = Calendar.getInstance();
      Date d = sdf.parse(dStr);
      int year = convertDate.get(Calendar.YEAR);
      convertDate.setTime(d);
      convertDate.set(Calendar.YEAR, year);
      // Future timestamp => the entry belongs to the previous year.
      if (convertDate.getTimeInMillis() > Calendar.getInstance().getTimeInMillis()) {
        convertDate.set(Calendar.YEAR, year - 1);
      }
      ChukwaRecord record = new ChukwaRecord();
      // getTimeInMillis() replaces the roundabout getTime().getTime().
      buildGenericRecord(record, recordEntry, convertDate.getTimeInMillis(),
          reduceType);
      output.collect(key, record);
    } catch (ParseException e) {
      // log.warn already records the exception; the redundant
      // printStackTrace() was removed. Rethrow so demux saves the chunk.
      log.warn("Wrong format in SysLog [" + recordEntry + "]", e);
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in SysLog [" + recordEntry + "]", e);
      throw e;
    }
  }

  /** Returns the data type name this processor handles. */
  public String getDataType() {
    return SysLog.class.getName();
  }
}
| 8,379 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JPluginMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Map.Entry;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetrics;
import org.apache.hadoop.chukwa.inputtools.jplugin.GenericChukwaMetricsList;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class JPluginMapper extends AbstractProcessor {

  /**
   * Parses an XML-serialized JPlugin metrics list and emits one record per
   * metrics entry, keyed by "&lt;hourStart&gt;/&lt;metricsKey&gt;/&lt;timestamp&gt;".
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter) throws Throwable {
    // The record body carries the XML payload produced by the JPlugin tool.
    String xml = new LogEntry(recordEntry).getBody();
    GenericChukwaMetricsList metricsList = new GenericChukwaMetricsList();
    metricsList.fromXml(xml);

    String reduceType = metricsList.getRecordType();
    long collectionTime = metricsList.getTimestamp();

    for (ChukwaMetrics metrics : metricsList.getMetricsList()) {
      key = new ChukwaRecordKey();
      ChukwaRecord record = new ChukwaRecord();
      this.buildGenericRecord(record, null, -1l, reduceType);
      record.setTime(collectionTime);
      key.setKey(getKey(collectionTime, metrics.getKey()));
      record.add("key", metrics.getKey());
      for (Entry<String, String> attribute : metrics.getAttributes().entrySet()) {
        record.add(attribute.getKey(), attribute.getValue());
      }
      output.collect(key, record);
    }
  }

  /**
   * Builds a record key "&lt;hourStart&gt;/&lt;metricsKey&gt;/&lt;ts&gt;", where
   * hourStart is the timestamp rounded down to the start of its hour.
   */
  private String getKey(long ts, String metricsKey) {
    final long hourMillis = 60L * 60L * 1000L;
    long hourStart = ts - (ts % hourMillis);
    return hourStart + "/" + metricsKey + "/" + ts;
  }
}
| 8,380 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/SystemMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Demux parser for system metrics data collected through
* org.apache.hadoop.chukwa.datacollection.adaptor.sigar.SystemMetrics.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Calendar;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations={
@Table(name="SystemMetrics",columnFamily="cpu"),
@Table(name="SystemMetrics",columnFamily="system"),
@Table(name="SystemMetrics",columnFamily="memory"),
@Table(name="SystemMetrics",columnFamily="network"),
@Table(name="SystemMetrics",columnFamily="disk")
})
// NOTE(review): this processor also emits records with reduce types "swap"
// and "tags", which are not listed in the @Tables annotation above — confirm
// whether those column families should be declared as well.
public class SystemMetrics extends AbstractProcessor {

  /**
   * Parses a JSON system-metrics sample (collected by the sigar adaptor)
   * and emits one record per section: cpu, system, memory, swap, network,
   * disk, and a tags record. Timestamps are rounded down to the minute.
   *
   * @param recordEntry JSON object with "timestamp", "cpu", "uptime",
   *        "loadavg", "memory", "swap", "network" and "disk" entries
   * @param output collector receiving one record per section
   * @param reporter unused
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    JSONObject json = (JSONObject) JSONValue.parse(recordEntry);
    long timestamp = ((Long) json.get("timestamp")).longValue();
    ChukwaRecord record = new ChukwaRecord();
    Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    cal.setTimeInMillis(timestamp);
    // Round down to the minute so all sections share one bucket timestamp.
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);

    // ---- CPU: per-core fields plus averages across cores ----
    JSONArray cpuList = (JSONArray) json.get("cpu");
    double combined = 0.0;
    double user = 0.0;
    double sys = 0.0;
    double idle = 0.0;
    int actualSize = 0;
    for (int i = 0; i < cpuList.size(); i++) {
      JSONObject cpu = (JSONObject) cpuList.get(i);
      // Work around for sigar returning null sometimes for cpu metrics on pLinux
      if (cpu.get("combined") == null) {
        continue;
      }
      actualSize++;
      combined = combined + Double.parseDouble(cpu.get("combined").toString());
      user = user + Double.parseDouble(cpu.get("user").toString());
      sys = sys + Double.parseDouble(cpu.get("sys").toString());
      idle = idle + Double.parseDouble(cpu.get("idle").toString());
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = cpu.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        record.add(key + "." + i, value.toString());
      }
    }
    // Guard against 0/0 -> NaN when every core entry was null (see above).
    if (actualSize > 0) {
      combined = combined / actualSize;
      user = user / actualSize;
      sys = sys / actualSize;
      idle = idle / actualSize;
    }
    record.add("combined", Double.toString(combined));
    record.add("user", Double.toString(user));
    record.add("idle", Double.toString(idle));
    record.add("sys", Double.toString(sys));
    buildGenericRecord(record, null, cal.getTimeInMillis(), "cpu");
    output.collect(key, record);

    // ---- System: uptime and load averages ----
    record = new ChukwaRecord();
    record.add("Uptime", json.get("uptime").toString());
    JSONArray loadavg = (JSONArray) json.get("loadavg");
    record.add("LoadAverage.1", loadavg.get(0).toString());
    record.add("LoadAverage.5", loadavg.get(1).toString());
    record.add("LoadAverage.15", loadavg.get(2).toString());
    buildGenericRecord(record, null, cal.getTimeInMillis(), "system");
    output.collect(key, record);

    // ---- Memory: copy all fields verbatim ----
    record = new ChukwaRecord();
    JSONObject memory = (JSONObject) json.get("memory");
    @SuppressWarnings("unchecked")
    Iterator<Map.Entry<String, ?>> memKeys = memory.entrySet().iterator();
    while (memKeys.hasNext()) {
      Map.Entry<String, ?> entry = memKeys.next();
      record.add(entry.getKey(), entry.getValue().toString());
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "memory");
    output.collect(key, record);

    // ---- Swap: copy all fields verbatim ----
    record = new ChukwaRecord();
    JSONObject swap = (JSONObject) json.get("swap");
    @SuppressWarnings("unchecked")
    Iterator<Map.Entry<String, ?>> swapKeys = swap.entrySet().iterator();
    while (swapKeys.hasNext()) {
      Map.Entry<String, ?> entry = swapKeys.next();
      record.add(entry.getKey(), entry.getValue().toString());
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "swap");
    output.collect(key, record);

    // ---- Network: per-interface fields plus totals over interfaces > 0 ----
    double rxBytes = 0;
    double rxDropped = 0;
    double rxErrors = 0;
    double rxPackets = 0;
    double txBytes = 0;
    double txCollisions = 0;
    double txErrors = 0;
    double txPackets = 0;
    record = new ChukwaRecord();
    JSONArray netList = (JSONArray) json.get("network");
    for (int i = 0; i < netList.size(); i++) {
      JSONObject netIf = (JSONObject) netList.get(i);
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = netIf.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        record.add(key + "." + i, value.toString());
        // Interface 0 is excluded from the totals — presumably the loopback
        // device; confirm against the adaptor's interface ordering.
        if (i != 0) {
          if (key.equals("RxBytes")) {
            rxBytes = rxBytes + (Long) value;
          } else if (key.equals("RxDropped")) {
            rxDropped = rxDropped + (Long) value;
          } else if (key.equals("RxErrors")) {
            rxErrors = rxErrors + (Long) value;
          } else if (key.equals("RxPackets")) {
            rxPackets = rxPackets + (Long) value;
          } else if (key.equals("TxBytes")) {
            txBytes = txBytes + (Long) value;
          } else if (key.equals("TxCollisions")) {
            txCollisions = txCollisions + (Long) value;
          } else if (key.equals("TxErrors")) {
            txErrors = txErrors + (Long) value;
          } else if (key.equals("TxPackets")) {
            // Fix: use the entry value like every sibling branch; the old
            // code re-fetched netIf.get(key), equivalent but inconsistent.
            txPackets = txPackets + (Long) value;
          }
        }
      }
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "network");
    record.add("RxBytes", Double.toString(rxBytes));
    record.add("RxDropped", Double.toString(rxDropped));
    record.add("RxErrors", Double.toString(rxErrors));
    record.add("RxPackets", Double.toString(rxPackets));
    record.add("TxBytes", Double.toString(txBytes));
    record.add("TxCollisions", Double.toString(txCollisions));
    record.add("TxErrors", Double.toString(txErrors));
    record.add("TxPackets", Double.toString(txPackets));
    output.collect(key, record);

    // ---- Disk: per-device fields plus totals and overall utilization ----
    double readBytes = 0;
    double reads = 0;
    double writeBytes = 0;
    double writes = 0;
    double total = 0;
    double used = 0;
    record = new ChukwaRecord();
    JSONArray diskList = (JSONArray) json.get("disk");
    for (int i = 0; i < diskList.size(); i++) {
      JSONObject disk = (JSONObject) diskList.get(i);
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = disk.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        record.add(key + "." + i, value.toString());
        if (key.equals("ReadBytes")) {
          readBytes = readBytes + (Long) value;
        } else if (key.equals("Reads")) {
          reads = reads + (Long) value;
        } else if (key.equals("WriteBytes")) {
          writeBytes = writeBytes + (Long) value;
        } else if (key.equals("Writes")) {
          writes = writes + (Long) value;
        } else if (key.equals("Total")) {
          total = total + (Long) value;
        } else if (key.equals("Used")) {
          used = used + (Long) value;
        }
      }
    }
    // Guard against 0/0 -> NaN when no disk reported a Total.
    double percentUsed = total > 0 ? used / total : 0;
    record.add("ReadBytes", Double.toString(readBytes));
    record.add("Reads", Double.toString(reads));
    record.add("WriteBytes", Double.toString(writeBytes));
    record.add("Writes", Double.toString(writes));
    record.add("Total", Double.toString(total));
    record.add("Used", Double.toString(used));
    record.add("PercentUsed", Double.toString(percentUsed));
    buildGenericRecord(record, null, cal.getTimeInMillis(), "disk");
    output.collect(key, record);

    // ---- Tags: cluster name for downstream grouping ----
    record = new ChukwaRecord();
    record.add("cluster", chunk.getTag("cluster"));
    buildGenericRecord(record, null, cal.getTimeInMillis(), "tags");
    output.collect(key, record);
  }
}
| 8,381 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DuplicateProcessorException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
/**
 * Unchecked exception signaling that more than one demux mapper processor
 * was registered for the same record type.
 */
public class DuplicateProcessorException extends RuntimeException {

  private static final long serialVersionUID = 3890267797961057789L;

  /** Creates an exception with neither detail message nor cause. */
  public DuplicateProcessorException() {
    super();
  }

  /** Creates an exception with the given detail message. */
  public DuplicateProcessorException(String message) {
    super(message);
  }

  /** Creates an exception wrapping the given cause. */
  public DuplicateProcessorException(Throwable cause) {
    super(cause);
  }

  /** Creates an exception with the given detail message and cause. */
  public DuplicateProcessorException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 8,382 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/UnknownRecordTypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
public class UnknownRecordTypeException extends Exception {
/**
*
*/
private static final long serialVersionUID = 8925135975093252279L;
public UnknownRecordTypeException() {
}
public UnknownRecordTypeException(String message) {
super(message);
}
public UnknownRecordTypeException(Throwable cause) {
super(cause);
}
public UnknownRecordTypeException(String message, Throwable cause) {
super(message, cause);
}
}
| 8,383 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/DebugOutputProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class DebugOutputProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(DebugOutputProcessor.class);
  public static final String recordType = "Debug";

  /**
   * Debug processor: logs each incoming line and emits it unchanged as a
   * generic record keyed by the chunk sequence id.
   *
   * @param line raw record body
   * @param output collector receiving the debug record
   * @param reporter unused
   */
  @Override
  public void parse(String line,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    log.info("record: [" + line + "] type[" + chunk.getDataType() + "]");
    ChukwaRecord record = new ChukwaRecord();
    buildGenericRecord(record, line, System.currentTimeMillis(), recordType);
    key.setKey("" + chunk.getSeqID());
    try {
      output.collect(key, record);
    } catch (IOException e) {
      // Fix: route through the logger (with stack trace) instead of the
      // former bare printStackTrace(). Cannot rethrow — this override of
      // parse() declares no exceptions.
      log.warn("Unable to collect output in DebugOutputProcessor [" + line + "]", e);
    }
  }

  /** Returns the data type name this processor handles. */
  public String getDataType() {
    return DebugOutputProcessor.recordType;
  }
}
| 8,384 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChunkSaver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.nio.charset.Charset;
import java.util.Calendar;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class ChunkSaver {
  static Logger log = Logger.getLogger(ChunkSaver.class);

  /**
   * Persists a chunk that failed demux processing: increments the error
   * counters, serializes the chunk (plus the failure's stack trace) into a
   * "&lt;dataType&gt;InError" record, and emits it so the raw data is not lost.
   *
   * @param chunk the chunk that failed to process
   * @param throwable the failure that triggered the save
   * @param output collector receiving the error record
   * @param reporter used to increment DemuxError counters
   * @return the emitted record, or null if saving itself failed
   */
  public static ChukwaRecord saveChunk(Chunk chunk, Throwable throwable,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    try {
      reporter.incrCounter("DemuxError", "count", 1);
      reporter.incrCounter("DemuxError", chunk.getDataType() + "Count", 1);
      ChukwaRecord record = new ChukwaRecord();
      long ts = System.currentTimeMillis();
      // Key on the start of the current hour so error records bucket together.
      Calendar calendar = Calendar.getInstance();
      calendar.setTimeInMillis(ts);
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      ChukwaRecordKey key = new ChukwaRecordKey();
      key.setKey("" + calendar.getTimeInMillis() + "/" + chunk.getDataType()
          + "/" + chunk.getSource() + "/" + ts);
      key.setReduceType(chunk.getDataType() + "InError");
      record.setTime(ts);
      record.add(Record.tagsField, chunk.getTags());
      record.add(Record.sourceField, chunk.getSource());
      record.add(Record.applicationField, chunk.getStreamName());
      DataOutputBuffer ob = new DataOutputBuffer(chunk
          .getSerializedSizeEstimate());
      chunk.write(ob);
      record.add(Record.chunkDataField, new String(ob.getData(), Charset.forName("UTF-8")));
      record.add(Record.chunkExceptionField, ExceptionUtil
          .getStackTrace(throwable));
      output.collect(key, record);
      return record;
    } catch (Throwable e) {
      // Fix: log with stack trace instead of the former printStackTrace().
      log.error("Unable to save chunk", e);
      try {
        log.warn("Unable to save a chunk: tags: " + chunk.getTags()
            + " - source:" + chunk.getSource() + " - dataType: "
            + chunk.getDataType() + " - Stream: " + chunk.getStreamName()
            + " - SeqId: " + chunk.getSeqID() + " - Data: "
            + new String(chunk.getData(), Charset.forName("UTF-8")));
      } catch (Throwable e1) {
        // Fix: the original printed the OUTER exception e again here,
        // hiding the secondary failure; log e1 instead.
        log.error("Failed while logging details of the unsaved chunk", e1);
      }
    }
    return null;
  }
}
| 8,385 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Log4jJobHistoryProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.util.Hashtable;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class Log4jJobHistoryProcessor extends AbstractProcessor {
static Logger log = Logger.getLogger(Log4jJobHistoryProcessor.class);
private static final String recordType = "JobLogHistory";
private static String internalRegex = "(.*?)=\"(.*?)\"(.*)([\\n])?";
private Pattern ip = null;
private Matcher internalMatcher = null;
/**
 * Compiles the attribute pattern (KEY="VALUE" pairs) once per instance and
 * primes a reusable matcher.
 * NOTE(review): internalMatcher is mutated by parse() calls, so a single
 * instance is not safe for concurrent use — presumably each mapper owns its
 * own processor instance; confirm before sharing.
 */
public Log4jJobHistoryProcessor() {
ip = Pattern.compile(internalRegex);
internalMatcher = ip.matcher("-");
}
@Override
protected void parse(String recordEntry,
OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
throws Throwable {
// log.info("JobLogHistoryProcessor record: [" + recordEntry + "] type["
// + chunk.getDataType() + "]");
try {
// String dStr = recordEntry.substring(0, 23);
int start = 24;
int idx = recordEntry.indexOf(' ', start);
// String level = recordEntry.substring(start, idx);
start = idx + 1;
idx = recordEntry.indexOf(' ', start);
// String className = recordEntry.substring(start, idx-1);
String body = recordEntry.substring(idx + 1);
Hashtable<String, String> keys = new Hashtable<String, String>();
ChukwaRecord record = null;
int firstSep = body.indexOf(" ");
keys.put("RECORD_TYPE", body.substring(0, firstSep));
// log.info("JobLogHistoryProcessor Add field: [RECORD_TYPE]["
// + keys.get("RECORD_TYPE") + "]");
body = body.substring(firstSep);
internalMatcher.reset(body);
// String fieldName = null;
// String fieldValue = null;
while (internalMatcher.matches()) {
keys.put(internalMatcher.group(1).trim(), internalMatcher.group(2)
.trim());
// TODO Remove debug info before production
// fieldName = internalMatcher.group(1).trim();
// fieldValue = internalMatcher.group(2).trim();
// log.info("JobLogHistoryProcessor Add field: [" + fieldName +
// "][" + fieldValue +"]" );
// log.info("EOL : [" + internalMatcher.group(3) + "]" );
internalMatcher.reset(internalMatcher.group(3));
}
if (!keys.containsKey("JOBID")) {
// Extract JobID from taskID
// JOBID = "job_200804210403_0005"
// TASKID = "tip_200804210403_0005_m_000018"
String jobId = keys.get("TASKID");
int idx1 = jobId.indexOf('_', 0);
int idx2 = jobId.indexOf('_', idx1 + 1);
idx2 = jobId.indexOf('_', idx2 + 1);
keys.put("JOBID", "job" + jobId.substring(idx1, idx2));
// log.info("JobLogHistoryProcessor Add field: [JOBID]["
// + keys.get("JOBID") + "]");
}
// if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("SUBMIT_TIME"))
// {
// // Job JOBID="job_200804210403_0005" JOBNAME="MY_JOB"
// USER="userxxx"
// // SUBMIT_TIME="1208760436751"
// JOBCONF="/mapredsystem/xxx.yyy.com/job_200804210403_0005/job.xml"
//
//
// }
// else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("LAUNCH_TIME"))
// {
// // Job JOBID="job_200804210403_0005" LAUNCH_TIME="1208760437110"
// TOTAL_MAPS="5912" TOTAL_REDUCES="739"
//
// }
// else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job") &&
// keys.containsKey("FINISH_TIME"))
// {
// // Job JOBID="job_200804210403_0005" FINISH_TIME="1208760906816"
// JOB_STATUS="SUCCESS" FINISHED_MAPS="5912" FINISHED_REDUCES="739"
// FAILED_MAPS="0" FAILED_REDUCES="0"
// // COUNTERS="File Systems.Local bytes read:1735053407244,File
// Systems.Local bytes written:2610106384012,File Systems.HDFS bytes
// read:801605644910,File Systems.HDFS bytes written:44135800,
// // Job Counters .Launched map tasks:5912,Job Counters .Launched
// reduce tasks:739,Job Counters .Data-local map tasks:5573,Job
// Counters .Rack-local map tasks:316,Map-Reduce Framework.
// // Map input records:9410696067,Map-Reduce Framework.Map output
// records:9410696067,Map-Reduce Framework.Map input
// bytes:801599188816,Map-Reduce Framework.Map output
// bytes:784427968116,
// // Map-Reduce Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0,Map-Reduce Framework.Reduce
// input groups:477265,Map-Reduce Framework.Reduce input
// records:739000,
// // Map-Reduce Framework.Reduce output records:739000"
//
// }
// else
if (keys.get("RECORD_TYPE").equalsIgnoreCase("MapAttempt")
&& keys.containsKey("START_TIME")) {
// MapAttempt TASK_TYPE="MAP"
// TASKID="tip_200804210403_0005_m_000018"
// TASK_ATTEMPT_ID="task_200804210403_0005_m_000018_0"
// START_TIME="1208760437531"
// HOSTNAME="tracker_xxx.yyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:53734"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/Map/" + keys.get("JOBID") + "/"
+ keys.get("START_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("START_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("START_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/Map/S");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("MapAttempt")
&& keys.containsKey("FINISH_TIME")) {
// MapAttempt TASK_TYPE="MAP"
// TASKID="tip_200804210403_0005_m_005494"
// TASK_ATTEMPT_ID="task_200804210403_0005_m_005494_0"
// TASK_STATUS="SUCCESS"
// FINISH_TIME="1208760624124"
// HOSTNAME="tracker_xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:55491"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/Map/" + keys.get("JOBID") + "/"
+ keys.get("FINISH_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("FINISH_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("FINISH_TIME", keys.get("FINISH_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/Map/E");
output.collect(key, record);
}
else if (keys.get("RECORD_TYPE").equalsIgnoreCase("ReduceAttempt")
&& keys.containsKey("START_TIME")) {
// ReduceAttempt TASK_TYPE="REDUCE"
// TASKID="tip_200804210403_0005_r_000138"
// TASK_ATTEMPT_ID="task_200804210403_0005_r_000138_0"
// START_TIME="1208760454885"
// HOSTNAME="tracker_xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:51947"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SHUFFLE/" + keys.get("JOBID") + "/"
+ keys.get("START_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("START_TIME")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("START_TIME"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SHUFFLE/S");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("ReduceAttempt")
&& keys.containsKey("FINISH_TIME")) {
// ReduceAttempt TASK_TYPE="REDUCE"
// TASKID="tip_200804210403_0005_r_000138"
// TASK_ATTEMPT_ID="task_200804210403_0005_r_000138_0"
// TASK_STATUS="SUCCESS" SHUFFLE_FINISHED="1208760787167"
// SORT_FINISHED="1208760787354" FINISH_TIME="1208760802395"
// HOSTNAME="tracker__xxxx.yyyy.com:xxx.yyy.com/xxx.xxx.xxx.xxx:51947"
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SHUFFLE/" + keys.get("JOBID") + "/"
+ keys.get("SHUFFLE_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SHUFFLE_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("SHUFFLE_FINISHED", keys.get("SHUFFLE_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SHUFFLE/E");
output.collect(key, record);
// SORT
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SORT/" + keys.get("JOBID") + "/"
+ keys.get("SHUFFLE_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SHUFFLE_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("SHUFFLE_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SORT/S");
output.collect(key, record);
key = new ChukwaRecordKey();
key.setKey("JobLogHist/SORT/" + keys.get("JOBID") + "/"
+ keys.get("SORT_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("SORT_FINISHED", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/SORT/E");
output.collect(key, record);
// Reduce
key = new ChukwaRecordKey();
key.setKey("JobLogHist/REDUCE/" + keys.get("JOBID") + "/"
+ keys.get("SORT_FINISHED"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("START_TIME", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/REDUCE/S");
output.collect(key, record);
key = new ChukwaRecordKey();
key.setKey("JobLogHist/REDUCE/" + keys.get("JOBID") + "/"
+ keys.get("FINISH_TIME"));
key.setReduceType("JobLogHistoryReduceProcessor");
record = new ChukwaRecord();
record.setTime(Long.parseLong(keys.get("SORT_FINISHED")));
record.add("JOBID", keys.get("JOBID"));
record.add("FINISH_TIME", keys.get("SORT_FINISHED"));
record.add(Record.tagsField, chunk.getTags());
// log.info("JobLogHist/REDUCE/E");
output.collect(key, record);
} else if (keys.get("RECORD_TYPE").equalsIgnoreCase("Job")
&& keys.containsKey("COUNTERS")) {
// Job JOBID="job_200804210403_0005" FINISH_TIME="1208760906816"
// JOB_STATUS="SUCCESS" FINISHED_MAPS="5912"
// FINISHED_REDUCES="739" FAILED_MAPS="0" FAILED_REDUCES="0"
// COUNTERS="File Systems.Local bytes read:1735053407244,File
// Systems.Local bytes written:2610106384012,File Systems.HDFS
// bytes read:801605644910,File Systems.HDFS bytes
// written:44135800,
// Job Counters .Launched map tasks:5912,Job Counters .Launched
// reduce tasks:739,Job Counters .Data-local map tasks:5573,Job
// Counters .Rack-local map tasks:316,Map-Reduce Framework.
// Map input records:9410696067,Map-Reduce Framework.Map output
// records:9410696067,Map-Reduce Framework.Map input
// bytes:801599188816,Map-Reduce Framework.Map output
// bytes:784427968116,
// Map-Reduce Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0,Map-Reduce
// Framework.Reduce input groups:477265,Map-Reduce
// Framework.Reduce input records:739000,
// Map-Reduce Framework.Reduce output records:739000"
record = new ChukwaRecord();
key = new ChukwaRecordKey();
buildGenericRecord(record, null, Long
.parseLong(keys.get("FINISH_TIME")), "MRJobCounters");
extractCounters(record, keys.get("COUNTERS"));
String jobId = keys.get("JOBID").replace("_", "").substring(3);
record.add("JobId", "" + jobId);
// FIXME validate this when HodId will be available
if (keys.containsKey("HODID")) {
record.add("HodId", keys.get("HODID"));
}
// log.info("MRJobCounters +1");
output.collect(key, record);
}
if (keys.containsKey("TASK_TYPE")
&& keys.containsKey("COUNTERS")
&& (keys.get("TASK_TYPE").equalsIgnoreCase("REDUCE") || keys.get(
"TASK_TYPE").equalsIgnoreCase("MAP"))) {
// MAP
// Task TASKID="tip_200804210403_0005_m_000154" TASK_TYPE="MAP"
// TASK_STATUS="SUCCESS" FINISH_TIME="1208760463883"
// COUNTERS="File Systems.Local bytes read:159265655,File
// Systems.Local bytes written:318531310,
// File Systems.HDFS bytes read:145882417,Map-Reduce
// Framework.Map input records:1706604,
// Map-Reduce Framework.Map output records:1706604,Map-Reduce
// Framework.Map input bytes:145882057,
// Map-Reduce Framework.Map output bytes:142763253,Map-Reduce
// Framework.Combine input records:0,Map-Reduce
// Framework.Combine output records:0"
// REDUCE
// Task TASKID="tip_200804210403_0005_r_000524"
// TASK_TYPE="REDUCE" TASK_STATUS="SUCCESS"
// FINISH_TIME="1208760877072"
// COUNTERS="File Systems.Local bytes read:1179319677,File
// Systems.Local bytes written:1184474889,File Systems.HDFS
// bytes written:59021,
// Map-Reduce Framework.Reduce input groups:684,Map-Reduce
// Framework.Reduce input records:1000,Map-Reduce
// Framework.Reduce output records:1000"
record = new ChukwaRecord();
key = new ChukwaRecordKey();
buildGenericRecord(record, null, Long
.parseLong(keys.get("FINISH_TIME")), "SizeVsFinishTime");
extractCounters(record, keys.get("COUNTERS"));
record.add("JOBID", keys.get("JOBID"));
record.add("TASKID", keys.get("TASKID"));
record.add("TASK_TYPE", keys.get("TASK_TYPE"));
// log.info("MR_Graph +1");
output.collect(key, record);
}
} catch (IOException e) {
log.warn("Unable to collect output in JobLogHistoryProcessor ["
+ recordEntry + "]", e);
e.printStackTrace();
throw e;
}
}
/**
 * Parses a Hadoop counters string of the form
 * "Group.Counter name:value,Group.Counter name:value,..." and adds one
 * field per counter to the record, normalizing the counter name by
 * replacing spaces and dots with underscores and upper-casing it.
 *
 * @param record record to add the counter fields to
 * @param input  raw COUNTERS attribute from the job history line
 */
protected void extractCounters(ChukwaRecord record, String input) {
  if (input == null || input.isEmpty()) {
    return;
  }
  for (String counter : input.split(",")) {
    // Limit the split to 2 parts so a value containing ':' is kept intact.
    String[] data = counter.split(":", 2);
    if (data.length < 2) {
      // Malformed entry (no ':'): skip instead of throwing
      // ArrayIndexOutOfBoundsException as the previous code did.
      continue;
    }
    record.add(data[0].replaceAll(" ", "_").replaceAll("\\.", "_")
        .toUpperCase(), data[1]);
  }
}
/**
 * @return the record type string this processor handles, as declared by
 *         {@code Log4jJobHistoryProcessor}.
 */
public String getDataType() {
  String type = Log4jJobHistoryProcessor.recordType;
  return type;
}
}
| 8,386 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HBaseMasterProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.record.Buffer;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "HBase", columnFamily = "master") })
public class HBaseMasterProcessor extends AbstractProcessor {

  // Created once; the previous code built a new Logger on every parse() call.
  private static final Logger log = Logger.getLogger(HBaseMasterProcessor.class);

  /**
   * Last raw sample of each metric reported as a rate (delta between
   * consecutive samples). Shared across processor instances.
   * NOTE(review): the read-then-put in parse() is not atomic; concurrent
   * demux tasks could race. Preserved as-is to keep existing behavior.
   */
  static Map<String, Long> rateMap = new ConcurrentHashMap<String, Long>();
  static {
    long zero = 0L;
    rateMap.put("splitSizeNumOps", zero);
    rateMap.put("splitTimeNumOps", zero);
  }

  /**
   * Parses one JSON document produced by the JMX adaptor for the HBase
   * master and emits a single record with reduce type "master".
   *
   * @param recordEntry JSON object text mapping metric names to values
   * @param output      collector receiving the generated record
   * @param reporter    unused
   * @throws Throwable never directly; all failures are logged and swallowed
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    // Fall back to "now" (UTC) when the adaptor did not tag a timestamp.
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
        .getTimeInMillis();
    ChukwaRecord record = new ChukwaRecord();
    Map<String, Buffer> metricsMap = new HashMap<String, Buffer>();

    try {
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for hbase master");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = obj.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();

        // For metrics listed in rateMap, report the delta since the
        // previous sample instead of the raw cumulative value.
        if (rateMap.containsKey(key)) {
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if (newValue < 0) {
            // Counter went backwards: likely a process restart.
            log.warn("HBaseMaster rateMap might be reset or corrupted for metric "
                + key);
            newValue = 0L;
          }
          valueString = Long.toString(newValue);
        }

        Buffer b = new Buffer(valueString.getBytes(Charset.forName("UTF-8")));
        metricsMap.put(key, b);
      }

      // TreeMap gives deterministic (sorted) field ordering in the record.
      TreeMap<String, Buffer> t = new TreeMap<String, Buffer>(metricsMap);
      record.setMapFields(t);
      buildGenericRecord(record, null, timeStamp, "master");
      output.collect(key, record);
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,387 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HBaseRegionServerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TimeZone;
import java.util.TreeMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.record.Buffer;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "HBase", columnFamily = "regionserver") })
public class HBaseRegionServerProcessor extends AbstractProcessor {

  // Created once; the previous code built a new Logger on every parse() call.
  private static final Logger log =
      Logger.getLogger(HBaseRegionServerProcessor.class);

  /**
   * Parses one JSON document produced by the JMX adaptor for an HBase
   * region server and emits a single record with reduce type
   * "regionserver".
   *
   * @param recordEntry JSON object text mapping metric names to values
   * @param output      collector receiving the generated record
   * @param reporter    unused
   * @throws Throwable never directly; all failures are logged and swallowed
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    // Fall back to "now" (UTC) when the adaptor did not tag a timestamp.
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
        .getTimeInMillis();
    ChukwaRecord record = new ChukwaRecord();
    Map<String, Buffer> metricsMap = new HashMap<String, Buffer>();

    try {
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for hbase region server");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      @SuppressWarnings("unchecked")
      Iterator<Map.Entry<String, ?>> keys = obj.entrySet().iterator();
      while (keys.hasNext()) {
        Map.Entry<String, ?> entry = keys.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
        Buffer b = new Buffer(valueString.getBytes(Charset.forName("UTF-8")));
        metricsMap.put(key, b);
      }
      // TreeMap gives deterministic (sorted) field ordering in the record.
      TreeMap<String, Buffer> t = new TreeMap<String, Buffer>(metricsMap);
      record.setMapFields(t);
      buildGenericRecord(record, null, timeStamp, "regionserver");
      output.collect(key, record);
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,388 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/JobTrackerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
@Tables(annotations = { @Table(name = "JobTracker", columnFamily = "jt"),
    @Table(name = "JobTracker", columnFamily = "jvm"),
    @Table(name = "JobTracker", columnFamily = "rpc") })
public class JobTrackerProcessor extends AbstractProcessor {

  // Created once; the previous code built a new Logger on every parse() call.
  private static final Logger log = Logger.getLogger(JobTrackerProcessor.class);

  /**
   * Last raw sample of each counter reported as a per-interval delta.
   * Shared across processor instances; read-then-put in parse() is not
   * atomic (preserved as-is to keep existing behavior).
   */
  static Map<String, Long> rateMap = new ConcurrentHashMap<String, Long>();
  static {
    long zero = 0L;
    rateMap.put("SentBytes", zero);
    rateMap.put("ReceivedBytes", zero);
    rateMap.put("rpcAuthorizationSuccesses", zero);
    rateMap.put("rpcAuthorizationFailures", zero);
    rateMap.put("RpcQueueTime_num_ops", zero);
    rateMap.put("RpcProcessingTime_num_ops", zero);
    rateMap.put("heartbeats", zero);
    rateMap.put("jobs_submitted", zero);
    rateMap.put("jobs_completed", zero);
    rateMap.put("jobs_failed", zero);
    rateMap.put("jobs_killed", zero);
    rateMap.put("maps_launched", zero);
    rateMap.put("maps_completed", zero);
    rateMap.put("maps_failed", zero);
    rateMap.put("maps_killed", zero);
    rateMap.put("reduces_launched", zero);
    rateMap.put("reduces_completed", zero);
    rateMap.put("reduces_failed", zero);
    rateMap.put("reduces_killed", zero);
    rateMap.put("gcCount", zero);
  }

  /**
   * Parses one JSON document produced by the JMX adaptor for the JobTracker
   * and splits its metrics into three records: "jvm", "rpc", and the
   * catch-all "jt".
   *
   * @param recordEntry JSON object text mapping metric names to values
   * @param output      collector receiving the generated records
   * @param reporter    unused
   * @throws Throwable never directly; all failures are logged and swallowed
   */
  @SuppressWarnings("unchecked")
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    // Fall back to "now" (UTC) when the adaptor did not tag a timestamp.
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
        .getTimeInMillis();
    final ChukwaRecord mapred_jt = new ChukwaRecord();
    final ChukwaRecord jt_jvm = new ChukwaRecord();
    final ChukwaRecord jt_rpc = new ChukwaRecord();

    // Routes each known metric to the record (column family) it belongs
    // in; anything not listed falls through to the "jt" record.
    Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>() {
      private static final long serialVersionUID = 1L;
      {
        put("gcCount", jt_jvm);
        put("gcTimeMillis", jt_jvm);
        put("logError", jt_jvm);
        put("logFatal", jt_jvm);
        put("logInfo", jt_jvm);
        put("logWarn", jt_jvm);
        put("memHeapCommittedM", jt_jvm);
        put("memHeapUsedM", jt_jvm);
        put("threadsBlocked", jt_jvm);
        put("threadsNew", jt_jvm);
        put("threadsRunnable", jt_jvm);
        put("threadsTerminated", jt_jvm);
        put("threadsTimedWaiting", jt_jvm);
        put("threadsWaiting", jt_jvm);
        put("ReceivedBytes", jt_rpc);
        put("RpcProcessingTime_avg_time", jt_rpc);
        put("RpcProcessingTime_num_ops", jt_rpc);
        put("RpcQueueTime_avg_time", jt_rpc);
        put("RpcQueueTime_num_ops", jt_rpc);
        put("SentBytes", jt_rpc);
        put("rpcAuthorizationSuccesses", jt_rpc);
        // Bug fix: was "rpcAuthorizationnFailures" (double 'n'), so the real
        // rpcAuthorizationFailures metric never matched this map and was
        // misrouted to the "jt" record instead of "rpc".
        put("rpcAuthorizationFailures", jt_rpc);
      }
    };

    try {
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if (ttTag == null) {
        log.warn("timeStamp tag not set in JMX adaptor for jobtracker");
      } else {
        timeStamp = Long.parseLong(ttTag);
      }
      for (Entry<String, Object> entry : (Set<Entry<String, Object>>) obj.entrySet()) {
        String key = entry.getKey();
        String valueString = entry.getValue().toString();

        // For metrics listed in rateMap, report the delta since the
        // previous sample instead of the raw cumulative value.
        if (rateMap.containsKey(key)) {
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if (newValue < 0) {
            // Counter went backwards: likely a process restart.
            log.warn("JobTrackerProcessor's rateMap might be reset or corrupted for metric "
                + key);
            newValue = 0L;
          }
          valueString = Long.toString(newValue);
        }

        // These metrics are string types with JSON structure. So we parse
        // them and get the count
        if (key.indexOf("Json") >= 0) {
          // ignore these for now. Parsing of JSON array is throwing class
          // cast exception.
        } else if (metricsMap.containsKey(key)) {
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        } else {
          mapred_jt.add(key, valueString);
        }
      }

      buildGenericRecord(mapred_jt, null, timeStamp, "jt");
      output.collect(key, mapred_jt);
      buildGenericRecord(jt_jvm, null, timeStamp, "jvm");
      output.collect(key, jt_jvm);
      buildGenericRecord(jt_rpc, null, timeStamp, "rpc");
      output.collect(key, jt_rpc);
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,389 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/HadoopLogProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Tables;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
@Tables(annotations={
    @Table(name="HadoopLog",columnFamily="NameNode"),
    @Table(name="HadoopLog",columnFamily="DataNode"),
    @Table(name="HadoopLog",columnFamily="Audit")
})
public class HadoopLogProcessor extends AbstractProcessor {
  static Logger log = Logger.getLogger(HadoopLogProcessor.class);

  private static final String recordType = "HadoopLog";
  private static final String nameNodeType = "NameNode";
  private static final String dataNodeType = "DataNode";
  private static final String auditType = "Audit";

  // Parses the leading "yyyy-MM-dd HH:mm:ss,SSS" timestamp of a log line.
  // SimpleDateFormat is not thread-safe; this is an instance field, so it is
  // safe as long as each processor instance is used by a single thread.
  private SimpleDateFormat sdf = null;

  public HadoopLogProcessor() {
    // TODO move that to config
    sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
  }

  /**
   * Extracts the timestamp from the first 23 characters of the log line and
   * emits one record typed by the originating daemon, which is derived from
   * the chunk's stream name (datanode / namenode / audit, otherwise the
   * generic "HadoopLog" type).
   *
   * @param recordEntry one Hadoop daemon log line, starting with a
   *                    "yyyy-MM-dd HH:mm:ss,SSS" timestamp
   * @param output      collector receiving the generated record
   * @param reporter    unused
   * @throws Throwable ParseException on a bad timestamp, IOException on a
   *                   collect failure (both are logged, then rethrown)
   */
  @Override
  public void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    try {
      String dStr = recordEntry.substring(0, 23);
      Date d = sdf.parse(dStr);
      ChukwaRecord record = new ChukwaRecord();
      // NOTE(review): indexOf(...) > 0 misses stream names that *start*
      // with the daemon name (index 0); preserved as-is pending
      // confirmation of actual stream-name format.
      if (this.chunk.getStreamName().indexOf("datanode") > 0) {
        buildGenericRecord(record, recordEntry, d.getTime(), dataNodeType);
      } else if (this.chunk.getStreamName().indexOf("namenode") > 0) {
        buildGenericRecord(record, recordEntry, d.getTime(), nameNodeType);
      } else if (this.chunk.getStreamName().indexOf("audit") > 0) {
        buildGenericRecord(record, recordEntry, d.getTime(), auditType);
      } else {
        buildGenericRecord(record, recordEntry, d.getTime(), recordType);
      }
      output.collect(key, record);
    } catch (ParseException e) {
      // log.warn already captures the stack trace; the redundant
      // e.printStackTrace() calls were removed.
      log.warn("Unable to parse the date in DefaultProcessor [" + recordEntry
          + "]", e);
      throw e;
    } catch (IOException e) {
      log.warn("Unable to collect output in DefaultProcessor [" + recordEntry
          + "]", e);
      throw e;
    }
  }

  /** @return the generic record type handled by this processor. */
  public String getDataType() {
    return HadoopLogProcessor.recordType;
  }
}
| 8,390 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/Log4JMetricsContextProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
public class Log4JMetricsContextProcessor extends AbstractProcessor {

  static Logger log = Logger.getLogger(Log4JMetricsContextProcessor.class);

  /**
   * Parses one Log4J metrics-context line and emits it as a ChukwaRecord
   * whose record type is derived from the metric's context/record names.
   *
   * @param recordEntry raw log line whose body is a JSON metrics document
   * @param output      collector receiving the generated record
   * @param reporter    unused
   * @throws Throwable on any parse or collect failure
   */
  @Override
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable
  {
    Log4JMetricsContextChukwaRecord record = new Log4JMetricsContextChukwaRecord(recordEntry);
    ChukwaRecord chukwaRecord = record.getChukwaRecord();
    this.buildGenericRecord(chukwaRecord, null, record.getTimestamp(), record.getRecordType());
    output.collect(key, chukwaRecord);
  }

  // create a static class to cover most of the code for unit test
  static class Log4JMetricsContextChukwaRecord {
    private String recordType = null;
    private long timestamp = 0;
    private ChukwaRecord chukwaRecord = new ChukwaRecord();

    @SuppressWarnings("unchecked")
    public Log4JMetricsContextChukwaRecord(String recordEntry) throws Throwable {
      LogEntry log = new LogEntry(recordEntry);
      JSONObject json = (JSONObject) JSONValue.parse(log.getBody());

      // Round the timestamp down to the start of its minute.
      timestamp = (Long) json.get("timestamp");
      timestamp = (timestamp / 60000) * 60000;

      // Record type is "<context>" or "<context>_<record>" when they differ.
      String contextName = (String) json.get("contextName");
      String recordName = (String) json.get("recordName");
      recordType = contextName;
      if (!contextName.equals(recordName)) {
        recordType += "_" + recordName;
      }

      for (Entry<String, Object> entry : (Set<Entry<String, Object>>) json.entrySet()) {
        Object rawValue = entry.getValue();
        // Bug fix: the old code tested String.valueOf(value) != null, which
        // is always true (String.valueOf(null) returns the string "null"),
        // so null metric values were stored as the literal text "null".
        // Skip null values as originally intended.
        if (rawValue != null) {
          chukwaRecord.add(entry.getKey(), String.valueOf(rawValue));
        }
      }
    }

    public String getRecordType() {
      return recordType;
    }

    public long getTimestamp() {
      return timestamp;
    }

    public ChukwaRecord getChukwaRecord() {
      return chukwaRecord;
    }
  }
}
| 8,391 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/UnknownReduceTypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
/**
 * Thrown by the demux reduce side when a record's reduce type has no
 * registered processor to handle it.
 */
public class UnknownReduceTypeException extends Exception {

  private static final long serialVersionUID = 5760553864088487836L;

  /** Creates an exception with neither detail message nor cause. */
  public UnknownReduceTypeException() {
    super();
  }

  /**
   * Creates an exception with the given detail message.
   *
   * @param message description of the unknown reduce type
   */
  public UnknownReduceTypeException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a detail message and an underlying cause.
   *
   * @param message description of the unknown reduce type
   * @param cause   the exception that triggered this one
   */
  public UnknownReduceTypeException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * Creates an exception wrapping the given cause.
   *
   * @param cause the exception that triggered this one
   */
  public UnknownReduceTypeException(Throwable cause) {
    super(cause);
  }
}
| 8,392 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/JobLogHistoryReduceProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class JobLogHistoryReduceProcessor implements ReduceProcessor {
  static Logger log = Logger.getLogger(JobLogHistoryReduceProcessor.class);

  /** @return this class's name, used as the data type identifier. */
  @Override
  public String getDataType() {
    return this.getClass().getName();
  }

  /**
   * Aggregates the values for one key into a single "MSSRGraph" record:
   * each value containing a START_TIME field increments a counter, every
   * other value (a finish record) decrements it. The phase (MAP, SHUFFLE,
   * SORT, REDUCE) is derived from the key prefix.
   *
   * @param key      reduce key of the form "JobLogHist/<PHASE>/<jobid>/<ts>"
   * @param values   records grouped under this key
   * @param output   collector receiving the aggregated record
   * @param reporter unused
   */
  @Override
  public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    try {
      String action = key.getKey();
      int count = 0;
      ChukwaRecord record = null;
      while (values.hasNext()) {
        record = values.next();
        if (record.containsField("START_TIME")) {
          count++;
        } else {
          count--;
        }
      }
      if (record == null) {
        // Defensive: no values for this key. The old code would have thrown
        // a NullPointerException at record.getTime() below.
        return;
      }
      ChukwaRecordKey newKey = new ChukwaRecordKey();
      newKey.setKey("" + record.getTime());
      newKey.setReduceType("MSSRGraph");
      ChukwaRecord newRecord = new ChukwaRecord();
      newRecord.add(Record.tagsField, record.getValue(Record.tagsField));
      newRecord.setTime(record.getTime());
      newRecord.add("count", "" + count);
      newRecord.add("JOBID", record.getValue("JOBID"));
      if (action.indexOf("JobLogHist/Map/") >= 0) {
        newRecord.add("type", "MAP");
      } else if (action.indexOf("JobLogHist/SHUFFLE/") >= 0) {
        newRecord.add("type", "SHUFFLE");
      } else if (action.indexOf("JobLogHist/SORT/") >= 0) {
        newRecord.add("type", "SORT");
      } else if (action.indexOf("JobLogHist/REDUCE/") >= 0) {
        newRecord.add("type", "REDUCE");
      }
      output.collect(newKey, newRecord);
    } catch (IOException e) {
      // log.warn already records the stack trace; the redundant
      // e.printStackTrace() was removed.
      log.warn("Unable to collect output in JobLogHistoryReduceProcessor ["
          + key + "]", e);
    }
  }
}
| 8,393 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/DuplicateReduceProcessorException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
/**
 * Unchecked exception signaling a duplicate reduce-processor registration
 * (per its name); callers are not forced to declare or catch it.
 */
public class DuplicateReduceProcessorException extends RuntimeException {

  private static final long serialVersionUID = 7396161798611603019L;

  /** Creates the exception with no detail message and no cause. */
  public DuplicateReduceProcessorException() {
    super();
  }

  /** Creates the exception with a detail message. */
  public DuplicateReduceProcessorException(String message) {
    super(message);
  }

  /** Creates the exception wrapping an underlying cause. */
  public DuplicateReduceProcessorException(Throwable cause) {
    super(cause);
  }

  /** Creates the exception with both a detail message and a cause. */
  public DuplicateReduceProcessorException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 8,394 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/ReduceProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.util.Iterator;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * Contract for Demux reduce-side processors: implementations consume the
 * {@link ChukwaRecord} values grouped under one {@link ChukwaRecordKey} and
 * emit derived records through the supplied collector.
 */
public interface ReduceProcessor {
  /**
   * @return the data type name this processor handles; implementations in
   *         this package typically return their own class name
   */
  public String getDataType();

  /**
   * Processes all records grouped under {@code key}.
   *
   * @param key grouping key produced by the map side
   * @param values records sharing {@code key}
   * @param output collector for reduced output records
   * @param reporter Hadoop progress reporter
   */
  public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter);
}
| 8,395 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/IdentityReducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * Pass-through reducer: every input record is emitted unchanged under its
 * original key.
 */
public class IdentityReducer implements ReduceProcessor {

  @Override
  public String getDataType() {
    // The identity pass-through is not tied to a specific data type.
    return null;
  }

  /**
   * Forwards each value straight to the output. A collect failure on one
   * record is reported and the remaining records are still attempted.
   */
  @Override
  public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    while (values.hasNext()) {
      ChukwaRecord current = values.next();
      try {
        output.collect(key, current);
      } catch (IOException ioe) {
        ioe.printStackTrace();
      }
    }
  }
}
| 8,396 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/SystemMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class SystemMetrics implements ReduceProcessor {
  static Logger log = Logger.getLogger(SystemMetrics.class);

  @Override
  public String getDataType() {
    return this.getClass().getName();
  }

  /**
   * Flattens per-device metric rows into device-prefixed fields and derives
   * percentage metrics, then emits a single merged record for the key:
   * network rows (marked by "IFACE") gain {@code <iface>.rxbyt/s}-style
   * fields plus a busy percentage; disk rows (marked by "Device:") gain
   * {@code <device>.r/s}-style fields; swap/memory rows gain usage
   * percentages.
   *
   * @param key grouping key; reused unchanged for the output record
   * @param values metric records sharing {@code key}
   * @param output collector for the single merged output record
   * @param reporter Hadoop progress reporter (unused)
   */
  @Override
  public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    try {
      ChukwaRecord record = null;
      ChukwaRecord newRecord = new ChukwaRecord();
      while (values.hasNext()) {
        record = values.next();
        newRecord.setTime(record.getTime());
        if (record.containsField("IFACE")) {
          if (record.containsField("rxpck/s")) {
            if (record.containsField("rxbyt/s")
                && record.containsField("txbyt/s")) {
              // netSpeed is hard-coded (128 MB/s, i.e. ~1 Gbit link);
              // busy% = rx + tx utilization against that nominal speed.
              double netBusyPcnt = 0, netRxByts = 0, netTxByts = 0, netSpeed = 128000000.00;
              netRxByts = Double.parseDouble(record.getValue("rxbyt/s"));
              netTxByts = Double.parseDouble(record.getValue("txbyt/s"));
              netBusyPcnt = (netRxByts / netSpeed * 100)
                  + (netTxByts / netSpeed * 100);
              record.add(record.getValue("IFACE") + "_busy_pcnt", ""
                  + netBusyPcnt);
              record.add("csource", record.getValue("csource"));
            }
            // Re-key throughput fields under the interface name, then drop
            // the generic names so multiple interfaces do not collide.
            record.add(record.getValue("IFACE") + ".rxbyt/s", record
                .getValue("rxbyt/s"));
            record.add(record.getValue("IFACE") + ".rxpck/s", record
                .getValue("rxpck/s"));
            record.add(record.getValue("IFACE") + ".txbyt/s", record
                .getValue("txbyt/s"));
            record.add(record.getValue("IFACE") + ".txpck/s", record
                .getValue("txpck/s"));
            record.removeValue("rxbyt/s");
            record.removeValue("rxpck/s");
            record.removeValue("txbyt/s");
            record.removeValue("txpck/s");
          }
          if (record.containsField("rxerr/s")) {
            record.add(record.getValue("IFACE") + ".rxerr/s", record
                .getValue("rxerr/s"));
            record.add(record.getValue("IFACE") + ".rxdrop/s", record
                .getValue("rxdrop/s"));
            record.add(record.getValue("IFACE") + ".txerr/s", record
                .getValue("txerr/s"));
            record.add(record.getValue("IFACE") + ".txdrop/s", record
                .getValue("txdrop/s"));
            record.removeValue("rxerr/s");
            record.removeValue("rxdrop/s");
            record.removeValue("txerr/s");
            record.removeValue("txdrop/s");
          }
          record.removeValue("IFACE");
        }
        if (record.containsField("Device:")) {
          // Same re-keying for per-disk iostat fields.
          record.add(record.getValue("Device:") + ".r/s", record
              .getValue("r/s"));
          record.add(record.getValue("Device:") + ".w/s", record
              .getValue("w/s"));
          record.add(record.getValue("Device:") + ".rkB/s", record
              .getValue("rkB/s"));
          record.add(record.getValue("Device:") + ".wkB/s", record
              .getValue("wkB/s"));
          record.add(record.getValue("Device:") + ".%util", record
              .getValue("%util"));
          record.removeValue("r/s");
          record.removeValue("w/s");
          record.removeValue("rkB/s");
          record.removeValue("wkB/s");
          record.removeValue("%util");
          record.removeValue("Device:");
        }
        if (record.containsField("swap_free")) {
          float swapUsedPcnt = 0, swapUsed = 0, swapTotal = 0;
          swapUsed = Long.parseLong(record.getValue("swap_used"));
          swapTotal = Long.parseLong(record.getValue("swap_total"));
          swapUsedPcnt = swapUsed / swapTotal * 100;
          record.add("swap_used_pcnt", "" + swapUsedPcnt);
          record.add("csource", record.getValue("csource"));
        }
        if (record.containsField("mem_used")) {
          double memUsedPcnt = 0, memTotal = 0, memUsed = 0;
          memTotal = Double.parseDouble(record.getValue("mem_total"));
          memUsed = Double.parseDouble(record.getValue("mem_used"));
          memUsedPcnt = memUsed / memTotal * 100;
          record.add("mem_used_pcnt", "" + memUsedPcnt);
          record.add("csource", record.getValue("csource"));
        }
        if (record.containsField("mem_buffers")) {
          double memBuffersPcnt = 0, memTotal = 0, memBuffers = 0;
          memTotal = Double.parseDouble(record.getValue("mem_total"));
          memBuffers = Double.parseDouble(record.getValue("mem_buffers"));
          memBuffersPcnt = memBuffers / memTotal * 100;
          record.add("mem_buffers_pcnt", "" + memBuffersPcnt);
          record.add("csource", record.getValue("csource"));
        }
        // Copy over all fields into the merged output record.
        String[] fields = record.getFields();
        for (String f : fields) {
          newRecord.add(f, record.getValue(f));
        }
      }
      if (record == null) {
        // Empty value set: nothing to emit (previously dereferenced null).
        return;
      }
      // Tag the output record. The original added this to the already-copied
      // input record, so "capp" never reached the collected output.
      newRecord.add("capp", "systemMetrics");
      output.collect(key, newRecord);
    } catch (IOException e) {
      // log.warn already records the stack trace; no printStackTrace needed.
      log.warn("Unable to collect output in SystemMetricsReduceProcessor ["
          + key + "]", e);
    }
  }
}
| 8,397 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/ClientTrace.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class ClientTrace implements ReduceProcessor {
  static final Logger log = Logger.getLogger(ClientTrace.class);

  @Override
  public String getDataType() {
    return this.getClass().getName();
  }

  /**
   * Sums the "bytes" field over all records grouped under one key and emits
   * the aggregate, while also re-emitting each raw record under the
   * "ClientTraceDetailed" reduce type for consumers that need per-operation
   * data. The key's string form is slash-separated; segments 1 and 2 name
   * the aggregated field and segment 3 is the period timestamp
   * (assumed layout from the map side — TODO confirm against the mapper).
   *
   * @param key grouping key; reused unchanged for the aggregate record
   * @param values client-trace records sharing {@code key}
   * @param output collector for both detailed and aggregate records
   * @param reporter Hadoop progress reporter (unused)
   */
  @Override
  public void process(ChukwaRecordKey key,
                      Iterator<ChukwaRecord> values,
                      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
                      Reporter reporter) {
    try {
      // The key never changes inside the loop; split it once.
      String[] k = key.getKey().split("/");
      long bytes = 0L;
      ChukwaRecord rec = null;
      while (values.hasNext()) {
        /* aggregate bytes for current key */
        rec = values.next();
        bytes += Long.parseLong(rec.getValue("bytes"));

        /* output raw values to different data type for uses which
         * require detailed per-operation data */
        ChukwaRecordKey detailed_key = new ChukwaRecordKey();
        String full_timestamp = rec.getValue("actual_time");
        detailed_key.setReduceType("ClientTraceDetailed");
        detailed_key.setKey(k[0] + "/" + k[1] + "_" + k[2] + "/" + full_timestamp);
        output.collect(detailed_key, rec);
      }
      if (null == rec) {
        // Empty value set: nothing to aggregate.
        return;
      }
      ChukwaRecord emit = new ChukwaRecord();
      emit.add(Record.tagsField, rec.getValue(Record.tagsField));
      emit.add(Record.sourceField, "undefined"); // TODO
      emit.add(Record.applicationField, rec.getValue(Record.applicationField));
      emit.add(k[1] + "_" + k[2], String.valueOf(bytes));
      emit.setTime(Long.parseLong(k[3]));
      output.collect(key, emit);
    } catch (IOException e) {
      // Fixed copy-paste: previously logged "SystemMetricsReduceProcessor".
      log.warn("Unable to collect output in ClientTrace [" + key + "]", e);
    }
  }
}
| 8,398 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/MRJobReduceProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
public class MRJobReduceProcessor implements ReduceProcessor {
  static Logger log = Logger.getLogger(MRJobReduceProcessor.class);

  @Override
  public String getDataType() {
    return MRJobReduceProcessor.class.getName();
  }

  /**
   * Merges all field/value pairs of the records grouped under one job key
   * (later records win on duplicate fields), derives a "HodId" from the
   * JOBCONF path, and emits a single "MRJob" record keyed and timestamped by
   * the job's SUBMIT_TIME.
   *
   * @param key grouping key from the map side
   * @param values job-history records sharing {@code key}
   * @param output collector for the single merged MRJob record
   * @param reporter Hadoop progress reporter (unused)
   */
  @Override
  public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
    try {
      HashMap<String, String> data = new HashMap<String, String>();
      ChukwaRecord record = null;
      while (values.hasNext()) {
        record = values.next();
        for (String field : record.getFields()) {
          data.put(field, record.getValue(field));
        }
      }
      if (record == null) {
        // Empty value set: nothing to emit (previously dereferenced null).
        return;
      }
      String submitTime = data.get("SUBMIT_TIME");
      String jobConf = data.get("JOBCONF");
      if (submitTime == null || jobConf == null) {
        // Both fields are required below; skip instead of throwing NPE/NFE.
        log.warn("Missing SUBMIT_TIME or JOBCONF for key [" + key
            + "], skipping");
        return;
      }
      // Extract initial time: SUBMIT_TIME
      long initTime = Long.parseLong(submitTime);
      // Extract HodId from the job conf path, e.g.
      // JOBCONF="/user/xxx/mapredsystem/563976.xxx.yyy.com/job_.../job.xml"
      // -> HodId "563976". Maybe use a regex loaded from configuration.
      int idx = jobConf.indexOf("mapredsystem/");
      idx += 13; // skip past "mapredsystem/"
      int idx2 = jobConf.indexOf(".", idx);
      data.put("HodId", jobConf.substring(idx, idx2));

      ChukwaRecordKey newKey = new ChukwaRecordKey();
      newKey.setKey("" + initTime);
      newKey.setReduceType("MRJob");

      ChukwaRecord newRecord = new ChukwaRecord();
      newRecord.setTime(initTime);
      // Original added the tags field twice; once is enough.
      newRecord.add(Record.tagsField, record.getValue(Record.tagsField));
      for (Map.Entry<String, String> entry : data.entrySet()) {
        newRecord.add(entry.getKey(), entry.getValue());
      }
      output.collect(newKey, newRecord);
    } catch (IOException e) {
      // Fixed copy-paste: previously logged "JobLogHistoryReduceProcessor";
      // log.warn already records the stack trace.
      log.warn("Unable to collect output in MRJobReduceProcessor ["
          + key + "]", e);
    }
  }
}
| 8,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.