index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/JMXAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.rmi.ConnectException;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.Timer;
import java.util.TimerTask;
import java.util.TreeSet;
import javax.management.Descriptor;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.TabularData;
import javax.management.openmbean.TabularType;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.adaptor.AbstractAdaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
/**
* Query metrics through JMX interface. <br>
* 1. Enable remote jmx monitoring for the target
* jvm by specifying -Dcom.sun.management.jmxremote.port=jmx_port<br>
* 2. Enable authentication with -Dcom.sun.management.jmxremote.authenticate=true <br>
* -Dcom.sun.management.jmxremote.password.file=${CHUKWA_CONF_DIR}/jmxremote.password <br>
* -Dcom.sun.management.jmxremote.access.file=${CHUKWA_CONF_DIR}/jmxremote.access <br>
* 3. Optionally specify these jvm options <br>
* -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote.ssl=false <br>
* 4. Connect to the jmx agent using jconsole and find out which domain you want to collect data for
* 5. Add the jmx adaptor. Ex: To collect metrics from a hadoop datanode that has enabled jmx on 8007, at 60s interval, use command<br>
* "add JMXAdaptor DatanodeProcessor localhost 8007 60 Hadoop:*" <br><br>
* Your JMX adaptor is now good to go and will send out the collected metrics as chunks to the collector.
*/
public class JMXAdaptor extends AbstractAdaptor{

  private static Logger log = Logger.getLogger(JMXAdaptor.class);
  private MBeanServerConnection mbsc = null;
  private String port = "", server = "localhost";
  private JMXServiceURL url;
  private JMXConnector jmxc = null;
  private long period = 10;          // polling interval, in seconds
  private Timer timer;               // drives the JMXTimer poll task; null until connected
  private JMXTimer runner;           // currently scheduled poll task
  private String pattern = "";       // ObjectName query pattern, e.g. "Hadoop:*"
  long sendOffset = 0;               // cumulative bytes emitted; used as chunk offset
  volatile boolean shutdown = false; // signals the JMXConnect retry loop to stop

  /**
   * A thread which creates a new connection to JMX and retries every 10s if the
   * connection is not successful. It uses the credentials specified in
   * $CHUKWA_CONF_DIR/jmxremote.password.
   */
  public class JMXConnect implements Runnable{

    @Override
    public void run() {
      String hadoop_conf = System.getenv("CHUKWA_CONF_DIR");
      StringBuffer sb = new StringBuffer(hadoop_conf);
      if(!hadoop_conf.endsWith("/")){
        sb.append(File.separator);
      }
      sb.append("jmxremote.password");
      File jmx_pw_file = new File(sb.toString());
      shutdown = false;
      while(!shutdown){
        try{
          String[] creds = null;
          // try-with-resources: the original leaked the reader if readLine() threw
          try(BufferedReader br = new BufferedReader(new InputStreamReader(
              new FileInputStream(jmx_pw_file.getAbsolutePath()),
              Charset.forName("UTF-8")))){
            String buffer = br.readLine();
            if(buffer != null) {
              creds = buffer.split(" ");
            }
          }
          Map<String, String[]> env = new HashMap<String, String[]>();
          if(creds != null) {
            env.put(JMXConnector.CREDENTIALS, creds);
          }
          jmxc = JMXConnectorFactory.connect(url, env);
          mbsc = jmxc.getMBeanServerConnection();
          if(timer == null) {
            // A TimerTask may only ever be scheduled once, so a fresh JMXTimer
            // is always created together with a fresh Timer.
            timer = new Timer();
            runner = new JMXTimer(dest, JMXAdaptor.this, mbsc);
          }
          timer.scheduleAtFixedRate(runner, 0, period * 1000);
          shutdown = true; // connected and scheduled; leave the retry loop
        } catch (IOException e) {
          log.error("IOException in JMXConnect thread prevented connect to JMX on port:"+port+", retrying after 10s");
          log.error(ExceptionUtil.getStackTrace(e));
          try {
            Thread.sleep(10000);
          } catch (InterruptedException e1) {
            log.error("JMXConnect thread interrupted in sleep, bailing");
            Thread.currentThread().interrupt(); // preserve interrupt status
            shutdown = true;
          }
        } catch (Exception e) {
          log.error("Something bad happened in JMXConnect thread, bailing");
          log.error(ExceptionUtil.getStackTrace(e));
          // Guard against NPE: the failure may occur before the timer exists
          // (the original unconditionally called timer.cancel() here).
          if(timer != null) {
            timer.cancel();
            timer = null;
          }
          shutdown = true;
        }
      }
    }
  }

  /**
   * A TimerTask which queries the mbean server for all mbeans that match the
   * configured pattern, builds a JSON object of attribute values and sends it
   * as a chunk. Open mbean types (CompositeType, TabularType, arrays) are
   * reported by their sizes. On a broken connection the task cancels the timer
   * and spawns a JMXConnect thread to re-establish the connection.
   */
  public class JMXTimer extends TimerTask{

    private Logger log = Logger.getLogger(JMXTimer.class);
    private ChunkReceiver receiver = null;
    private JMXAdaptor adaptor = null;
    private MBeanServerConnection mbsc = null;

    public JMXTimer(ChunkReceiver receiver, JMXAdaptor adaptor, MBeanServerConnection mbsc){
      this.receiver = receiver;
      this.adaptor = adaptor;
      this.mbsc = mbsc;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void run() {
      try{
        ObjectName query = null;
        if(!pattern.equals("")){
          query = new ObjectName(pattern);
        }
        Set<ObjectName> names = new TreeSet<ObjectName>(mbsc.queryNames(query, null));
        Object val = null;
        JSONObject json = new JSONObject();
        for (ObjectName oname: names) {
          MBeanInfo mbinfo = mbsc.getMBeanInfo(oname);
          MBeanAttributeInfo [] mbinfos = mbinfo.getAttributes();
          for (MBeanAttributeInfo mb: mbinfos) {
            try{
              Descriptor d = mb.getDescriptor();
              val = mbsc.getAttribute(oname, mb.getName());
              if(d.getFieldNames().length > 0){ // this is an open mbean
                OpenType<?> openType = (OpenType<?>)d.getFieldValue("openType");
                if(openType.isArray()){
                  Object[] valarray = (Object[])val;
                  val = Integer.toString(valarray.length);
                }
                else if(openType instanceof CompositeType){
                  CompositeData data = (CompositeData)val;
                  val = Integer.toString(data.values().size());
                }
                else if(openType instanceof TabularType){
                  TabularData data = (TabularData)val;
                  val = Integer.toString(data.size());
                }
                // else it is SimpleType: pass the raw value through
              }
              json.put(mb.getName(),val);
            }
            catch(Exception e){
              // A single unreadable attribute should not abort the whole scan.
              log.warn("Exception "+ e.getMessage() +" getting attribute - "+mb.getName() + " Descriptor:"+mb.getDescriptor().getFieldNames().length);
            }
          }
        }
        byte[] data = json.toString().getBytes(Charset.forName("UTF-8"));
        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(type, "JMX", sendOffset, data, adaptor);
        long rightNow = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
        c.addTag("timeStamp=\""+rightNow+"\"");
        receiver.add(c);
      }
      catch(ConnectException e1){
        log.error("Got connect exception for the existing MBeanServerConnection");
        log.error(ExceptionUtil.getStackTrace(e1));
        log.info("Make sure the target process is running. Retrying connection to JMX on port:"+port);
        // Drop the dead timer; JMXConnect will build a new Timer + JMXTimer pair.
        timer.cancel();
        timer = null;
        Thread connectThread = new Thread(new JMXConnect());
        connectThread.start();
      }
      catch(Exception e){
        log.error(ExceptionUtil.getStackTrace(e));
      }
    }
  }

  /** @return the adaptor status string: "type server port period pattern". */
  @Override
  public String getCurrentStatus() {
    StringBuilder buffer = new StringBuilder();
    buffer.append(type);
    buffer.append(" ");
    buffer.append(server);
    buffer.append(" ");
    buffer.append(port);
    buffer.append(" ");
    buffer.append(period);
    buffer.append(" ");
    buffer.append(pattern);
    return buffer.toString();
  }

  /**
   * Closes the JMX connector and cancels the poll timer.
   * @return the current send offset
   * @throws AdaptorException if closing the connector fails
   */
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    log.info("Enter Shutdown:" + shutdownPolicy.name()+ " - ObjectId:" + this);
    try {
      if(jmxc != null){
        jmxc.close();
      }
      if(timer != null){
        timer.cancel();
      }
    } catch (IOException e) {
      log.error("JMXAdaptor shutdown failed due to IOException");
      throw new AdaptorException(ExceptionUtil.getStackTrace(e));
    } catch (Exception e) {
      log.error("JMXAdaptor shutdown failed");
      throw new AdaptorException(ExceptionUtil.getStackTrace(e));
    }
    // in case the start thread is still retrying
    shutdown = true;
    return sendOffset;
  }

  /**
   * Starts the background JMXConnect thread; the poll task is scheduled once
   * the connection succeeds.
   */
  @Override
  public void start(long offset) throws AdaptorException {
    try {
      sendOffset = offset;
      Thread connectThread = new Thread(new JMXConnect());
      connectThread.start();
    } catch(Exception e) {
      log.error("Failed to schedule JMX connect thread");
      throw new AdaptorException(ExceptionUtil.getStackTrace(e));
    }
  }

  /**
   * Parses adaptor arguments and builds the JMX service URL.
   * Syntax: server port [interval] pattern
   * @return the argument string on success, null on bad syntax
   */
  @Override
  public String parseArgs(String s) {
    // JMXAdaptor MBeanServer port [interval] DomainNamePattern-Ex:"Hadoop:*"
    String[] tokens = s.split(" ");
    if(tokens.length == 4){
      server = tokens[0];
      port = tokens[1];
      try {
        period = Integer.parseInt(tokens[2]);
      } catch (NumberFormatException e) {
        // original let this propagate and crash the caller
        log.warn("bad syntax in JMXAdaptor args");
        return null;
      }
      pattern = tokens[3];
    }
    else if(tokens.length == 3){
      server = tokens[0];
      port = tokens[1];
      pattern = tokens[2];
    }
    else{
      log.warn("bad syntax in JMXAdaptor args");
      return null;
    }
    String url_string = "service:jmx:rmi:///jndi/rmi://"+server+ ":"+port+"/jmxrmi";
    try{
      url = new JMXServiceURL(url_string);
      return s;
    }
    catch(Exception e){
      log.error(ExceptionUtil.getStackTrace(e));
    }
    return null;
  }
}
| 8,200 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/RestAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.FileInputStream;
import java.nio.charset.Charset;
import java.security.KeyStore;
import java.security.SecureRandom;
import java.util.Calendar;
import java.util.TimeZone;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.log4j.Logger;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.chukwa.datacollection.agent.ChukwaConstants.*;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.client.urlconnection.HTTPSProperties;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.ws.rs.core.MediaType;
public class RestAdaptor extends AbstractAdaptor {

  private String uri;        // REST endpoint to poll
  private long period = 60;  // polling interval, in seconds
  private static Logger log = Logger.getLogger(RestAdaptor.class);
  private WebResource resource;
  private Client c;          // Jersey client, built once in parseArgs()
  private String bean;       // most recent JSON payload fetched
  private Timer timer;
  private TimerTask runner;
  private long sendOffset;   // cumulative bytes emitted; used as chunk offset

  /** Polls the REST endpoint and forwards the JSON payload as one chunk. */
  class RestTimer extends TimerTask {

    private ChunkReceiver receiver;
    private RestAdaptor adaptor;

    RestTimer(ChunkReceiver receiver, RestAdaptor adaptor) {
      this.receiver = receiver;
      this.adaptor = adaptor;
    }

    @Override
    public void run() {
      try {
        resource = c.resource(uri);
        bean = resource.accept(MediaType.APPLICATION_JSON_TYPE).get(
            String.class);
        byte[] data = bean.getBytes(Charset.forName("UTF-8"));
        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(type, "REST", sendOffset, data, adaptor);
        long rightNow = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
            .getTimeInMillis();
        c.addTag("timeStamp=\"" + rightNow + "\"");
        receiver.add(c);
      } catch (com.sun.jersey.api.client.ClientHandlerException e) {
        Throwable t = e.getCause();
        if (t instanceof java.net.ConnectException) {
          log.warn("Connect exception trying to connect to " + uri
              + ". Make sure the service is running");
        } else {
          // fixed misleading copy/pasted message: this is not an interrupt
          log.error("RestAdaptor: failed to fetch data from " + uri);
          log.error(ExceptionUtil.getStackTrace(e));
        }
      } catch (Exception e) {
        log.error("RestAdaptor: failed to fetch data from " + uri);
        log.error(ExceptionUtil.getStackTrace(e));
      }
    }
  }

  /** @return the adaptor status string: "type uri period". */
  @Override
  public String getCurrentStatus() {
    StringBuilder buffer = new StringBuilder();
    buffer.append(type);
    buffer.append(" ");
    buffer.append(uri);
    buffer.append(" ");
    buffer.append(period);
    return buffer.toString();
  }

  /**
   * Cancels the poll timer if one was ever started.
   * @return the current send offset
   */
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    // guard: timer is null if parseArgs() failed or start() never ran
    // (the original NPE'd here in that case)
    if (timer != null) {
      timer.cancel();
    }
    return sendOffset;
  }

  /** Schedules the poll task at the configured interval. */
  @Override
  public void start(long offset) throws AdaptorException {
    sendOffset = offset;
    if (timer == null) {
      timer = new Timer();
      runner = new RestTimer(dest, RestAdaptor.this);
    }
    timer.scheduleAtFixedRate(runner, 0, period * 1000);
  }

  /**
   * Parses adaptor arguments and initializes the HTTP(S) client.
   * Syntax: uri interval
   * @return the argument string on success, null on bad syntax or client failure
   */
  @Override
  public String parseArgs(String s) {
    // RestAdaptor uri interval
    String[] tokens = s.split(" ");
    if (tokens.length == 2) {
      uri = tokens[0];
      try {
        period = Integer.parseInt(tokens[1]);
      } catch (NumberFormatException e) {
        log.warn("RestAdaptor: incorrect argument for period. Expecting number");
        return null;
      }
    } else {
      log.warn("bad syntax in RestAdaptor args");
      return null;
    }
    try {
      initClient();
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
      return null;
    }
    return s;
  }

  /**
   * Builds the Jersey client. For https URIs, loads the configured trust store
   * and installs an SSLContext on the client; otherwise a plain client is used.
   * @throws Exception if trust store configuration is missing or unloadable
   */
  private void initClient() throws Exception {
    if (uri.contains("https")) {
      Configuration conf = ChukwaAgent.getAgent().getConfiguration();
      String trustStoreFile = conf.get(TRUSTSTORE_STORE);
      String trustStorePw = conf.get(TRUST_PASSWORD);
      if (trustStoreFile == null || trustStorePw == null) {
        throw new Exception(
            "Cannot instantiate RestAdaptor to uri "
                + uri
                + " due to missing trust store configurations chukwa.ssl.truststore.store and chukwa.ssl.trust.password");
      }
      String trustStoreType = conf.get(TRUSTSTORE_TYPE, DEFAULT_STORE_TYPE);
      KeyStore trustStore = KeyStore.getInstance(trustStoreType);
      FileInputStream fis = null;
      try {
        fis = new FileInputStream(trustStoreFile);
        trustStore.load(fis, trustStorePw.toCharArray());
      } finally {
        if (fis != null) {
          fis.close();
        }
      }
      TrustManagerFactory tmf = TrustManagerFactory
          .getInstance(TrustManagerFactory.getDefaultAlgorithm());
      tmf.init(trustStore);
      TrustManager[] trustManagers = tmf.getTrustManagers();
      SSLContext ctx = null;
      String protocol = conf.get(SSL_PROTOCOL, DEFAULT_SSL_PROTOCOL);
      ctx = SSLContext.getInstance(protocol);
      ctx.init(null, trustManagers, new SecureRandom());
      ClientConfig cc = new DefaultClientConfig();
      HTTPSProperties props = new HTTPSProperties(null, ctx);
      cc.getProperties().put(HTTPSProperties.PROPERTY_HTTPS_PROPERTIES, props);
      c = Client.create(cc);
    } else {
      c = Client.create();
    }
  }
}
| 8,201 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/ExecAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.inputtools.plugin.ExecPlugin;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.ISO8601DateFormat;
import org.json.simple.JSONObject;
import java.nio.charset.Charset;
import java.util.*;
/**
* Runs a command inside chukwa. Takes as params the interval in seconds at
* which to run the command, and the path and args to execute.
*
* Interval is optional, and defaults to 5 seconds.
*
* Example usage: add
* org.apache.hadoop.chukwa.datacollection.adaptor.ExecAdaptor Ps 2 /bin/ps aux
* 0
*
*/
public class ExecAdaptor extends AbstractAdaptor {

  public static final boolean FULL_PATHS = false;

  /** Minimal ExecPlugin wrapper that runs a fixed command line. */
  static class EmbeddedExec extends ExecPlugin {

    String cmd;

    public EmbeddedExec(String c) {
      cmd = c;
    }

    @Override
    public String getCmde() {
      return cmd;
    }
  }

  EmbeddedExec exec;
  static final boolean FAKE_LOG4J_HEADER = true;
  static final boolean SPLIT_LINES = false;
  static Logger log = Logger.getLogger(ExecAdaptor.class);

  /**
   * Runs the command once and forwards its output as a chunk. Deregisters the
   * adaptor if the command reports failure.
   */
  class RunToolTask extends TimerTask {
    public void run() {
      log.info("calling exec");
      JSONObject o = exec.execute();
      try {
        if (((Integer) o.get("status")).intValue() == exec.statusKO) {
          deregisterAndStop();
          return;
        }
        // FIXME: downstream customers would like timestamps here.
        // Doing that efficiently probably means cutting out all the
        // excess buffer copies here, and appending into an OutputBuffer.
        byte[] data;
        if (FAKE_LOG4J_HEADER) {
          // Prepend a log4j-style header so downstream parsers treat the
          // output like a log line.
          StringBuilder result = new StringBuilder();
          ISO8601DateFormat dateFormat = new org.apache.log4j.helpers.ISO8601DateFormat();
          result.append(dateFormat.format(new java.util.Date()));
          result.append(" INFO org.apache.hadoop.chukwa.");
          result.append(type);
          result.append("= ");
          result.append(o.get("exitValue"));
          result.append(": ");
          result.append((String) o.get("stdout"));
          data = result.toString().getBytes(Charset.forName("UTF-8"));
        } else {
          String stdout = (String) o.get("stdout");
          // fixed: use an explicit charset rather than the platform default,
          // consistent with the branch above
          data = stdout.getBytes(Charset.forName("UTF-8"));
        }
        sendOffset += data.length;
        ChunkImpl c = new ChunkImpl(ExecAdaptor.this.type, "results from "
            + cmd, sendOffset, data, ExecAdaptor.this);
        if (SPLIT_LINES) {
          ArrayList<Integer> carriageReturns = new ArrayList<Integer>();
          for (int i = 0; i < data.length; ++i)
            if (data[i] == '\n')
              carriageReturns.add(i);
          c.setRecordOffsets(carriageReturns);
        } // else we get default one record
        // We can't replay exec data, so we might as well commit to it now.
        control.reportCommit(ExecAdaptor.this, sendOffset);
        dest.add(c);
      } catch (InterruptedException e) {
        log.debug(e);
        Thread.currentThread().interrupt(); // preserve interrupt status
      }
    }
  };

  String cmd;
  final java.util.Timer timer;
  long period = 5; // sampling interval, in seconds
  volatile long sendOffset = 0;

  public ExecAdaptor() {
    timer = new java.util.Timer();
  }

  /** @return the adaptor status string: "type period cmd". */
  @Override
  public String getCurrentStatus() {
    return type + " " + period + " " + cmd;
  }

  /**
   * Cancels the timer and stops the child process; graceful policies wait for
   * the process to finish instead of killing it.
   * @return the current send offset
   */
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    log.info("Enter Shutdown:" + shutdownPolicy.name()+ " - ObjectId:" + this);
    switch(shutdownPolicy) {
      case GRACEFULLY :
      case WAIT_TILL_FINISHED :
        try {
          timer.cancel();
          exec.waitFor();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // fixed: don't silently swallow
        }
        break;
      default:
        timer.cancel();
        exec.stop();
        break;
    }
    log.info("Exit Shutdown:" + shutdownPolicy.name()+ " - ObjectId:" + this);
    return sendOffset;
  }

  /** Creates the exec plugin and schedules the periodic run task. */
  @Override
  public void start(long offset) throws AdaptorException {
    this.sendOffset = offset;
    this.exec = new EmbeddedExec(cmd);
    TimerTask execTimer = new RunToolTask();
    timer.schedule(execTimer, 0L, period*1000L);
  }

  /**
   * Parses "interval cmd args..."; the interval is optional and defaults to 5.
   * @return the command string (also used as the canonical status)
   */
  @Override
  public String parseArgs(String status) {
    int spOffset = status.indexOf(' ');
    if (spOffset > 0) {
      try {
        period = Integer.parseInt(status.substring(0, spOffset));
        cmd = status.substring(spOffset + 1);
      } catch (NumberFormatException e) {
        log.warn("ExecAdaptor: sample interval "
            + status.substring(0, spOffset) + " can't be parsed");
        cmd = status; // no leading interval; whole string is the command
      }
    } else
      cmd = status;
    return cmd;
  }
}
| 8,202 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/AdaptorException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
/**
 * Checked exception raised by adaptors when starting, stopping, or running
 * fails in a way the caller should handle.
 */
public class AdaptorException extends Exception {

  private static final long serialVersionUID = -8490279345367308690L;

  /** Creates an exception with neither detail message nor cause. */
  public AdaptorException() {
    super();
  }

  /**
   * Creates an exception with a detail message.
   * @param message description of the failure
   */
  public AdaptorException(String message) {
    super(message);
  }

  /**
   * Creates an exception wrapping an underlying cause.
   * @param cause the exception that triggered this one
   */
  public AdaptorException(Throwable cause) {
    super(cause);
  }

  /**
   * Creates an exception with both a detail message and a cause.
   * @param message description of the failure
   * @param cause the exception that triggered this one
   */
  public AdaptorException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 8,203 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/MemBuffered.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import static org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy.RESTARTING;
import java.util.*;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
public class MemBuffered extends AbstractWrapper {

  static final String BUF_SIZE_OPT = "adaptor.memBufWrapper.size";
  static final int DEFAULT_BUF_SIZE = 1024*1024; //1 MB
  // true by default. If you were willing to discard data, you didn't need Mem Buffers
  static boolean BLOCK_WHEN_FULL = true;

  /**
   * Bounded in-memory FIFO of chunks awaiting commit, keyed per adaptor.
   * All mutating methods are synchronized on the buffer instance.
   */
  static class MemBuf {

    long dataSizeBytes;        // bytes currently buffered
    final long maxDataSize;    // capacity in bytes
    final ArrayDeque<Chunk> chunks;

    public MemBuf(long maxDataSize) {
      dataSizeBytes = 0;
      this.maxDataSize = maxDataSize;
      chunks = new ArrayDeque<Chunk>();
    }

    /**
     * Appends a chunk, blocking for space (or evicting oldest chunks when
     * BLOCK_WHEN_FULL is false).
     */
    synchronized void add(Chunk c) throws InterruptedException{
      int len = c.getData().length;
      if(BLOCK_WHEN_FULL) {
        while(len + dataSizeBytes > maxDataSize)
          wait();
      } else {
        // Fixed: the original removed exactly one chunk unconditionally
        // (throwing on an empty deque) and never adjusted dataSizeBytes.
        while(len + dataSizeBytes > maxDataSize && !chunks.isEmpty()) {
          Chunk evicted = chunks.remove();
          dataSizeBytes -= evicted.getData().length;
        }
      }
      dataSizeBytes += len;
      chunks.add(c);
    }

    /**
     * Discards all chunks with sequence id <= l (now committed downstream)
     * and wakes writers blocked in add().
     */
    synchronized void removeUpTo(long l) {
      long bytesFreed = 0;
      while(!chunks.isEmpty()) {
        // Fixed: the original peeked with getFirst() and either re-added the
        // head (duplicating it) or never removed it, looping forever.
        Chunk c = chunks.removeFirst();
        if(c.getSeqID() > l) {
          chunks.addFirst(c); // not yet committed; put it back and stop
          break;
        }
        bytesFreed += c.getData().length;
      }
      if(bytesFreed > 0) {
        dataSizeBytes -= bytesFreed;
        notifyAll();
      }
    }
  }

  // One buffer per adaptor id, shared across restarts of the same adaptor.
  static Map<String, MemBuf> buffers;
  static {
    buffers = new HashMap<String, MemBuf>();
  }

  MemBuf myBuffer;

  /** Buffers the chunk, then forwards it downstream. */
  @Override
  public void add(Chunk event) throws InterruptedException {
    myBuffer.add(event);
    dest.add(event);
  }

  /**
   * Looks up (or creates) this adaptor's buffer, replays any chunks still
   * awaiting commit, then starts the wrapped adaptor at the highest replayed
   * offset.
   */
  @Override
  public void start(String adaptorID, String type, long offset,
      ChunkReceiver dest) throws AdaptorException {
    try {
      String dummyAdaptorID = adaptorID;
      this.adaptorID = adaptorID;
      this.dest = dest;
      long bufSize = manager.getConfiguration().getInt(BUF_SIZE_OPT, DEFAULT_BUF_SIZE);
      synchronized(buffers) {
        myBuffer = buffers.get(adaptorID);
        if(myBuffer == null) {
          myBuffer = new MemBuf(bufSize);
          buffers.put(adaptorID, myBuffer);
        }
      }
      // Drain buffer into output queue
      long offsetToStartAt = offset;
      for(Chunk c:myBuffer.chunks) {
        dest.add(c);
        long seq = c.getSeqID();
        if(seq > offsetToStartAt)
          offsetToStartAt = seq;
      }
      inner.start(dummyAdaptorID, innerType, offsetToStartAt, this);
    } catch(InterruptedException e) {
      throw new AdaptorException(e);
    }
  }

  /** Frees buffered chunks once the downstream pipeline commits through l. */
  @Override
  public void committed(long l) {
    myBuffer.removeUpTo(l);
  }

  /** Drops the buffer unless the adaptor is merely restarting. */
  @Override
  public long shutdown(AdaptorShutdownPolicy p) throws AdaptorException {
    if(p != RESTARTING)
      buffers.remove(adaptorID);
    return inner.shutdown(p);
  }
}
| 8,204 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/AbstractAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorManager;
/**
 * Convenience base class for adaptors. Stores the adaptor's identity fields
 * (id, datatype, destination queue, owning manager) and adapts the
 * multi-argument Adaptor entry points to simpler single-argument hooks
 * (start(offset), parseArgs(String)) that subclasses implement.
 */
public abstract class AbstractAdaptor implements Adaptor {

  protected String type;            // datatype tag attached to emitted chunks
  protected ChunkReceiver dest;     // queue that receives this adaptor's chunks
  protected String adaptorID;       // unique id assigned by the agent
  protected AdaptorManager control; // manager, set in parseArgs(); used to deregister

  @Override
  public final String getType() {
    return type;
  }

  /**
   * Records the identity fields, then delegates to the subclass's
   * start(offset) hook.
   */
  @Override
  public final void start(String adaptorID, String type, long offset,
      ChunkReceiver dest) throws AdaptorException {
    this.adaptorID = adaptorID;
    this.type = type;
    this.dest=dest;
    start(offset);
  }

  // Subclass hook: begin collecting data, resuming from the given byte offset.
  public abstract void start(long offset) throws AdaptorException;

  // Subclass hook: parse adaptor-specific arguments; return null on bad syntax.
  public abstract String parseArgs(String s);

  /** Asks the manager to hard-stop and unregister this adaptor. */
  public void deregisterAndStop() {
    control.stopAdaptor(adaptorID, AdaptorShutdownPolicy.HARD_STOP);
  }

  /**
   * Records the manager reference, then delegates argument parsing to the
   * subclass. Note: d (the datatype) is unused here; type is set in start().
   */
  public String parseArgs(String d, String s, AdaptorManager c) {
    control = c;
    return parseArgs(s);
  }

  /** @deprecated use {@link #shutdown(AdaptorShutdownPolicy)} with HARD_STOP. */
  @Deprecated
  public void hardStop() throws AdaptorException {
    shutdown(AdaptorShutdownPolicy.HARD_STOP);
  }

  /** @deprecated use {@link #shutdown(AdaptorShutdownPolicy)} with GRACEFULLY. */
  @Deprecated
  public long shutdown() throws AdaptorException {
    return shutdown(AdaptorShutdownPolicy.GRACEFULLY);
  }
}
| 8,205 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/WriteaheadBuffered.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.*;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.log4j.Logger;
import static org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy.*;
public class WriteaheadBuffered extends AbstractWrapper {

  Logger log = Logger.getLogger(WriteaheadBuffered.class);
  static final String BUF_DIR_OPT = "adaptor.writeaheadWrapper.dir";
  static String BUF_DIR = "/tmp";
  static long COMPACT_AT = 1024 * 1024; // compact when it can free at least this much storage

  File outBuf;                 // on-disk write-ahead log, one file per adaptor id
  DataOutputStream outToDisk;  // append stream onto outBuf
  long fSize, highestSentOffset;

  /** Persists the chunk to the write-ahead log, then forwards it downstream. */
  @Override
  public void add(Chunk event) throws InterruptedException {
    try {
      event.write(outToDisk);
      outToDisk.flush();
      fSize += event.getData().length;
      long seq = event.getSeqID();
      if(seq > highestSentOffset)
        highestSentOffset = seq;
    } catch(IOException e) {
      // a WAL write failure is logged but must not block the live data path
      log.error(e);
    }
    dest.add(event);
  }

  /**
   * Replays any chunks in the write-ahead log beyond the given offset, opens
   * the log for appending, and starts the wrapped adaptor at the highest
   * replayed offset.
   */
  @Override
  public void start(String adaptorID, String type, long offset,
      ChunkReceiver dest) throws AdaptorException {
    try {
      String dummyAdaptorID = adaptorID;
      this.dest = dest;
      outBuf = new File(BUF_DIR, adaptorID);
      long newOffset = offset;
      if(outBuf.length() > 0) {
        DataInputStream dis = new DataInputStream(new FileInputStream(outBuf));
        while(dis.available() > 0) {
          Chunk c = ChunkImpl.read(dis);
          fSize += c.getData().length;
          long seq = c.getSeqID();
          if(seq > offset) {
            // send chunks that are outstanding
            dest.add(c);
            newOffset = seq;
          }
        }
        dis.close();
      }
      outToDisk = new DataOutputStream(new FileOutputStream(outBuf, true));
      inner.start(dummyAdaptorID, innerType, newOffset, this);
    } catch(IOException e) {
      throw new AdaptorException(e);
    } catch(InterruptedException e) {
      throw new AdaptorException(e);
    }
  }

  /**
   * Called when the pipeline commits through offset l. If enough committed
   * bytes have accumulated, compacts the log by rewriting only the
   * still-uncommitted chunks.
   */
  @Override
  public void committed(long l) {
    try {
      long bytesOutstanding = highestSentOffset - l;
      if(fSize - bytesOutstanding > COMPACT_AT) {
        fSize = 0;
        outToDisk.close();
        // Fixed: the original used outBuf itself as the parent directory,
        // producing a path *inside* a regular file.
        File outBufTmp = new File(outBuf.getAbsolutePath() + ".tmp");
        if(!outBuf.renameTo(outBufTmp)) {
          log.warn("Cannot rename temp file "+outBuf.getAbsolutePath()+
              " to "+outBufTmp.getAbsolutePath());
        }
        outToDisk = new DataOutputStream(new FileOutputStream(outBuf, false));
        DataInputStream dis = new DataInputStream(new FileInputStream(outBufTmp));
        while(dis.available() > 0) {
          Chunk c = ChunkImpl.read(dis);
          if(c.getSeqID() > l) { // not yet committed
            c.write(outToDisk);
            fSize += c.getData().length;
          }
        }
        dis.close();
        if(!outBufTmp.delete()) {
          log.warn("Can not delete temp file: "+outBufTmp.getAbsolutePath());
        }
      }
    } catch(IOException e) {
      log.error(e);
      // should this be fatal?
    }
  }

  /** Removes the on-disk log unless the adaptor is merely restarting. */
  @Override
  public long shutdown(AdaptorShutdownPolicy p) throws AdaptorException {
    if(p != RESTARTING) {
      // Fixed: the original check was inverted and warned on *successful*
      // deletion while staying silent on failure.
      if(!outBuf.delete()) {
        log.warn("Cannot delete output buffer file:"+outBuf.getAbsolutePath());
      }
    }
    return inner.shutdown(p);
  }
}
| 8,206 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/DirTailingAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.File;
import java.io.IOException;
import org.apache.log4j.Logger;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.WildcardFileFilter;
/**
* Explore a whole directory hierarchy, looking for files to tail.
* DirTailingAdaptor will not try to start tailing a file more than once,
* if the file hasn't been modified in the interim.
*
* Offset param is used to track last finished scan.
*
* Mandatory first parameter is a directory with an optional unix style file
* filter. Mandatory second parameter
* is the name of an adaptor to start.
*
* If the specified directory does not exist, the DirTailer will continue
* running, and will start tailing if the directory is later created.
*
*/
public class DirTailingAdaptor extends AbstractAdaptor implements Runnable {
  static Logger log = Logger.getLogger(DirTailingAdaptor.class);
  // Background thread running run(); sweeps the directory tree periodically.
  Thread scanThread = new Thread(this);
  // Start time of the last completed sweep; doubles as this adaptor's
  // checkpoint offset (see start()/shutdown()).
  long lastSweepStartTime;
  volatile boolean continueScanning=true;
  File baseDir;
  String baseDirName;
  long scanInterval; // ms between sweeps
  protected String adaptorName; // name of adaptors to start
  IOFileFilter fileFilter;
  String wildCharacter; // optional wildcard/filter pattern, may be null
  @Override
  public void start(long offset) throws AdaptorException {
    scanInterval = control.getConfiguration().getInt("adaptor.dirscan.intervalMs", 10000);
    scanThread.start();
    lastSweepStartTime = offset;
    try {
      baseDirName = baseDir.getCanonicalPath();
    } catch(IOException e) {
      throw new AdaptorException(e);
    }
  }
  // Sweep loop: scan, commit the sweep-start time, sleep, repeat until
  // shutdown clears continueScanning.
  public void run() {
    try {
      log.debug("dir tailer starting to scan");
      while(continueScanning) {
        try {
          long sweepStartTime = System.currentTimeMillis();
          scanDirHierarchy(baseDir);
          lastSweepStartTime=sweepStartTime;
          control.reportCommit(this, lastSweepStartTime);
        } catch(IOException e) {
          log.warn(e);
        }
        Thread.sleep(scanInterval);
      }
    } catch(InterruptedException e) {
      // interrupted during sleep: treated as a request to stop scanning
    }
  }
  /*
   * Coded recursively. Base case is a single non-dir file.
   */
  private void scanDirHierarchy(File dir) throws IOException {
    if(!dir.exists())
      return;
    if(!dir.isDirectory()) {
      //Don't start tailing if we would have gotten it on the last pass
      if(dir.lastModified() >= lastSweepStartTime) {
        String newAdaptorID = control.processAddCommand(getAdaptorAddCommand(dir));
        log.info("DirTailingAdaptor " + adaptorID + " started new adaptor " + newAdaptorID);
      }
    } else {
      log.info("Scanning directory: " + dir.getName());
      for(Object f: FileUtils.listFiles(dir, fileFilter, FileFilterUtils.trueFileFilter())) {
        scanDirHierarchy((File)f);
      }
    }
  }
  // Builds the agent command used to start the per-file adaptor.
  protected String getAdaptorAddCommand(File dir) throws IOException {
    return "add " + adaptorName + " " + type + " " + dir.getCanonicalPath() + " 0";
  }
  @Override
  public String getCurrentStatus() {
    return this.wildCharacter == null ? (type + " " + baseDirName + " " + adaptorName)
        :(type + " " + baseDirName + " " + this.wildCharacter + " " + adaptorName);
  }
  @Override
  public String parseArgs(String status) {
    // Accepts "<dir> <adaptor>" or "<dir> <pattern> <adaptor>".
    String[] args = status.split(" ");
    if(args.length == 2){
      baseDir = new File(args[0]);
      fileFilter = FileFilterUtils.trueFileFilter();
      adaptorName = args[1];
    }else if(args.length == 3){
      baseDir = new File(args[0]);
      this.wildCharacter = args[ 1 ];
      fileFilter = getFileFilter();
      adaptorName = args[2];
    }else{
      log.warn("bad syntax in DirTailingAdaptor args");
      return null;
    }
    return (args.length == 2)? baseDir + " " + adaptorName : baseDir + " " + this.wildCharacter + " " + adaptorName; //both params mandatory
  }
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    // NOTE(review): scanThread is not interrupted here, so it may sleep up
    // to scanInterval ms before noticing continueScanning == false.
    continueScanning = false;
    return lastSweepStartTime;
  }
  /**
   * Returns {@link IOFileFilter} constructed using the wild character. Subclasses can override this method
   * return their own version of {@link IOFileFilter} instance.
   *
   * @return {@link IOFileFilter} constructed using the wild character. Subclasses can override this method
   * return their own version of {@link IOFileFilter} instance.
   */
  protected IOFileFilter getFileFilter() {
    return new WildcardFileFilter( this.wildCharacter );
  }
}
| 8,207 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/UDPAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.IOException;
import java.net.*;
import java.util.Arrays;
import org.apache.hadoop.chukwa.*;
import org.apache.log4j.Logger;
public class UDPAdaptor extends AbstractAdaptor {
  static Logger log = Logger.getLogger(UDPAdaptor.class);
  int portno; // port to bind; 0 requests an ephemeral port
  DatagramSocket ds;
  volatile boolean running = true; // cleared by shutdown() to stop the listener
  volatile long bytesReceived = 0; // logical stream offset, restored on restart
  String source; // chunk source tag, "udp:<port>"
  // Blocking receive loop; one thread per adaptor instance.
  class ListenThread extends Thread {
    public void run() {
      log.info("UDP adaptor " + adaptorID + " started on port " + portno + " offset =" + bytesReceived);
      byte[] buf = new byte[65535]; // max UDP payload
      DatagramPacket dp = new DatagramPacket(buf, buf.length);
      try {
        while(running) {
          ds.receive(dp);
          send(buf, dp);
        }
      } catch(Exception e) {
        // Socket close during shutdown also lands here; only log if we
        // were not asked to stop.
        if(running)
          log.error("can't read UDP messages in " + adaptorID, e);
      }
    }
  }
  ListenThread lt;
  // Packages one received datagram as a Chunk. Overridable by subclasses
  // (see SyslogAdaptor).
  public void send(byte[] buf, DatagramPacket dp) throws InterruptedException, IOException {
    byte[] trimmedBuf = Arrays.copyOf(buf, dp.getLength());
    bytesReceived += trimmedBuf.length;
    Chunk c = new ChunkImpl(type, source, bytesReceived, trimmedBuf, UDPAdaptor.this);
    dest.add(c);
  }
  @Override
  public String parseArgs(String s) {
    portno = Integer.parseInt(s);
    source = "udp:"+portno;
    return s;
  }
  @Override
  public void start(long offset) throws AdaptorException {
    try {
      bytesReceived = offset;
      ds = new DatagramSocket(portno);
      portno = ds.getLocalPort(); // resolve actual port if 0 was requested
      lt = new ListenThread();
      lt.start();
    } catch(Exception e) {
      throw new AdaptorException(e);
    }
  }
  @Override
  public String getCurrentStatus() {
    return type + " " + portno;
  }
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    try {
      running = false;
      ds.close(); // unblocks the receive() in ListenThread
      //    if(shutdownPolicy == AdaptorShutdownPolicy.GRACEFULLY)
      lt.join();
    } catch(InterruptedException e) {}
    return bytesReceived;
  }
}
| 8,208 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/RegExDirTailingAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.RegexFileFilter;
/**
*
*/
/**
 * A {@link DirTailingAdaptor} variant that interprets its filter argument
 * as a Java regular expression rather than a glob-style wildcard.
 */
public class RegExDirTailingAdaptor extends DirTailingAdaptor {

  /**
   * @return a filter matching file names against the configured regex pattern
   */
  @Override
  public IOFileFilter getFileFilter() {
    return new RegexFileFilter(this.wildCharacter);
  }
}
| 8,209 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/SyslogAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.IOException;
import java.net.*;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.log4j.Logger;
/**
* SyslogAdaptor reads UDP syslog message from a port and convert the message to Chukwa
* Chunk for transport from Chukwa Agent to Chukwa Collector. Usage:
*
* add SyslogAdaptor [DataType] [Port] [SequenceNumber]
*
* Syslog protocol facility name is mapped to Chukwa Data Type
* by SyslogAdaptor, hence each UDP port can support up to 24 data streams.
*
* Data Type mapping can be overwritten in Chukwa Agent Configuration file, i.e.:
*
* <property>
* <name>syslog.adaptor.port.9095.facility.LOCAL1</name>
* <value>HADOOP</value>
* </property>
*
* When demux takes place, data received on port 9095 with facility name LOCAL0 will
* be processed by demux parser for data type "HADOOP".
*/
public class SyslogAdaptor extends UDPAdaptor {
  private final static Logger log = Logger.getLogger(SyslogAdaptor.class);
  /** Syslog facility codes in protocol order; ordinal() == facility number. */
  public enum FacilityType { KERN, USER, MAIL, DAEMON, AUTH, SYSLOG, LPR, NEWS, UUCP, CRON, AUTHPRIV, FTP, NTP, AUDIT, ALERT, CLOCK, LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7 }
  /** Maps facility number to the Chukwa data type configured for it. */
  public HashMap<Integer, String> facilityMap;
  // NOTE: this class used to re-declare ds, running and bytesReceived,
  // shadowing UDPAdaptor's fields. send() then incremented a different
  // bytesReceived than start()/shutdown() used, losing checkpoint offsets
  // across restarts. The shadowing fields are removed; the inherited ones
  // are used instead.
  public SyslogAdaptor() {
    facilityMap = new HashMap<Integer, String>(FacilityType.values().length);
  }
  /**
   * Packages one syslog datagram as a Chunk, mapping the syslog facility
   * in the "&lt;PRI&gt;" header to a Chukwa data type via facilityMap.
   * Falls back to the adaptor's own data type when no header is parseable.
   */
  public void send(byte[] buf, DatagramPacket dp) throws InterruptedException, IOException {
    StringBuilder source = new StringBuilder();
    source.append(dp.getAddress());
    String dataType = type;
    byte[] trimmedBuf = Arrays.copyOf(buf, dp.getLength());
    // Guard against datagrams too short to contain "<PRI>"; the old code
    // threw StringIndexOutOfBoundsException on packets under 5 bytes.
    if (trimmedBuf.length >= 5) {
      String rawPRI = new String(trimmedBuf, 1, 4, Charset.forName("UTF-8"));
      int i = rawPRI.indexOf(">");
      if (i <= 3 && i > -1) {
        String priorityStr = rawPRI.substring(0,i);
        try {
          int priority = Integer.parseInt(priorityStr);
          int facility = priority >> 3; // facility = priority / 8
          dataType = facilityMap.get(facility);
        } catch (NumberFormatException nfe) {
          log.warn("Unsupported format detected by SyslogAdaptor:"+Arrays.toString(trimmedBuf));
        }
      }
    }
    bytesReceived += trimmedBuf.length; // inherited offset counter
    Chunk c = new ChunkImpl(dataType, source.toString(), bytesReceived, trimmedBuf, SyslogAdaptor.this);
    dest.add(c);
  }
  @Override
  public String parseArgs(String s) {
    portno = Integer.parseInt(s);
    // Allow per-port overrides of the facility -> data type mapping, e.g.
    // syslog.adaptor.port.9095.facility.LOCAL1 = HADOOP
    ChukwaConfiguration cc = new ChukwaConfiguration();
    for(FacilityType e : FacilityType.values()) {
      StringBuilder buffer = new StringBuilder();
      buffer.append("syslog.adaptor.port.");
      buffer.append(portno);
      buffer.append(".facility.");
      buffer.append(e.name());
      String dataType = cc.get(buffer.toString(), e.name());
      facilityMap.put(e.ordinal(), dataType);
    }
    return s;
  }
}
| 8,210 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/Adaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorManager;
/**
* An adaptor is a component that runs within the Local Agent, producing chunks
* of monitoring data.
*
* An adaptor can, but need not, have an associated thread. If an adaptor lacks
* a thread, it needs to arrange some mechanism to periodically get control and
* send reports such as a callback somewhere.
*
* Adaptors must be able to stop and resume without losing data, using a byte
* offset in the stream.
*
* If an adaptor crashes at byte offset n, and is restarted at byte offset k,
* with k < n, it is allowed to send different values for bytes k through n the
* second time around. However, the stream must still be parseable, assuming
* that bytes 0-k come from the first run,and bytes k - n come from the second.
*
* Note that Adaptor implements neither equals() nor hashCode(). It is never
* safe to compare two adaptors with equals(). It is safe to use adaptors
* as hash table keys, though two distinct Adaptors will appear as two distinct
* keys. This is the desired behavior, since it means that messages intended
* for one Adaptor will never be received by another, even across Adaptor
* restarts.
*/
public interface Adaptor {
  /**
   * Start this adaptor
   * @param adaptorID Adaptor ID
   *
   * @param type the application type, who is starting this adaptor
   * @param offset the stream offset of the first byte sent by this adaptor
   * @param dest Chunk receiving destination
   * @throws AdaptorException if adaptor can not be started
   */
  public void start(String adaptorID, String type, long offset,
      ChunkReceiver dest) throws AdaptorException;
  /**
   * Return the adaptor's state Should not include class name or byte
   * offset, which are written by caller. The datatype should, however,
   * be written by this method.
   *
   * @return the adaptor state as a string
   */
  public String getCurrentStatus();
  /**
   * @return the datatype this adaptor was started with, as passed to
   * {@link #parseArgs(String, String, AdaptorManager)}
   */
  public String getType();
  /**
   * Parse args, return stream name. Do not start running.
   *
   * Return the stream name, given params.
   * The stream name is the part of the Adaptor status that's used to
   * determine uniqueness.
   * @param datatype Data type
   * @param params Adaptor parameters
   * @param c Adaptor Manager
   *
   * @return Stream name as a string, null if params are malformed
   */
  public String parseArgs(String datatype, String params, AdaptorManager c);
  /**
   * Signals this adaptor to come to an orderly stop. The adaptor ought to push
   * out all the data it can before exiting depending of the shutdown policy
   * @param shutdownPolicy is defined as forcefully or gracefully
   *
   * @return the logical offset at which the adaptor was when the method return
   * @throws AdaptorException Exception on shutdown
   */
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy) throws AdaptorException;
}
| 8,211 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/FileAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.File;
import java.io.RandomAccessFile;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
class FileAdaptorTailer extends Thread {
  static Logger log = Logger.getLogger(FileAdaptorTailer.class);
  // All FileAdaptors currently waiting to have their file sent.
  private List<FileAdaptor> adaptors = null;
  /**
   * How often to call each adaptor.
   */
  int DEFAULT_SAMPLE_PERIOD_MS = 1000 * 10;
  int SAMPLE_PERIOD_MS = DEFAULT_SAMPLE_PERIOD_MS;
  public FileAdaptorTailer() {
    // Pick up the configured period when running inside an agent;
    // fall back to the default otherwise (e.g. in tests).
    ChukwaAgent agent = ChukwaAgent.getAgent();
    if (agent != null) {
      Configuration conf = agent.getConfiguration();
      if (conf != null) {
        SAMPLE_PERIOD_MS = conf.getInt(
            "chukwaAgent.adaptor.context.switch.time",
            DEFAULT_SAMPLE_PERIOD_MS);
      }
    }
    // iterations are much more common than adding a new adaptor
    adaptors = new CopyOnWriteArrayList<FileAdaptor>();
    setDaemon(true);
    start();// start the FileAdaptorTailer thread
  }
  // Daemon loop: periodically gives each registered adaptor a chance to
  // send its file. Catches Throwable so one bad adaptor cannot kill the
  // shared tailer thread.
  @Override
  public void run() {
    while(true) {
      try {
        long startTime = System.currentTimeMillis();
        for (FileAdaptor adaptor: adaptors) {
          log.info("calling sendFile for " + adaptor.toWatch.getCanonicalPath());
          adaptor.sendFile();
        }
        long timeToReadFiles = System.currentTimeMillis() - startTime;
        if (timeToReadFiles < SAMPLE_PERIOD_MS) {
          Thread.sleep(SAMPLE_PERIOD_MS);
        }
      }catch (Throwable e) {
        log.warn("Exception in FileAdaptorTailer:",e);
      }
    }
  }
  public void addFileAdaptor(FileAdaptor adaptor) {
    adaptors.add(adaptor);
  }
  public void removeFileAdaptor(FileAdaptor adaptor) {
    adaptors.remove(adaptor);
  }
}
/**
* File Adaptor push small size file in one chunk to collector
*/
public class FileAdaptor extends AbstractAdaptor {
  static Logger log = Logger.getLogger(FileAdaptor.class);
  // One shared tailer thread services all FileAdaptor instances.
  static FileAdaptorTailer tailer = null;
  static final int DEFAULT_TIMEOUT_PERIOD = 5*60*1000;
  int TIMEOUT_PERIOD = DEFAULT_TIMEOUT_PERIOD;
  static {
    tailer = new FileAdaptorTailer();
  }
  private long startTime = 0;
  private long timeOut = 0; // absolute deadline after which we give up waiting
  protected volatile boolean finished = false;
  File toWatch;
  protected RandomAccessFile reader = null;
  protected long fileReadOffset;
  protected boolean deleteFileOnClose = false;
  protected boolean shutdownCalled = false;
  /**
   * The logical offset of the first byte of the file
   */
  private long offsetOfFirstByte = 0;
  public void start(long bytes) {
    // in this case params = filename
    log.info("adaptor id: " + adaptorID + " started file adaptor on file "
        + toWatch);
    this.startTime = System.currentTimeMillis();
    TIMEOUT_PERIOD = control.getConfiguration().getInt(
        "chukwaAgent.adaptor.fileadaptor.timeoutperiod",
        DEFAULT_TIMEOUT_PERIOD);
    this.timeOut = startTime + TIMEOUT_PERIOD;
    tailer.addFileAdaptor(this);
  }
  /**
   * Called periodically by the shared tailer. Sends the whole file as one
   * chunk once the file has been quiescent for a minute (or the timeout has
   * elapsed), then deregisters and stops this adaptor.
   */
  void sendFile() {
    long now = System.currentTimeMillis() ;
    long oneMinAgo = now - (60*1000);
    if (toWatch.exists()) {
      if (toWatch.lastModified() > oneMinAgo && now < timeOut) {
        log.info("Last modified time less than one minute, keep waiting");
        return;
      } else {
        try {
          long bufSize = toWatch.length();
          byte[] buf = new byte[(int) bufSize];
          reader = new RandomAccessFile(toWatch, "r");
          reader.read(buf);
          reader.close();
          reader = null;
          long fileTime = toWatch.lastModified();
          int bytesUsed = extractRecords(dest, 0, buf, fileTime);
          this.fileReadOffset = bytesUsed;
          finished = true;
          deregisterAndStop();
          cleanUp();
        } catch(Exception e) {
          log.warn("Exception while trying to read: " + toWatch.getAbsolutePath(),e);
        } finally {
          if (reader != null) {
            try {
              reader.close();
            } catch (Exception e) {
              log.debug(ExceptionUtil.getStackTrace(e));
            }
            reader = null;
          }
        }
      }
    } else {
      // File never showed up; give up once the timeout deadline passes.
      if (now > timeOut) {
        finished = true;
        log.warn("Couldn't read this file: " + toWatch.getAbsolutePath());
        deregisterAndStop();
        cleanUp() ;
      }
    }
  }
  // Removes this adaptor from the shared tailer and releases the reader.
  private void cleanUp() {
    tailer.removeFileAdaptor(this);
    if (reader != null) {
      try {
        reader.close();
      } catch (Exception e) {
        log.debug(ExceptionUtil.getStackTrace(e));
      }
      reader = null;
    }
  }
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy) {
    log.info("Enter Shutdown:" + shutdownPolicy.name()+ " - ObjectId:" + this);
    switch(shutdownPolicy) {
      case GRACEFULLY : {
        // Wait up to ~60s for the tailer to finish sending the file.
        int retry = 0;
        while (!finished && retry < 60) {
          try {
            log.info("GRACEFULLY Retry:" + retry);
            Thread.sleep(1000);
            retry++;
          } catch (InterruptedException ex) {
          }
        }
      }
      break;
      case WAIT_TILL_FINISHED : {
        // Wait indefinitely for the tailer to finish.
        int retry = 0;
        while (!finished) {
          try {
            if (retry%100 == 0) {
              log.info("WAIT_TILL_FINISHED Retry:" + retry);
            }
            Thread.sleep(1000);
            retry++;
          } catch (InterruptedException ex) {
          }
        }
      }
      break;
      default :
        cleanUp();
        break;
    }
    if (deleteFileOnClose && toWatch != null) {
      if (log.isDebugEnabled()) {
        log.debug("About to delete " + toWatch.getAbsolutePath());
      }
      if (toWatch.delete()) {
        // Fixed guard/level mismatch: this branch used to call log.debug()
        // inside an isInfoEnabled() check, silently dropping the message
        // whenever debug logging was off.
        if (log.isInfoEnabled()) {
          log.info("Successfully deleted " + toWatch.getAbsolutePath());
        }
      } else {
        if (log.isEnabledFor(Level.WARN)) {
          log.warn("Could not delete " + toWatch.getAbsolutePath() + " (for unknown reason)");
        }
      }
    }
    log.info("Exit Shutdown:" + shutdownPolicy.name()+ " - ObjectId:" + this);
    return fileReadOffset + offsetOfFirstByte;
  }
  // Accepted forms: "<file>", "<file> delete", "<offset> <file>",
  // "<offset> <file> delete".
  public String parseArgs(String params) {
    String[] words = params.split(" ");
    if (words.length == 2) {
      if (words[1].equals("delete")) {
        deleteFileOnClose = true;
        toWatch = new File(words[0]);
      } else {
        offsetOfFirstByte = Long.parseLong(words[0]);
        toWatch = new File(words[1]);
      }
    } else if (words.length == 3) {
      offsetOfFirstByte = Long.parseLong(words[0]);
      toWatch = new File(words[1]);
      deleteFileOnClose = words[2].equals("delete");
    } else {
      toWatch = new File(params);
    }
    return toWatch.getAbsolutePath();
  }
  /**
   * Extract records from a byte sequence
   *
   * @param eq
   *          the queue to stick the new chunk[s] in
   * @param buffOffsetInFile
   *          the byte offset in the stream at which buf[] begins
   * @param buf
   *          the byte buffer to extract records from
   * @param fileTime
   *          the file's last-modified time, attached as a chunk tag
   * @return the number of bytes processed
   * @throws InterruptedException if interrupted while enqueueing the chunk
   */
  protected int extractRecords(final ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf, long fileTime) throws InterruptedException {
    final ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);
    chunk.addTag("time=\"" + fileTime + "\"");
    log.info("Adding " + toWatch.getAbsolutePath() + " to the queue");
    eq.add(chunk);
    log.info( toWatch.getAbsolutePath() + " added to the queue");
    return buf.length;
  }
  @Override
  public String getCurrentStatus() {
    return type.trim() + " " + offsetOfFirstByte + " " + toWatch.getPath();
  }
}
| 8,212 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/NotifyOnCommitAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
/**
 * An {@link Adaptor} that wants to be informed when its output has been
 * durably committed, e.g. so it can discard local buffering up to that point.
 */
public interface NotifyOnCommitAdaptor extends Adaptor {

  /**
   * Invoked when all bytes up to the given stream offset have been committed.
   *
   * @param commitedByte the highest committed byte offset
   */
  void committed(long commitedByte);
}
| 8,213 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/AdaptorShutdownPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
/**
 * How an adaptor should behave when asked to stop: immediately, after
 * flushing, after completing its work, or in preparation for a restart.
 */
public enum AdaptorShutdownPolicy {
  HARD_STOP, GRACEFULLY, WAIT_TILL_FINISHED, RESTARTING;

  /** @return a human-readable description of this shutdown mode */
  @Override
  public String toString() {
    switch (this) {
      case GRACEFULLY:
        return "Gracefully";
      case HARD_STOP:
        return "Abruptly";
      case WAIT_TILL_FINISHED:
        return "Once finished";
      case RESTARTING:
        return "Prepare to restart";
      default:
        return "unknown mode";
    }
  }
}
| 8,214 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/OozieAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.IOException;
import java.nio.charset.Charset;
import java.security.PrivilegedExceptionAction;
import java.util.Calendar;
import java.util.TimeZone;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.util.ChukwaUtil;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.util.RestUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
public class OozieAdaptor extends AbstractAdaptor {
private static Logger log = Logger.getLogger(OozieAdaptor.class);
private String uri;
private long sendOffset;
private Configuration chukwaConfiguration = null;
private static UserGroupInformation UGI = null;
private boolean isKerberosEnabled = false;
private int length = 0;
private final ScheduledExecutorService scheduler = Executors
.newScheduledThreadPool(1);
private static final long initialDelay = 60; // seconds
private long periodicity = 60; // seconds
private ScheduledFuture<?> scheduledCollectorThread;
@Override
public String parseArgs(String s) {
String[] tokens = s.split(" ");
if (tokens.length == 2) {
uri = tokens[0];
try {
periodicity = Integer.parseInt(tokens[1]);
} catch (NumberFormatException e) {
log.warn("OozieAdaptor: incorrect argument for period. Expecting number");
return null;
}
} else {
log.warn("bad syntax in OozieAdaptor args");
return null;
}
return s;
}
@Override
public void start(long offset) throws AdaptorException {
sendOffset = offset;
init(); // initialize the configuration
log.info("Starting Oozie Adaptor with [ " + sendOffset + " ] offset");
scheduledCollectorThread = scheduler.scheduleAtFixedRate(
new OozieMetricsCollector(), initialDelay, periodicity,
TimeUnit.SECONDS);
log.info("scheduled");
}
@Override
public String getCurrentStatus() {
StringBuilder buffer = new StringBuilder();
buffer.append(type);
buffer.append(" ");
buffer.append(uri);
buffer.append(" ");
buffer.append(periodicity);
return buffer.toString();
}
@Override
public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
throws AdaptorException {
scheduledCollectorThread.cancel(true);
scheduler.shutdown();
return sendOffset;
}
private class OozieMetricsCollector implements Runnable {
@Override
public void run() {
try {
if (isKerberosEnabled) {
if (UGI == null) {
throw new IllegalStateException("UGI Login context is null");
}
UGI.checkTGTAndReloginFromKeytab();
length = UGI.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
return processMetrics();
}
});
} else {
length = processMetrics();
}
if (length <= 0) {
log.warn("Oozie is either not responding or sending zero payload");
} else {
log.info("Processing a oozie instrumentation payload of [" + length
+ "] bytes");
}
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
log.error("Exception occured while getting oozie metrics " + e);
}
}
}
private void init() {
if (getChukwaConfiguration() == null) {
setChukwaConfiguration(ChukwaUtil.readConfiguration());
}
String authType = getChukwaConfiguration().get(
"chukwaAgent.hadoop.authentication.type");
if (authType != null && authType.equalsIgnoreCase("kerberos")) {
login(); // get the UGI context
isKerberosEnabled = true;
}
}
/**
 * Performs a Kerberos keytab login and caches the resulting UGI.
 * Principal and keytab location come from the Chukwa configuration; the
 * principal defaults to the current OS user when unset.
 */
private void login() {
  try {
    String principalConfig = getChukwaConfiguration().get(
        "chukwaAgent.hadoop.authentication.kerberos.principal",
        System.getProperty("user.name"));
    // hostname is deliberately null: SecurityUtil then substitutes the
    // local host name when expanding a _HOST token in the principal.
    String hostname = null;
    String principalName = SecurityUtil.getServerPrincipal(principalConfig,
        hostname);
    UGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
        principalName,
        getChukwaConfiguration().get(
            "chukwaAgent.hadoop.authentication.kerberos.keytab"));
  } catch (IOException e) {
    // Login failure is logged but not rethrown; UGI stays null and the
    // collector task will raise IllegalStateException on its next run.
    log.error(ExceptionUtil.getStackTrace(e));
  }
}
/**
 * Fetches the current Oozie metrics document and forwards it downstream.
 *
 * @return the number of payload bytes handed to the receiver
 */
private int processMetrics() {
  String payload = getOozieMetrics();
  byte[] raw = payload.getBytes(Charset.forName("UTF-8"));
  return addChunkToReceiver(raw);
}

/**
 * @return the raw metrics document fetched from the Oozie REST endpoint
 */
private String getOozieMetrics() {
  return RestUtil.getResponseAsString(uri);
}
/**
 * Wraps the payload in a ChunkImpl tagged with a UTC timestamp and hands
 * it to the destination queue. Failures are logged, not propagated.
 *
 * @param data payload bytes to send
 * @return the payload length in bytes (returned even if enqueueing failed)
 */
public int addChunkToReceiver(byte[] data) {
  try {
    sendOffset += data.length;
    ChunkImpl chunk = new ChunkImpl(type, "REST", sendOffset, data, this);
    Calendar utcNow = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    long rightNow = utcNow.getTimeInMillis();
    chunk.addTag("timeStamp=\"" + rightNow + "\"");
    dest.add(chunk);
  } catch (Exception e) {
    log.error(ExceptionUtil.getStackTrace(e));
  }
  return data.length;
}
/**
 * @return the Chukwa configuration in use; may be null until init() runs
 */
public Configuration getChukwaConfiguration() {
  return chukwaConfiguration;
}

/**
 * @param chukwaConfiguration the Chukwa configuration this adaptor should use
 */
public void setChukwaConfiguration(Configuration chukwaConfiguration) {
  this.chukwaConfiguration = chukwaConfiguration;
}
}
| 8,215 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/sigar/SystemMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Adaptor that is able to collect system metrics by using Hyperic Sigar.
* <P>
* This adaptor is added to an Agent like so:
* <code>
* add SystemMetrics [dataType] [seconds]
* </code>
* <ul>
* <li><code>dataType</code> - The chukwa data type, use SystemMetrics to map to
* default SystemMetrics demux parser.</li>
* <li><code>seconds</code> - Interval to collect system metrics, default is 60 seconds.</li>
* </ul>
* </P>
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.sigar;
import java.util.Timer;
import org.apache.hadoop.chukwa.datacollection.adaptor.AbstractAdaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.log4j.Logger;
public class SystemMetrics extends AbstractAdaptor {
  static Logger log = Logger.getLogger(SystemMetrics.class);
  // Sampling interval in milliseconds; 5 s default until parseArgs overrides it.
  private long period = 5 * 1000;
  private SigarRunner runner;
  private Timer timer;

  /**
   * Parses adaptor arguments of the form "&lt;seconds&gt; ...". When an
   * interval is present it is converted to milliseconds and collection is
   * started immediately.
   *
   * @param args raw adaptor argument string
   * @return the argument string, unchanged
   */
  @Override
  public String parseArgs(String args) {
    int spOffset = args.indexOf(' ');
    if (spOffset > 0) {
      try {
        period = Long.parseLong(args.substring(0, spOffset));
        period = period * 1000;
        // NOTE(review): start() ignores its offset argument, so passing
        // spOffset here is harmless -- confirm that is intentional.
        start(spOffset);
      } catch (NumberFormatException e) {
        StringBuilder buffer = new StringBuilder();
        buffer.append("SystemMetrics: sample interval ");
        buffer.append(args.substring(0, spOffset));
        buffer.append(" can't be parsed.");
        log.warn(buffer.toString());
      } catch (AdaptorException e) {
        log.warn("Error parsing parameter for SystemMetrics adaptor.");
      }
    }
    return args;
  }

  /**
   * Schedules periodic metric collection. The offset parameter is unused:
   * system metrics are sampled live rather than replayed.
   */
  @Override
  public void start(long offset) throws AdaptorException {
    if (timer == null) {
      timer = new Timer();
      runner = new SigarRunner(dest, SystemMetrics.this);
    }
    timer.scheduleAtFixedRate(runner, 0, period);
  }

  /**
   * @return checkpoint status string "&lt;dataType&gt; &lt;seconds&gt;"
   */
  @Override
  public String getCurrentStatus() {
    StringBuilder buffer = new StringBuilder();
    buffer.append(type);
    buffer.append(" ");
    buffer.append(period / 1000);
    return buffer.toString();
  }

  /**
   * Stops metric collection.
   * Fix: guard against a null timer -- shutdown() can be invoked before
   * start() ever created it, which previously threw a
   * NullPointerException.
   */
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    if (timer != null) {
      timer.cancel();
    }
    return 0;
  }
}
| 8,216 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/sigar/SigarRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.sigar;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.TimerTask;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.log4j.Logger;
import org.hyperic.sigar.CpuInfo;
import org.hyperic.sigar.CpuPerc;
import org.hyperic.sigar.FileSystem;
import org.hyperic.sigar.FileSystemUsage;
import org.hyperic.sigar.Mem;
import org.hyperic.sigar.SigarException;
import org.hyperic.sigar.Swap;
import org.hyperic.sigar.NetInterfaceStat;
import org.hyperic.sigar.Sigar;
import org.hyperic.sigar.Uptime;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
/**
* TimerTask for collect system metrics from Hyperic Sigar.
*/
/**
 * TimerTask that collects system metrics (CPU, uptime, load, memory, swap,
 * network, disk) from Hyperic Sigar on each tick and emits them as a single
 * JSON chunk. Network and disk counters are reported as deltas against the
 * previous sample.
 */
public class SigarRunner extends TimerTask {

  private static Sigar sigar = new Sigar();
  private static Logger log = Logger.getLogger(SigarRunner.class);
  private ChunkReceiver receiver = null;
  // Running byte count of emitted payloads, used as the chunk sequence id.
  private long sendOffset = 0;
  private SystemMetrics systemMetrics;
  // Last observed per-interface counters, keyed by interface name.
  private HashMap<String, JSONObject> previousNetworkStats = new HashMap<String, JSONObject>();
  // Last observed per-device disk counters, keyed by device name.
  private HashMap<String, JSONObject> previousDiskStats = new HashMap<String, JSONObject>();

  public SigarRunner(ChunkReceiver dest, SystemMetrics systemMetrics) {
    receiver = dest;
    this.systemMetrics = systemMetrics;
  }

  @SuppressWarnings("unchecked")
  @Override
  public void run() {
    // skip == true means this sample only primed the delta maps (first
    // sighting of a device/interface) so the chunk is suppressed.
    // NOTE(review): skip is overwritten by both the network and the disk
    // loops, so only the last entry processed decides whether the whole
    // chunk is sent -- confirm this is intentional.
    boolean skip = false;
    CpuInfo[] cpuinfo = null;
    CpuPerc[] cpuPerc = null;
    Mem mem = null;
    Swap swap = null;
    FileSystem[] fs = null;
    String[] netIf = null;
    Uptime uptime = null;
    double[] loadavg = null;
    JSONObject json = new JSONObject();
    try {
      // CPU utilization
      JSONArray load = new JSONArray();
      try {
        cpuinfo = sigar.getCpuInfoList();
        cpuPerc = sigar.getCpuPercList();
        JSONArray cpuList = new JSONArray();
        for (int i = 0; i < cpuinfo.length; i++) {
          JSONObject cpuMap = new JSONObject();
          cpuMap.putAll(cpuinfo[i].toMap());
          // Sigar reports fractions in [0,1]; scale to percentages.
          cpuMap.put("combined", cpuPerc[i].getCombined() * 100);
          cpuMap.put("user", cpuPerc[i].getUser() * 100);
          cpuMap.put("sys", cpuPerc[i].getSys() * 100);
          cpuMap.put("idle", cpuPerc[i].getIdle() * 100);
          cpuMap.put("wait", cpuPerc[i].getWait() * 100);
          cpuMap.put("nice", cpuPerc[i].getNice() * 100);
          cpuMap.put("irq", cpuPerc[i].getIrq() * 100);
          cpuList.add(cpuMap);
        }
        // NOTE(review): return value discarded -- looks like a leftover call.
        sigar.getCpuPerc();
        json.put("cpu", cpuList);
        // Uptime
        uptime = sigar.getUptime();
        json.put("uptime", uptime.getUptime());
        // Load Average (1, 5 and 15 minute averages)
        loadavg = sigar.getLoadAverage();
        load.add(loadavg[0]);
        load.add(loadavg[1]);
        load.add(loadavg[2]);
      } catch (SigarException se) {
        log.error("SigarException caused during collection of CPU utilization");
        log.error(ExceptionUtils.getStackTrace(se));
      } finally {
        // Always attach the (possibly empty) load array.
        json.put("loadavg", load);
      }
      // Memory Utilization
      JSONObject memMap = new JSONObject();
      JSONObject swapMap = new JSONObject();
      try {
        mem = sigar.getMem();
        memMap.putAll(mem.toMap());
        // Swap Utilization
        swap = sigar.getSwap();
        swapMap.putAll(swap.toMap());
      } catch (SigarException se) {
        log.error("SigarException caused during collection of Memory utilization");
        log.error(ExceptionUtils.getStackTrace(se));
      } finally {
        json.put("memory", memMap);
        json.put("swap", swapMap);
      }
      // Network Utilization: report deltas vs. the previous sample per interface.
      JSONArray netInterfaces = new JSONArray();
      try {
        netIf = sigar.getNetInterfaceList();
        for (int i = 0; i < netIf.length; i++) {
          NetInterfaceStat net = new NetInterfaceStat();
          try {
            net = sigar.getNetInterfaceStat(netIf[i]);
          } catch (SigarException e) {
            // Ignore the exception when trying to stat network interface
            log.warn("SigarException trying to stat network device " + netIf[i]);
            continue;
          }
          JSONObject netMap = new JSONObject();
          netMap.putAll(net.toMap());
          if (previousNetworkStats.containsKey(netIf[i])) {
            // Mutate the stored previous snapshot into a delta record.
            JSONObject deltaMap = previousNetworkStats.get(netIf[i]);
            deltaMap.put("RxBytes", Long.parseLong(netMap.get("RxBytes").toString()) - Long.parseLong(deltaMap.get("RxBytes").toString()));
            deltaMap.put("RxDropped", Long.parseLong(netMap.get("RxDropped").toString()) - Long.parseLong(deltaMap.get("RxDropped").toString()));
            deltaMap.put("RxErrors", Long.parseLong(netMap.get("RxErrors").toString()) - Long.parseLong(deltaMap.get("RxErrors").toString()));
            deltaMap.put("RxPackets", Long.parseLong(netMap.get("RxPackets").toString()) - Long.parseLong(deltaMap.get("RxPackets").toString()));
            deltaMap.put("TxBytes", Long.parseLong(netMap.get("TxBytes").toString()) - Long.parseLong(deltaMap.get("TxBytes").toString()));
            deltaMap.put("TxCollisions", Long.parseLong(netMap.get("TxCollisions").toString()) - Long.parseLong(deltaMap.get("TxCollisions").toString()));
            deltaMap.put("TxErrors", Long.parseLong(netMap.get("TxErrors").toString()) - Long.parseLong(deltaMap.get("TxErrors").toString()));
            deltaMap.put("TxPackets", Long.parseLong(netMap.get("TxPackets").toString()) - Long.parseLong(deltaMap.get("TxPackets").toString()));
            netInterfaces.add(deltaMap);
            skip = false;
          } else {
            // First sighting: emit raw counters and suppress the chunk.
            netInterfaces.add(netMap);
            skip = true;
          }
          previousNetworkStats.put(netIf[i], netMap);
        }
      } catch (SigarException se) {
        log.error("SigarException caused during collection of Network utilization");
        log.error(ExceptionUtils.getStackTrace(se));
      } finally {
        json.put("network", netInterfaces);
      }
      // Filesystem Utilization: same delta strategy, keyed by device name.
      JSONArray fsList = new JSONArray();
      try {
        fs = sigar.getFileSystemList();
        for (int i = 0; i < fs.length; i++) {
          FileSystemUsage usage = sigar.getFileSystemUsage(fs[i].getDirName());
          JSONObject fsMap = new JSONObject();
          fsMap.putAll(fs[i].toMap());
          fsMap.put("ReadBytes", usage.getDiskReadBytes());
          fsMap.put("Reads", usage.getDiskReads());
          fsMap.put("WriteBytes", usage.getDiskWriteBytes());
          fsMap.put("Writes", usage.getDiskWrites());
          if (previousDiskStats.containsKey(fs[i].getDevName())) {
            JSONObject deltaMap = previousDiskStats.get(fs[i].getDevName());
            deltaMap.put("ReadBytes", usage.getDiskReadBytes() - (Long) deltaMap.get("ReadBytes"));
            deltaMap.put("Reads", usage.getDiskReads() - (Long) deltaMap.get("Reads"));
            deltaMap.put("WriteBytes", usage.getDiskWriteBytes() - (Long) deltaMap.get("WriteBytes"));
            deltaMap.put("Writes", usage.getDiskWrites() - (Long) deltaMap.get("Writes"));
            // Total/Used are gauges, not counters, so report them as-is.
            deltaMap.put("Total", usage.getTotal());
            deltaMap.put("Used", usage.getUsed());
            deltaMap.putAll(fs[i].toMap());
            fsList.add(deltaMap);
            skip = false;
          } else {
            fsList.add(fsMap);
            skip = true;
          }
          previousDiskStats.put(fs[i].getDevName(), fsMap);
        }
      } catch (SigarException se) {
        log.error("SigarException caused during collection of FileSystem utilization");
        log.error(ExceptionUtils.getStackTrace(se));
      } finally {
        json.put("disk", fsList);
      }
      json.put("timestamp", System.currentTimeMillis());
      byte[] data = json.toString().getBytes(Charset.forName("UTF-8"));
      sendOffset += data.length;
      ChunkImpl c = new ChunkImpl("SystemMetrics", "Sigar", sendOffset, data, systemMetrics);
      if (!skip) {
        receiver.add(c);
      }
    } catch (InterruptedException se) {
      // receiver.add may be interrupted during agent shutdown.
      log.error(ExceptionUtil.getStackTrace(se));
    }
  }
}
| 8,217 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/heartbeat/StatusCheckerException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat;
/**
 * Exception thrown by {@link StatusChecker} implementations when a status
 * probe cannot be initialized or evaluated.
 */
public class StatusCheckerException extends Exception {

  private static final long serialVersionUID = -1039172824878846049L;

  /** Creates an exception with neither message nor cause. */
  public StatusCheckerException() {
    super();
  }

  /**
   * @param message detail message
   * @param cause underlying cause
   */
  public StatusCheckerException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param message detail message
   */
  public StatusCheckerException(String message) {
    super(message);
  }

  /**
   * @param cause underlying cause
   */
  public StatusCheckerException(Throwable cause) {
    super(cause);
  }
}
| 8,218 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/heartbeat/ChukwaStatusChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.json.simple.JSONObject;
/**
 * StatusChecker reporting the local Chukwa agent's health: component name,
 * host name and the number of currently-registered adaptors.
 */
public class ChukwaStatusChecker implements StatusChecker {

  JSONObject status = new JSONObject();
  ChukwaAgent agent;

  @SuppressWarnings("unchecked")
  public ChukwaStatusChecker() throws AdaptorException {
    agent = ChukwaAgent.getAgent();
    status.put("component", "Chukwa.Agent");
    try {
      status.put("host", InetAddress.getLocalHost().getHostName());
    } catch (UnknownHostException e) {
      // Fix: wrap the exception itself, not e.getCause() -- the cause of an
      // UnknownHostException is usually null, which discarded the stack trace.
      throw new AdaptorException("Could not get localhost name", e);
    }
  }

  /**
   * @return status document with the current adaptor count filled in
   */
  @SuppressWarnings("unchecked")
  @Override
  public JSONObject getStatus() {
    status.put("adaptor.count", agent.getAdaptorList().size());
    return status;
  }

  @Override
  public void init(String... args) {
    // not used
  }
}
| 8,219 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/heartbeat/StatusChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat;
import org.json.simple.JSONObject;
/**
* Any service status being sent through HeartbeatAdaptor should
* implement this interface
*/
public interface StatusChecker {

  /**
   * Initializes the checker with adaptor-supplied arguments.
   *
   * @param args checker-specific arguments
   * @throws StatusCheckerException if the arguments are missing or invalid
   */
  public void init(String... args) throws StatusCheckerException;

  /**
   * @return a JSON document describing the current status of the monitored
   *         service
   */
  public JSONObject getStatus();
}
| 8,220 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/heartbeat/HttpStatusChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.log4j.Logger;
import org.json.simple.JSONObject;
/**
* Check the status through http interface. Takes the component name to be included ion
* the status and the uri as the arguments.
*
*/
public class HttpStatusChecker implements StatusChecker {

  // Bound on both connect and read so a wedged endpoint cannot hang the
  // heartbeat thread; previously both waited indefinitely (default 0).
  private static final int TIMEOUT_MS = 10000;

  private String componentName, uri;
  private JSONObject status = new JSONObject();
  Logger log = Logger.getLogger(HttpStatusChecker.class);

  /**
   * @param args exactly two arguments: component name and URI to probe
   * @throws StatusCheckerException if the argument count is wrong
   */
  @SuppressWarnings("unchecked")
  @Override
  public void init(String... args) throws StatusCheckerException {
    if (args.length != 2) {
      throw new StatusCheckerException("Insufficient number of arguments for HttpStatusChecker");
    }
    componentName = args[0];
    uri = args[1];
    status.put("component", componentName);
    status.put("uri", uri);
  }

  /**
   * Probes the URI; any connection success counts as "running", any
   * I/O failure (including timeout) as "stopped".
   *
   * @return the status document with the "status" field updated
   */
  @SuppressWarnings("unchecked")
  @Override
  public JSONObject getStatus() {
    HttpURLConnection connection = null;
    try {
      URL url = new URL(uri);
      connection = (HttpURLConnection) url.openConnection();
      connection.setConnectTimeout(TIMEOUT_MS);
      connection.setReadTimeout(TIMEOUT_MS);
      connection.connect();
      status.put("status", "running");
    } catch (IOException e) {
      status.put("status", "stopped");
    } finally {
      if (connection != null) {
        connection.disconnect();
      }
    }
    return status;
  }
}
| 8,221 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/CharFileTailingAdaptorUTF8.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import java.util.ArrayList;
/**
* A subclass of FileTailingAdaptor that reads UTF8/ascii files and splits
* records at carriage returns.
*
*/
public class CharFileTailingAdaptorUTF8 extends FileTailingAdaptor {

  private static final char SEPARATOR = '\n';

  // Scratch list of separator positions in the current buffer.
  // NOTE(review): despite the original comment below, this is a per-instance
  // field, not shared across instances.
  private ArrayList<Integer> offsets = new ArrayList<Integer>();

  /**
   * Splits the buffer at newline characters and emits one chunk whose record
   * offsets mark each complete line; bytes after the last newline are left
   * for the next pass.
   *
   * Note: this method reuses a temporary ArrayList, so ints are copied each
   * call; this could be a performance issue. Also, 'offsets' never shrinks,
   * and will be of size proportional to the largest number of lines ever
   * seen in an event.
   *
   * @param eq queue receiving the assembled chunk
   * @param buffOffsetInFile file offset of buf[0]
   * @param buf bytes read from the tailed file
   * @return number of bytes consumed, or 0 if no complete line was found
   */
  @Override
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    for (int i = 0; i < buf.length; ++i) {
      if (buf[i] == SEPARATOR) {
        offsets.add(i);
      }
    }
    if (offsets.size() > 0) {
      int[] offsets_i = new int[offsets.size()];
      for (int i = 0; i < offsets_i.length; ++i)
        offsets_i[i] = offsets.get(i);
      int bytesUsed = offsets_i[offsets_i.length - 1] + 1; // char at last
      // offset uses a byte
      assert bytesUsed > 0 : " shouldn't send empty events";
      // NOTE(review): the chunk carries the whole buffer, including any
      // trailing partial line beyond bytesUsed; record offsets bound the
      // usable region -- confirm downstream relies on that.
      ChunkImpl event = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, buf, this);
      event.setRecordOffsets(offsets_i);
      eq.add(event);
      offsets.clear();
      return bytesUsed;
    } else
      return 0;
  }
}
| 8,222 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/RCheckFTAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.Collections;
import java.util.LinkedList;
import org.apache.commons.lang3.builder.HashCodeBuilder;
/**
* Checkpoint state:
* date modified of most-recently tailed file, offset of first byte of that file,
* then regular FTA arts
*
*/
public class RCheckFTAdaptor extends LWFTAdaptor implements FileFilter {

  /**
   * A file paired with its last-modified time, ordered oldest-first so the
   * rotation queue replays rotated files in the order they were written.
   */
  private static class FPair implements Comparable<FPair> {
    File f;
    long mod;

    FPair(File f) {
      this.f = f;
      mod = f.lastModified();
    }

    /**
     * -1 implies this is LESS THAN o
     */
    @Override
    public int compareTo(FPair o) {
      if (mod < o.mod)
        return -1;
      else if (mod > o.mod)
        return 1;
      // want toWatch to be last after a rotation; otherwise, this is basically
      // just a heuristic that hasn't been tuned yet
      else return (o.f.getName().compareTo(f.getName()));
    }

    @Override
    public boolean equals(Object o) {
      // NOTE(review): equality by timestamp only -- two different files with
      // the same mod time compare equal. Consistent with compareTo's primary
      // key but surprising; confirm intended.
      if (o instanceof FPair) {
        return mod == ((FPair) o).mod;
      } else {
        return false;
      }
    }

    @Override
    public int hashCode() {
      return new HashCodeBuilder(643, 1321).
          append(this.mod).
          toHashCode();
    }
  }

  // Mod-time of the most recently finished (fully read) file; files older
  // than this are ignored by accept().
  long prevFileLastModDate = 0;
  // Pending rotated files, oldest first.
  LinkedList<FPair> fileQ = new LinkedList<FPair>();
  // Base name shared by the watched file and its rotated siblings.
  String fBaseName;
  File cur; // this is the actual physical file being watched.
  // in contrast, toWatch is the path specified by the user
  boolean caughtUp = false;

  /**
   * Check for date-modified and offset; if absent assume we just got a name.
   */
  @Override
  public String parseArgs(String params) {
    Pattern cmd = Pattern.compile("d:(\\d+)\\s+(\\d+)\\s+(.+)\\s?");
    Matcher m = cmd.matcher(params);
    if (m.matches()) {
      prevFileLastModDate = Long.parseLong(m.group(1));
      offsetOfFirstByte = Long.parseLong(m.group(2));
      toWatch = new File(m.group(3)).getAbsoluteFile();
    } else {
      toWatch = new File(params.trim()).getAbsoluteFile();
    }
    fBaseName = toWatch.getName();
    return toWatch.getAbsolutePath();
  }

  /**
   * @return checkpoint string "&lt;type&gt; d:&lt;modDate&gt; &lt;offset&gt; &lt;path&gt;",
   *         the format parseArgs() accepts back
   */
  public String getCurrentStatus() {
    return type.trim() + " d:" + prevFileLastModDate + " " + offsetOfFirstByte + " " + toWatch.getPath();
  }

  /**
   * FileFilter: accepts the watched file itself plus any sibling with the
   * same name prefix modified after the last fully-read file.
   */
  @Override
  public boolean accept(File pathname) {
    return pathname.getName().startsWith(fBaseName) &&
        (pathname.getName().equals(fBaseName) ||
            pathname.lastModified() > prevFileLastModDate);
  }

  /** Rebuilds the rotation queue from the watched file's directory listing. */
  protected void mkFileQ() {
    File toWatchDir = toWatch.getParentFile();
    File[] candidates = toWatchDir.listFiles(this);
    if (candidates == null) {
      log.error(toWatchDir + " is not a directory in " + adaptorID);
    } else {
      log.debug("saw " + candidates.length + " files matching pattern");
      fileQ = new LinkedList<FPair>();
      for (File f : candidates)
        fileQ.add(new FPair(f));
      Collections.sort(fileQ);
    }
  }

  /**
   * Pops the next file off the rotation queue into 'cur'; sets caughtUp when
   * 'cur' is the live (unrotated) file or the queue is empty.
   */
  protected void advanceQ() {
    FPair next = fileQ.poll();
    if (next != null) {
      cur = next.f;
      caughtUp = toWatch.equals(cur);
      if (caughtUp && !fileQ.isEmpty())
        log.warn("expected rotation queue to be empty when caught up...");
    }
    else {
      cur = null;
      caughtUp = true;
    }
  }

  @Override
  public void start(long offset) {
    mkFileQ(); // figure out what to watch
    advanceQ();
    super.start(offset);
  }

  /**
   * One tailing pass: rescan for rotations when caught up, then read new
   * bytes from 'cur', handling shrink (truncation/rotation) and EOF on
   * already-rotated files.
   *
   * NOTE(review): hasMoreData is never set to true anywhere in this method,
   * so it always returns false; check whether the result of slurp() should
   * be captured into it.
   */
  @Override
  public boolean tailFile()
      throws InterruptedException {
    boolean hasMoreData = false;
    try {
      if (caughtUp) {
        // we're caught up and watching an unrotated file
        mkFileQ(); // figure out what to watch
        advanceQ();
      }
      if (cur == null) // file we're watching doesn't exist
        return false;
      long len = cur.length();
      // Capture the pre-read mod time so changes made during the read are
      // not missed on the next pass.
      long tsPreTail = cur.exists() ? cur.lastModified() : prevFileLastModDate;
      if (log.isDebugEnabled())
        log.debug(adaptorID + " treating " + cur + " as " + toWatch + " len = " + len);
      if (len < fileReadOffset) {
        log.info("file " + cur + " shrank from " + fileReadOffset + " to " + len);
        // no unseen changes to prev version, since mod date is older than last scan.
        offsetOfFirstByte += fileReadOffset;
        fileReadOffset = 0;
      } else if (len > fileReadOffset) {
        log.debug("slurping from " + cur + " at offset " + fileReadOffset);
        RandomAccessFile reader = new RandomAccessFile(cur, "r");
        slurp(len, reader);
        reader.close();
      } else {
        // we're either caught up or at EOF
        if (!caughtUp) {
          prevFileLastModDate = cur.lastModified();
          // Hit EOF on an already-rotated file. Move on!
          offsetOfFirstByte += fileReadOffset;
          fileReadOffset = 0;
          advanceQ();
          log.debug("not caught up, and hit EOF. Moving forward in queue to " + cur);
        } else
          prevFileLastModDate = tsPreTail;
      }
    } catch (IOException e) {
      log.warn("IOException in " + adaptorID, e);
      deregisterAndStop();
    }
    return hasMoreData;
  }

  public String toString() {
    return "Rotation-aware Tailer on " + toWatch;
  }
}
| 8,223 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/CharFileTailingAdaptorUTF8NewLineEscaped.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.util.RecordConstants;
import java.util.ArrayList;
import java.util.Arrays;
/**
* A subclass of FileTailingAdaptor that reads UTF8/ascii files and splits
* records at non-escaped carriage returns
*
*/
public class CharFileTailingAdaptorUTF8NewLineEscaped extends
    FileTailingAdaptor {

  private static final char SEPARATOR = '\n';

  // Scratch list of unescaped-separator positions; per-instance, reused and
  // never shrinks (grows to the largest line count ever seen).
  private ArrayList<Integer> offsets = new ArrayList<Integer>();

  /**
   * Splits the buffer at newline characters that are NOT immediately
   * preceded by the record-separator escape sequence, and emits one chunk
   * truncated to whole records.
   *
   * @param eq queue receiving the assembled chunk
   * @param buffOffsetInFile file offset of buf[0]
   * @param buf bytes read from the tailed file
   * @return number of bytes consumed, or 0 if no complete record was found
   */
  @Override
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    String es = RecordConstants.RECORD_SEPARATOR_ESCAPE_SEQ;
    for (int i = 0; i < buf.length; ++i) {
      // if this is a separator
      if (buf[i] == SEPARATOR) {
        // if possibly preceded by escape sequence (avoid outOfBounds here)
        boolean escaped = false; // was it escaped?
        if (i - es.length() >= 0) {
          escaped = true; // maybe (at least there was room for an escape
          // sequence, so let's check for the e.s.)
          for (int j = 0; j < es.length(); j++) {
            if (buf[i - es.length() + j] != es.charAt(j)) {
              escaped = false;
            }
          }
        }
        if (!escaped) {
          offsets.add(i);
        }
      }
    }
    if (offsets.size() > 0) {
      int[] offsets_i = new int[offsets.size()];
      for (int i = 0; i < offsets.size(); i++) {
        try {
          offsets_i[i] = offsets.get(i);
        } catch (NullPointerException e) {
          // Skip offsets 0 where it can be null.
          // NOTE(review): elements are always added via offsets.add(i)
          // (autoboxed int), so a null element -- and hence this catch --
          // looks unreachable; confirm before removing.
        }
      }
      // make the stream unique to this adaptor
      int bytesUsed = 0;
      // NOTE(review): offsets_i holds zero-based indices (< buf.length), so
      // this equality can apparently never hold; "separator is the last
      // byte" would be buf.length - 1 == offsets_i[last]. Confirm intent
      // before changing -- taking this branch with a single offset would
      // also index offsets_i[-1].
      if (buf.length == offsets_i[offsets_i.length - 1]) {
        // If Separator is last character of stream,
        // send the record.
        bytesUsed = offsets_i[offsets_i.length - 2] + 1;
      } else {
        // If the last record is partial read,
        // truncate the record to the n -1 new line.
        bytesUsed = offsets_i[offsets_i.length - 1] + 1; // char at last
      }
      // offset uses a byte
      assert bytesUsed > 0 : " shouldn't send empty events";
      ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, Arrays.copyOf(buf, bytesUsed), this);
      chunk.setSeqID(buffOffsetInFile + bytesUsed);
      chunk.setRecordOffsets(offsets_i);
      eq.add(chunk);
      offsets.clear();
      return bytesUsed;
    } else
      return 0;
  }

  public String toString() {
    return "escaped newline CFTA-UTF8";
  }
}
| 8,224 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/TerminatorThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import org.apache.log4j.Logger;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
 * Drains the remaining content of a file being shut down gracefully.
 * Keeps calling {@link FileTailingAdaptor#tailFile()} until it reports no
 * more data, extending a 10-minute deadline up to three times before
 * giving up, then closes the adaptor's reader.
 */
public class TerminatorThread extends Thread {
  private static Logger log = Logger.getLogger(TerminatorThread.class);

  private FileTailingAdaptor adaptor = null;

  public TerminatorThread(FileTailingAdaptor adaptor) {
    this.adaptor = adaptor;
  }

  public void run() {
    // Each pass is allowed 10 minutes; the deadline is pushed back (at most
    // 3 times) while the adaptor still reports data to drain.
    final long extensionMs = 10 * 60 * 1000;
    long deadline = System.currentTimeMillis() + extensionMs;
    int overruns = 0;

    log.info("Terminator thread started." + adaptor.toWatch.getPath());
    try {
      while (adaptor.tailFile()) {
        if (log.isDebugEnabled()) {
          log.debug("Terminator thread:" + adaptor.toWatch.getPath()
              + " still working");
        }
        if (System.currentTimeMillis() <= deadline) {
          continue;
        }
        log.warn("TerminatorThread should have been finished by now! count="
            + overruns);
        overruns++;
        deadline = System.currentTimeMillis() + extensionMs;
        if (overruns > 3) {
          log.warn("TerminatorThread should have been finished by now, stopping it now! count="
              + overruns);
          break;
        }
      }
    } catch (InterruptedException e) {
      log.info("InterruptedException on Terminator thread:"
          + adaptor.toWatch.getPath(), e);
    } catch (Throwable e) {
      log.warn("Exception on Terminator thread:" + adaptor.toWatch.getPath(),
          e);
    }
    log.info("Terminator thread finished." + adaptor.toWatch.getPath());

    // Best-effort close; the reader may already be null or closed.
    try {
      adaptor.reader.close();
    } catch (Throwable ex) {
      log.debug(ExceptionUtil.getStackTrace(ex));
    }
  }
}
| 8,225 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/FileTailingAdaptorPreserveLines.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import java.util.Arrays;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
/**
* A subclass of FileTailingAdaptor that reads UTF8/ascii files and only send
* chunks with complete lines.
*/
public class FileTailingAdaptorPreserveLines extends FileTailingAdaptor {

  private static final char SEPARATOR = '\n';

  /**
   * Scans backwards for the last separator and emits a single chunk
   * containing every complete line in the buffer; a trailing partial line
   * is left for the next read.
   *
   * @param eq the queue to add the resulting chunk to
   * @param buffOffsetInFile byte offset in the stream at which buf begins
   * @param buf the bytes read from the file
   * @return number of bytes consumed, or 0 if no complete line was found
   */
  @Override
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    // Use -1 as the "not found" sentinel: 0 is a legal offset (the buffer
    // may begin with an empty line), which the previous "> 0" test
    // silently dropped, leaving that byte unconsumed forever if the file
    // never grew again.
    int lastNewLineOffset = -1;
    for (int i = buf.length - 1; i >= 0; --i) {
      if (buf[i] == SEPARATOR) {
        lastNewLineOffset = i;
        break;
      }
    }

    if (lastNewLineOffset >= 0) {
      int[] offsets_i = { lastNewLineOffset };
      // The char at the last offset uses a byte, hence +1.
      int bytesUsed = lastNewLineOffset + 1;
      assert bytesUsed > 0 : " shouldn't send empty events";
      ChunkImpl event = new ChunkImpl(type, toWatch.getAbsolutePath(),
          buffOffsetInFile + bytesUsed, Arrays.copyOf(buf, bytesUsed), this);
      event.setRecordOffsets(offsets_i);
      eq.add(event);
      return bytesUsed;
    } else {
      return 0;
    }
  }
}
| 8,226 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/FileTailer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* A shared thread used by all FileTailingAdaptors.
*
* For now, it tries each file in succession. If it gets through every file
* within two seconds, and no more data remains, it will sleep.
*
* If there was still data available in any file, the adaptor will loop again.
*
*/
class FileTailer extends Thread {
  static Logger log = Logger.getLogger(FileTailer.class);

  // Adaptors sharing this tailer thread. CopyOnWriteArrayList because
  // iteration (every sample period) vastly outnumbers add/remove.
  private List<LWFTAdaptor> adaptors;
  private volatile boolean isRunning = true;

  /**
   * How often to tail each file.
   */
  int DEFAULT_SAMPLE_PERIOD_MS = 1000 * 2;
  int SAMPLE_PERIOD_MS = DEFAULT_SAMPLE_PERIOD_MS;
  public static final int MAX_SAMPLE_PERIOD = 60 * 1000;

  FileTailer(Configuration conf) {
    SAMPLE_PERIOD_MS = conf.getInt(
        "chukwaAgent.adaptor.context.switch.time",
        DEFAULT_SAMPLE_PERIOD_MS);
    adaptors = new CopyOnWriteArrayList<LWFTAdaptor>();
    this.setDaemon(true);
    start(); // start the file-tailing thread
  }

  /** Called by FileTailingAdaptor, only. */
  void startWatchingFile(LWFTAdaptor f) {
    adaptors.add(f);
  }

  /** Called by FileTailingAdaptor, only. */
  void stopWatchingFile(LWFTAdaptor f) {
    adaptors.remove(f);
  }

  /**
   * Polls every registered adaptor in turn. Sleeps one sample period only
   * when no file reported more data and the pass finished quickly;
   * otherwise loops again immediately.
   */
  public void run() {
    while (isRunning) {
      try {
        boolean shouldISleep = true;
        long startTime = System.currentTimeMillis();
        for (LWFTAdaptor f : adaptors) {
          boolean hasMoreData = f.tailFile();
          shouldISleep &= !hasMoreData;
        }
        long timeToReadFiles = System.currentTimeMillis() - startTime;
        if (timeToReadFiles > MAX_SAMPLE_PERIOD)
          log.warn("took " + timeToReadFiles + " ms to check all files being tailed");
        if (timeToReadFiles < SAMPLE_PERIOD_MS && shouldISleep) {
          Thread.sleep(SAMPLE_PERIOD_MS);
        }
      } catch (Throwable e) {
        // log.warn(msg, e) already records the stack trace; the previous
        // additional e.printStackTrace() to stderr was redundant.
        log.warn("Exception in FileTailer, while loop", e);
      }
    }
  }
}
| 8,227 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/LWFTAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.adaptor.AbstractAdaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* A base class for file tailing adaptors.
* Intended to mandate as little policy as possible, and to use as
* few system resources as possible.
*
*
* If the file does not exist, this class will continue to retry quietly
* forever and will start tailing if it's eventually created.
*/
public class LWFTAdaptor extends AbstractAdaptor {
  /**
   * This is the maximum amount we'll read from any one file before moving on
   * to the next. This way, we get quick response time for other files if one
   * file is growing rapidly.
   */
  public static final int DEFAULT_MAX_READ_SIZE = 128 * 1024;
  public static final String MAX_READ_SIZE_OPT =
      "chukwaAgent.fileTailingAdaptor.maxReadSize";
  int MAX_READ_SIZE = DEFAULT_MAX_READ_SIZE;

  static Logger log;
  // Single tailer thread shared by every instance; created lazily in start().
  static FileTailer tailer;
  static {
    tailer = null;
    log = Logger.getLogger(FileTailingAdaptor.class);
  }

  /**
   * next PHYSICAL offset to read
   */
  protected long fileReadOffset;

  /**
   * The logical offset of the first byte of the file
   */
  protected long offsetOfFirstByte = 0;
  protected Configuration conf = null;

  /**
   * The timestamp of last slurping.
   */
  protected long lastSlurpTime = 0l;

  File toWatch;

  @Override
  public void start(long offset) {
    // Lazily create the shared tailer; guarded on the class to make the
    // one-time initialization safe across concurrently starting adaptors.
    synchronized (LWFTAdaptor.class) {
      if (tailer == null)
        tailer = new FileTailer(control.getConfiguration());
    }
    this.fileReadOffset = offset - offsetOfFirstByte;
    tailer.startWatchingFile(this);
  }

  /**
   * @see org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor#getCurrentStatus()
   */
  public String getCurrentStatus() {
    return type.trim() + " " + offsetOfFirstByte + " " + toWatch.getPath();
  }

  public String toString() {
    return "Lightweight Tailer on " + toWatch;
  }

  public String getStreamName() {
    return toWatch.getPath();
  }

  @Override
  public String parseArgs(String params) {
    conf = control.getConfiguration();
    MAX_READ_SIZE = conf.getInt(MAX_READ_SIZE_OPT, DEFAULT_MAX_READ_SIZE);
    // Accepted forms: "<offset> <path>" or just "<path>".
    Pattern cmd = Pattern.compile("(\\d+)\\s+(.+)\\s?");
    Matcher m = cmd.matcher(params);
    if (m.matches()) { // check for first-byte offset. If absent, assume we just got a path.
      offsetOfFirstByte = Long.parseLong(m.group(1));
      toWatch = new File(m.group(2));
    } else {
      toWatch = new File(params.trim());
    }
    return toWatch.getAbsolutePath();
  }

  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
      throws AdaptorException {
    tailer.stopWatchingFile(this);
    return fileReadOffset + offsetOfFirstByte;
  }

  /**
   * Extract records from a byte sequence
   *
   * @param eq the queue to stick the new chunk[s] in
   * @param buffOffsetInFile the byte offset in the stream at which buf[] begins
   * @param buf the byte buffer to extract records from
   * @return the number of bytes processed
   * @throws InterruptedException
   */
  protected int extractRecords(ChunkReceiver eq, long buffOffsetInFile,
      byte[] buf) throws InterruptedException {
    if (buf.length == 0)
      return 0;

    ChunkImpl chunk = new ChunkImpl(type, toWatch.getAbsolutePath(),
        buffOffsetInFile + buf.length, buf, this);

    eq.add(chunk);
    return buf.length;
  }

  /**
   * Reads up to MAX_READ_SIZE bytes starting at fileReadOffset and feeds
   * them to extractRecords; advances fileReadOffset by the bytes consumed.
   *
   * @param len current length of the file
   * @param reader an open reader positioned anywhere (this method seeks)
   * @return true if there is more data beyond what was read this pass
   */
  protected boolean slurp(long len, RandomAccessFile reader) throws IOException,
      InterruptedException {
    boolean hasMoreData = false;

    log.debug("Adaptor|" + adaptorID + "|seeking|" + fileReadOffset);
    reader.seek(fileReadOffset);

    long bufSize = len - fileReadOffset;
    if (bufSize > MAX_READ_SIZE) {
      bufSize = MAX_READ_SIZE;
      hasMoreData = true;
    }
    byte[] buf = new byte[(int) bufSize];

    long curOffset = fileReadOffset;

    lastSlurpTime = System.currentTimeMillis();
    int bufferRead = reader.read(buf);
    // NOTE(review): read() may return fewer than buf.length bytes; this
    // assertion (and the buffer handed to extractRecords) presumes a full
    // read of a local file — TODO confirm partial reads cannot occur here.
    assert reader.getFilePointer() == fileReadOffset + bufSize : " event size arithmetic is broken: "
        + " pointer is "
        + reader.getFilePointer()
        + " but offset is "
        + fileReadOffset + bufSize;

    int bytesUsed = extractRecords(dest,
        fileReadOffset + offsetOfFirstByte, buf);

    // === WARNING ===
    // If we couldn't find a complete record AND
    // we cannot read more, i.e bufferRead == MAX_READ_SIZE
    // it's because the record is too BIG
    // So log.warn, and drop current buffer so we can keep moving
    // instead of being stopped at that point for ever
    if (bytesUsed == 0 && bufferRead == MAX_READ_SIZE) {
      log.warn("bufferRead == MAX_READ_SIZE AND bytesUsed == 0, dropping current buffer: startOffset="
          + curOffset
          + ", MAX_READ_SIZE="
          + MAX_READ_SIZE
          + ", for "
          + toWatch.getPath());
      bytesUsed = buf.length;
    }

    fileReadOffset = fileReadOffset + bytesUsed;

    log.debug("Adaptor|" + adaptorID + "|start|" + curOffset + "|end|"
        + fileReadOffset);
    return hasMoreData;
  }

  /**
   * Checks the watched file once: handles shrinkage, reads new data if any.
   * If the file doesn't exist, length is 0 and we just keep waiting for it.
   *
   * @return true if more data remains to be read
   */
  public boolean tailFile()
      throws InterruptedException {
    boolean hasMoreData = false;
    try {
      long len = toWatch.length();
      if (len < fileReadOffset) {
        // file shrank; probably some data went missing.
        handleShrunkenFile(len);
      } else if (len > fileReadOffset) {
        RandomAccessFile reader = new RandomAccessFile(toWatch, "r");
        // try/finally so the reader is closed even when slurp throws;
        // previously an IOException from slurp leaked the file handle.
        try {
          hasMoreData = slurp(len, reader);
        } finally {
          reader.close();
        }
      }
    } catch (IOException e) {
      log.warn("IOException in tailer", e);
      deregisterAndStop();
    }

    return hasMoreData;
  }

  private void handleShrunkenFile(long measuredLen) {
    log.info("file " + toWatch + "shrank from " + fileReadOffset + " to " + measuredLen);
    // Restart physical reads at 0 while keeping logical offsets monotonic.
    offsetOfFirstByte = measuredLen;
    fileReadOffset = 0;
  }
}
| 8,228 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/filetailer/FileTailingAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.filetailer;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.io.File;
import org.apache.hadoop.chukwa.datacollection.adaptor.*;
/**
* An adaptor that repeatedly tails a specified file, sending the new bytes.
* This class does not split out records, but just sends everything up to end of
* file. Subclasses can alter this behavior by overriding extractRecords().
*
*/
public class FileTailingAdaptor extends LWFTAdaptor {

  public static int MAX_RETRIES = 300;
  // How long a missing/unreadable file is tolerated before the adaptor
  // deregisters itself.
  static int GRACEFUL_PERIOD = 3 * 60 * 1000; // 3 minutes

  // Consecutive failed attempts to read the file since entering error state.
  private int attempts = 0;
  // Wall-clock time at which the graceful period ends (0 when not in error).
  private long gracefulPeriodExpired = 0l;
  private boolean adaptorInError = false;

  // Kept open across tailFile() calls so rotation can be detected by
  // comparing the open handle's length with the path's current state.
  protected RandomAccessFile reader = null;

  public void start(long bytes) {
    super.start(bytes);
    log.info("chukwaAgent.fileTailingAdaptor.maxReadSize: " + MAX_READ_SIZE);
    this.attempts = 0;

    log.info("started file tailer " + adaptorID + " on file " + toWatch
        + " with first byte at offset " + offsetOfFirstByte);
  }

  /**
   * Stops tailing. For GRACEFULLY / WAIT_TILL_FINISHED a TerminatorThread
   * drains remaining data first (GRACEFULLY waits at most ~60s; 
   * WAIT_TILL_FINISHED waits indefinitely). HARD_STOP just closes the reader.
   *
   * @return the logical offset at which tailing stopped
   */
  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy) {
    log.info("Enter Shutdown:" + shutdownPolicy.name() + " - ObjectId:" + this);
    switch (shutdownPolicy) {
      case GRACEFULLY :
      case WAIT_TILL_FINISHED : {
        if (toWatch.exists()) {
          int retry = 0;
          tailer.stopWatchingFile(this);
          // TerminatorThread closes this.reader when it finishes draining.
          TerminatorThread lastTail = new TerminatorThread(this);
          lastTail.setDaemon(true);
          lastTail.start();
          if (shutdownPolicy.ordinal() == AdaptorShutdownPolicy.GRACEFULLY.ordinal()) {
            while (lastTail.isAlive() && retry < 60) {
              try {
                log.info("GRACEFULLY Retry:" + retry);
                Thread.sleep(1000);
                retry++;
              } catch (InterruptedException ex) {
              }
            }
          } else {
            while (lastTail.isAlive()) {
              try {
                if (retry % 100 == 0) {
                  log.info("WAIT_TILL_FINISHED Retry:" + retry);
                }
                Thread.sleep(1000);
                retry++;
              } catch (InterruptedException ex) {
              }
            }
          }
        }
      }
      break;
      case HARD_STOP:
      default:
        tailer.stopWatchingFile(this);
        try {
          if (reader != null) {
            reader.close();
          }
          reader = null;
        } catch (Throwable e) {
          log.warn("Exception while closing reader:", e);
        }
        break;
    }
    log.info("Exit Shutdown:" + shutdownPolicy.name() + " - ObjectId:" + this);
    return fileReadOffset + offsetOfFirstByte;
  }

  /**
   * Looks at the tail of the associated file, adds some of it to event queue
   * This method is not thread safe. Returns true if there's more data in the
   * file
   */
  @Override
  public boolean tailFile()
      throws InterruptedException {
    boolean hasMoreData = false;
    try {

      // In error state and past the grace deadline: either the file is
      // still missing/unreadable (give up and deregister), or it came back
      // (clear the error state and try again next pass).
      if ((adaptorInError == true)
          && (System.currentTimeMillis() > gracefulPeriodExpired)) {
        if (!toWatch.exists()) {
          log.warn("Adaptor|" + adaptorID + "|attempts=" + attempts
              + "| File does not exist: " + toWatch.getAbsolutePath()
              + ", streaming policy expired. File removed from streaming.");
        } else if (!toWatch.canRead()) {
          log.warn("Adaptor|" + adaptorID + "|attempts=" + attempts
              + "| File cannot be read: " + toWatch.getAbsolutePath()
              + ", streaming policy expired. File removed from streaming.");
        } else {
          // Should have never been there
          adaptorInError = false;
          gracefulPeriodExpired = 0L;
          attempts = 0;
          return false;
        }

        deregisterAndStop();
        return false;
      } else if (!toWatch.exists() || !toWatch.canRead()) {
        // File unavailable: enter error state (first time) and start the
        // grace countdown; afterwards log every 10th attempt.
        if (adaptorInError == false) {
          long now = System.currentTimeMillis();
          gracefulPeriodExpired = now + GRACEFUL_PERIOD;
          adaptorInError = true;
          attempts = 0;
          log.warn("failed to stream data for: " + toWatch.getAbsolutePath()
              + ", graceful period will Expire at now:" + now + " + "
              + GRACEFUL_PERIOD + " secs, i.e:" + gracefulPeriodExpired);
        } else if (attempts % 10 == 0) {
          log.info("failed to stream data for: " + toWatch.getAbsolutePath()
              + ", attempt: " + attempts);
        }

        attempts++;
        return false; // no more data
      }

      if (reader == null) {
        reader = new RandomAccessFile(toWatch, "r");
        log.info("Adaptor|" + adaptorID
            + "|Opening the file for the first time|seek|" + fileReadOffset);
      }

      long len = 0L;
      try {
        len = reader.length();
        if (lastSlurpTime == 0) {
          lastSlurpTime = System.currentTimeMillis();
        }
        if (offsetOfFirstByte > fileReadOffset) {
          // If the file rotated, the recorded offsetOfFirstByte is greater than
          // file size,reset the first byte position to beginning of the file.
          fileReadOffset = 0;
          offsetOfFirstByte = 0L;
          log.warn("offsetOfFirstByte>fileReadOffset, resetting offset to 0");
        }
        if (len == fileReadOffset) {
          File fixedNameFile = new File(toWatch.getAbsolutePath());
          long fixedNameLastModified = fixedNameFile.lastModified();
          if (fixedNameLastModified > lastSlurpTime) {
            // If len == fileReadOffset,the file stops rolling log or the file
            // has rotated.
            // But fixedNameLastModified > lastSlurpTime , this means after the
            // last slurping,the file has been written ,
            // so the file has been rotated.
            boolean hasLeftData = true;
            while (hasLeftData) {// read the possiblly generated log
              hasLeftData = slurp(len, reader);
            }
            // Reopen at the fixed path to pick up the new (post-rotation)
            // file, then read it from the beginning.
            RandomAccessFile newReader = new RandomAccessFile(toWatch, "r");
            if (reader != null) {
              reader.close();
            }
            reader = newReader;
            fileReadOffset = 0L;
            len = reader.length();
            log.debug("Adaptor|" + adaptorID
                + "| File size mismatched, rotating: "
                + toWatch.getAbsolutePath());
            hasMoreData = slurp(len, reader);
          }
        } else if (len < fileReadOffset) {
          // file has rotated and no detection
          if (reader != null) {
            reader.close();
          }
          reader = null;
          fileReadOffset = 0L;
          offsetOfFirstByte = 0L;
          hasMoreData = true;
          log.warn("Adaptor|" + adaptorID + "| file: " + toWatch.getPath()
              + ", has rotated and no detection - reset counters to 0L");
        } else {
          hasMoreData = slurp(len, reader);
        }
      } catch (IOException e) {
        // do nothing, if file doesn't exist.
      }
    } catch (IOException e) {
      log.warn("failure reading " + toWatch, e);
    }
    attempts = 0;
    adaptorInError = false;
    return hasMoreData;
  }
}
| 8,229 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/jms/JMSMessageTransformer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.jms;
import javax.jms.Message;
import javax.jms.JMSException;
/**
* Class that knows how to transform a JMS Message to a byte array. The byte
* array will become the bytes bound to the Chukwa chunk.
*/
public interface JMSMessageTransformer {

  /**
   * Parse any transformer-specific args to initialize the transformer. Return
   * a null if the arguments could not be parsed. This method will always be
   * invoked before transform is called only if transformer arguments were
   * passed. If they weren't, this method will never be called.
   *
   * @param args Arguments needed to configure the transformer.
   * @return the parsed arguments on success, or null if they could not be
   *         parsed
   */
  public String parseArgs(String args);

  /**
   * Transform a Message to an array of bytes. Return null for a message that
   * should be ignored.
   *
   * @param message JMS message received by a JMS Adaptor.
   * @return the bytes that should be bound to the Chukwa chunk, or null to
   *         skip the message.
   * @throws JMSException if there is a problem processing the message
   */
  public byte[] transform(Message message) throws JMSException;
}
| 8,230 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/jms/JMSTextMessageTransformer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.jms;
import java.nio.charset.Charset;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import javax.jms.Message;
import javax.jms.TextMessage;
import javax.jms.JMSException;
/**
* Basic JMSMessageTransformer that uses the payload message of a JMS
* TextMessage as the Chukwa record payload. If the message is not an instance
* of TextMessage, or it is, but the payload is null or empty, returns null.
*/
/**
 * Basic JMSMessageTransformer that uses the text payload of a JMS
 * TextMessage as the Chukwa record payload. Non-TextMessages, and
 * TextMessages with a null or empty body, yield null (i.e. are skipped).
 */
public class JMSTextMessageTransformer implements JMSMessageTransformer {
  protected Log log = LogFactory.getLog(getClass());

  /** No configuration is needed; the args are echoed back as-is. */
  public String parseArgs(String s) {
    return s;
  }

  /**
   * @param message JMS message received by the adaptor
   * @return UTF-8 bytes of the message text, or null to skip the message
   * @throws JMSException if the payload cannot be read
   */
  public byte[] transform(Message message) throws JMSException {
    if (message instanceof TextMessage) {
      String text = ((TextMessage) message).getText();
      if (text == null || text.length() == 0) {
        return null;
      }
      return text.getBytes(Charset.forName("UTF-8"));
    }
    log.warn("Invalid message type received: " + message);
    return null;
  }
}
| 8,231 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/jms/JMSMessagePropertyTransformer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.jms;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import javax.jms.Message;
import javax.jms.JMSException;
import java.nio.charset.Charset;
import java.util.ArrayList;
/**
* JMSMessageTransformer that uses the properties of a JMS Message to build a
* Chukwa record payload. The value for each property configured will be used
* to create the record, with the delimiter value between each. The default
* delimiter is a tab (i.e., '\t').
* <P>
* To configure this transformer, set the -p field of the adaptor to the
* following (surrounded with double quotes):
* <code>
* <propertyNames> [-d <delimiter>] [-r <requiredPropertyNames>]
* </code>
* <ul>
* <li><code>propertyNames</code> - Comma-separated list of JMS properties.</li>
* <li><code>delimiter</code> - Delimiter to use, in single quotes.</li>
* <li><code>requiredPropertyNames</code> - Comma-separated list of required
* JMS properties. Default behavior is that all properties are required.</li>
* </ul>
*
*/
public class JMSMessagePropertyTransformer implements JMSMessageTransformer {
  protected Log log = LogFactory.getLog(getClass());

  private static final String DEFAULT_DELIMITER = "\t";

  // JMS property names to extract, in output order; set by parseArgs.
  ArrayList<String> propertyNames = null;
  // null means ALL properties are required (see transform).
  ArrayList<String> requiredPropertyNames = null;
  String delimiter = DEFAULT_DELIMITER;

  /**
   * Parses "<propertyNames> [-d <delimiter>] [-r <requiredPropertyNames>]".
   * The delimiter may be quoted with single quotes and may contain spaces.
   *
   * @param args the transformer configuration string
   * @return the args on success, or null if they could not be parsed
   */
  public String parseArgs(String args) {

    if (args == null || args.length() == 0) {
      log.error("propertyNames must be set for this transformer");
      return null;
    }
    log.info("Initializing JMSMessagePropertyTransformer: args=" + args);

    propertyNames = new ArrayList<String>();

    // First token is the comma-separated property list; the rest are flags.
    String[] tokens = args.split(" ");
    for (String propertyName : tokens[0].split(",")) {
      propertyNames.add(propertyName);
    }

    for (int i = 1; i < tokens.length; i++) {
      String token = tokens[i];

      if ("-d".equals(token) && i <= tokens.length - 2) {
        StringBuilder value = new StringBuilder();
        value.append(tokens[++i]);

        // we lost all spaces with the split, so we have to put them back, yuck.
        while (i <= tokens.length - 2 && !tokens[i + 1].startsWith("-")) {
          value.append(" ");
          value.append(tokens[++i]);
        }

        delimiter = trimSingleQuotes(value.toString());
      }
      else if ("-r".equals(token) && i <= tokens.length - 2) {

        // requiredPropertyNames = null means all are required.
        requiredPropertyNames = new ArrayList<String>();
        String[] required = tokens[++i].split(",");
        for (String r : required) {
          requiredPropertyNames.add(r);
        }
      }
    }

    log.info("Initialized JMSMessagePropertyTransformer: delimiter='" +
        delimiter + "', propertyNames=" + propertyNames +
        ", requiredProperties=" +
        (requiredPropertyNames == null ? "ALL" : requiredPropertyNames));

    return args;
  }

  /**
   * Transforms message properties into a byte array delimited by delimiter.
   * If any required property is missing, returns null.
   *
   * @param message the message whose properties are extracted
   * @return the delimited property values as UTF-8 bytes, or null
   * @throws JMSException if a property cannot be read
   */
  public byte[] transform(Message message) throws JMSException {
    if (propertyNames == null || propertyNames.size() == 0) {
      log.error("No message properties configured for this JMS transformer.");
      return null;
    }

    int valuesFound = 0;
    StringBuilder sb = new StringBuilder();
    for (String propertyName : propertyNames) {
      Object propertyValue = message.getObjectProperty(propertyName);
      String value = transformValue(propertyName, propertyValue);

      // is a required value not found?
      if (value == null && (requiredPropertyNames == null ||
          requiredPropertyNames.contains(propertyName))) {
        return null;
      }
      if (valuesFound > 0) {
        sb.append(delimiter);
      }
      if (value != null) {
        sb.append(value);
      }
      valuesFound++;
    }

    // NOTE(review): valuesFound is incremented for every property, so the
    // size check below appears to be always satisfied; the sb.length()
    // check rejects records where every (optional) value was empty.
    if (sb.length() == 0 || valuesFound != propertyNames.size()) {
      return null;
    }

    return sb.toString().getBytes(Charset.forName("UTF-8"));
  }

  /**
   * Transforms the propertyValue found into the string that should be used for
   * the message. Can handle String values and Number values. Override this
   * method to handle other Java types, or to apply other value transformation
   * logic.
   *
   * @param propertyName The name of the JMS property
   * @param propertyValue The value of the property, which might be null.
   * @return the string form of the value, or null if the value is null or of
   *         an unsupported type
   */
  protected String transformValue(String propertyName, Object propertyValue) {

    if (propertyValue == null) {
      return null;
    }
    else if (propertyValue instanceof String) {
      return (String) propertyValue;
    }
    else if (propertyValue instanceof Number) {
      return propertyValue.toString();
    }

    return null;
  }

  // Strips at most one leading and one trailing single quote.
  private static String trimSingleQuotes(String value) {
    if (value.length() == 0) {
      return value;
    }

    // trim leading and trailing quotes
    if (value.charAt(0) == '\'') {
      value = value.substring(1);
    }
    if (value.length() > 0 && value.charAt(value.length() - 1) == '\'') {
      value = value.substring(0, value.length() - 1);
    }
    return value;
  }
}
| 8,232 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/jms/JMSAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor.jms;
import java.nio.charset.Charset;
import org.apache.hadoop.chukwa.datacollection.adaptor.AbstractAdaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.log4j.Logger;
import org.apache.activemq.ActiveMQConnectionFactory;
import javax.jms.TopicConnection;
import javax.jms.TopicSession;
import javax.jms.Session;
import javax.jms.Topic;
import javax.jms.Connection;
import javax.jms.MessageListener;
import javax.jms.Message;
import javax.jms.JMSException;
import javax.jms.ConnectionFactory;
import javax.jms.QueueConnection;
import javax.jms.QueueSession;
import javax.jms.Queue;
import javax.jms.MessageConsumer;
/**
* Adaptor that is able to listen to a JMS topic or queue for messages, receive
* the message, and transform it to a Chukwa chunk. Transformation is handled by
* a JMSMessageTransformer. The default JMSMessageTransformer used is the
* JMSTextMessageTransformer.
* <P>
* This adaptor is added to an Agent like so:
* <code>
* add JMSAdaptor <dataType> <brokerURL> <-t <topicName> |-q <queueName> [-s <JMSSelector>]
* [-x <transformerName>] [-p <transformerConfigs>] <offset>
* </code>
* <ul>
* <li><code>dataType</code> - The chukwa data type.</li>
* <li><code>brokerURL</code> - The JMS broker URL to bind to.</li>
* <li><code>topicName</code> - The JMS topic to listen on.</li>
* <li><code>queueName</code> - The JMS queue to listen on.</li>
* <li><code>JMSSelector</code> - The JMS selector to filter with. Surround
* with quotes if selector contains multiple words.</li>
* <li><code>transformerName</code> - Class name of the JMSMessageTransformer to
* use.</li>
* <li><code>transformerConfigs</code> - Properties to be passed to the
* JMSMessageTransformer to use. Surround with quotes if configs contain
* multiple words.</li>
* </ul>
*
* @see JMSMessageTransformer
* @see JMSTextMessageTransformer
*/
public class JMSAdaptor extends AbstractAdaptor {
static Logger log = Logger.getLogger(JMSAdaptor.class);
ConnectionFactory connectionFactory = null;
Connection connection;
String brokerURL;
String topic;
String queue;
String selector = null;
JMSMessageTransformer transformer;
volatile long bytesReceived = 0;
String status; // used to write checkpoint info. See getStatus() below
String source; // added to the chunk to identify the stream
class JMSListener implements MessageListener {
public void onMessage(Message message) {
if (log.isDebugEnabled()) {
log.debug("got a JMS message");
}
try {
byte[] bytes = transformer.transform(message);
if (bytes == null) {
return;
}
bytesReceived += bytes.length;
if (log.isDebugEnabled()) {
log.debug("Adding Chunk from JMS message: " + new String(bytes, Charset.forName("UTF-8")));
}
Chunk c = new ChunkImpl(type, source, bytesReceived, bytes, JMSAdaptor.this);
dest.add(c);
} catch (JMSException e) {
log.error("can't read JMS messages in " + adaptorID, e);
}
catch (InterruptedException e) {
log.error("can't add JMS messages in " + adaptorID, e);
}
}
}
/**
* This adaptor received configuration like this:
* <brokerURL> <-t <topicName>|-q <queueName>> [-s <JMSSelector>] [-x <transformerName>]
* [-p <transformerProperties>]
*
* @param s is a list of parameters
* @return Adaptor ID
*/
@Override
public String parseArgs(String s) {
if (log.isDebugEnabled()) {
log.debug("Parsing args to initialize adaptor: " + s);
}
String[] tokens = s.split(" ");
if (tokens.length < 1) {
throw new IllegalArgumentException("Configuration must include brokerURL.");
}
brokerURL = tokens[0];
if (brokerURL.length() < 6 || brokerURL.indexOf("://") == -1) {
throw new IllegalArgumentException("Invalid brokerURL: " + brokerURL);
}
String transformerName = null;
String transformerConfs = null;
StringBuilder transformerConfsBuffer = new StringBuilder();
for (int i = 1; i < tokens.length; i++) {
String value = tokens[i];
if ("-t".equals(value)) {
topic = tokens[++i];
}
else if ("-q".equals(value)) {
queue = tokens[++i];
}
else if ("-s".equals(value) && i <= tokens.length - 1) {
selector = tokens[++i];
// selector can have multiple words
if (selector.startsWith("\"")) {
for(int j = i + 1; j < tokens.length; j++) {
selector = selector + " " + tokens[++i];
if(tokens[j].endsWith("\"")) {
break;
}
}
selector = trimQuotes(selector);
}
}
else if ("-x".equals(value)) {
transformerName = tokens[++i];
}
else if ("-p".equals(value)) {
transformerConfsBuffer.append(tokens[++i]);
transformerConfs = transformerConfsBuffer.toString();
// transformerConfs can have multiple words
if (transformerConfsBuffer.toString().startsWith("\"")) {
for(int j = i + 1; j < tokens.length; j++) {
transformerConfsBuffer.append(" ");
transformerConfsBuffer.append(tokens[++i]);
if(tokens[j].endsWith("\"")) {
break;
}
}
transformerConfs = trimQuotes(transformerConfsBuffer.toString());
}
}
}
if (topic == null && queue == null) {
log.error("topicName or queueName must be set");
return null;
}
if (topic != null && queue != null) {
log.error("Either topicName or queueName must be set, but not both");
return null;
}
// create transformer
if (transformerName != null) {
try {
Class<?> classDefinition = Class.forName(transformerName);
Object object = classDefinition.newInstance();
transformer = (JMSMessageTransformer)object;
} catch (Exception e) {
log.error("Couldn't find class for transformerName=" + transformerName, e);
return null;
}
}
else {
transformer = new JMSTextMessageTransformer();
}
// configure transformer
if (transformerConfs != null) {
String result = transformer.parseArgs(transformerConfs);
if (result == null) {
log.error("JMSMessageTransformer couldn't parse transformer configs: " +
transformerConfs);
return null;
}
}
status = s;
if(topic != null) {
source = "jms:"+brokerURL + ",topic:" + topic;
}
else if(queue != null) {
source = "jms:"+brokerURL + ",queue:" + queue;
}
return s;
}
@Override
public void start(long offset) throws AdaptorException {
try {
bytesReceived = offset;
connectionFactory = initializeConnectionFactory(brokerURL);
connection = connectionFactory.createConnection();
log.info("Starting JMS adaptor: " + adaptorID + " started on brokerURL=" + brokerURL +
", topic=" + topic + ", selector=" + selector +
", offset =" + bytesReceived);
// this is where different initialization could be used for a queue
if(topic != null) {
initializeTopic(connection, topic, selector, new JMSListener());
}
else if(queue != null) {
initializeQueue(connection, queue, selector, new JMSListener());
}
connection.start();
} catch(Exception e) {
throw new AdaptorException(e);
}
}
/**
* Override this to initialize with a different connection factory.
* @param brokerURL
* @return
*/
protected ConnectionFactory initializeConnectionFactory(String brokerURL) {
return new ActiveMQConnectionFactory(brokerURL);
}
/**
* Status is used to write checkpoints. Checkpoints are written as:
* ADD <adaptorKey> = <adaptorClass> <currentStatus> <offset>
*
* Once they're reloaded, adaptors are re-initialized with
* <adaptorClass> <currentStatus> <offset>
*
* While doing so, this gets passed by to the parseArgs method:
* <currentStatus>
*
* Without the first token in <currentStatus>, which is expected to be <dataType>.
*
* @return Adaptor status
*/
@Override
public String getCurrentStatus() {
return type + " " + status;
}
@Override
public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
throws AdaptorException {
try {
connection.close();
} catch(Exception e) {
log.error("Exception closing JMS connection.", e);
}
return bytesReceived;
}
private void initializeTopic(Connection connection,
String topic,
String selector,
JMSListener listener) throws JMSException {
TopicSession session = ((TopicConnection)connection).
createTopicSession(false, Session.AUTO_ACKNOWLEDGE);
Topic jmsTopic = session.createTopic(topic);
MessageConsumer consumer = session.createConsumer(jmsTopic, selector, true);
consumer.setMessageListener(listener);
}
private void initializeQueue(Connection connection,
String topic,
String selector,
JMSListener listener) throws JMSException {
QueueSession session = ((QueueConnection)connection).
createQueueSession(false, Session.AUTO_ACKNOWLEDGE);
Queue queue = session.createQueue(topic);
MessageConsumer consumer = session.createConsumer(queue, selector, true);
consumer.setMessageListener(listener);
}
private static String trimQuotes(String value) {
// trim leading and trailing quotes
if (value.charAt(0) == '"') {
value = value.substring(1);
}
if (value.charAt(value.length() - 1) == '"') {
value = value.substring(0, value.length() - 1);
}
return value;
}
} | 8,233 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/controller/ChukwaAgentController.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.controller;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.Socket;
import java.net.SocketException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.log4j.Logger;
/**
* A convenience library for applications to communicate to the
* {@link ChukwaAgent}. Can be used to register and unregister new
* {@link Adaptor}s. Also contains functions for applications to use for
* handling log rations.
*/
public class ChukwaAgentController {
  static Logger log = Logger.getLogger(ChukwaAgentController.class);

  /**
   * TimerTask that retries a failed adaptor registration after
   * {@code retryInterval} milliseconds, decrementing the retry budget.
   */
  public class AddAdaptorTask extends TimerTask {

    String adaptorName;
    String type;
    String params;
    private long offset;
    long numRetries;
    long retryInterval;

    AddAdaptorTask(String adaptorName, String type, String params, long offset,
                   long numRetries, long retryInterval) {
      this.adaptorName = adaptorName;
      this.type = type;
      this.params = params;
      this.offset = offset;
      this.numRetries = numRetries;
      this.retryInterval = retryInterval;
    }

    @Override
    public void run() {
      try {
        log.info("Trying to resend the add command [" + adaptorName + "]["
            + offset + "][" + params + "] [" + numRetries + "]");
        addByName(null, adaptorName, type, params, offset, numRetries, retryInterval);
      } catch (Exception e) {
        log.warn("Exception in AddAdaptorTask.run", e);
        e.printStackTrace();
      }
    }
  }

  // our default adaptors, provided here for convenience
  public static final String CharFileTailUTF8 = "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8";
  public static final String CharFileTailUTF8NewLineEscaped = "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped";

  static String DEFAULT_FILE_TAILER = CharFileTailUTF8NewLineEscaped;
  static int DEFAULT_PORT = 9093;
  static String DEFAULT_HOST = "localhost";
  static int numArgs = 0;

  /**
   * Client-side record of an adaptor registered (or about to be registered)
   * with the agent. Talks to the agent over its line-oriented control
   * protocol (ADD / SHUTDOWN).
   */
  class Adaptor {
    public String id;
    final public String className;
    final public String params;
    final public String appType;
    public long offset;

    Adaptor(String className, String appType, String params, long offset) {
      this.className = className;
      this.appType = appType;
      this.params = params;
      this.offset = offset;
    }

    Adaptor(String id, String className, String appType, String params,
            long offset) {
      this.id = id;
      this.className = className;
      this.appType = appType;
      this.params = params;
      this.offset = offset;
    }

    /**
     * Registers this {@link Adaptor} with the agent running at the specified
     * hostname and portno
     *
     * @return The id of the this {@link Adaptor}, assigned by the agent
     *         upon successful registration
     * @throws IOException if problem bind to agent controller port
     */
    String register() throws IOException {
      Socket s = new Socket(hostname, portno);
      try {
        try {
          s.setSoTimeout(60000);
        } catch (SocketException e) {
          log.warn("Error while settin soTimeout to 60000");
          e.printStackTrace();
        }
        PrintWriter bw = new PrintWriter(new OutputStreamWriter(s
            .getOutputStream(), Charset.forName("UTF-8")));
        if (id != null)
          bw.println("ADD " + id + " = " + className + " " + appType + " " + params + " " + offset);
        else
          bw.println("ADD " + className + " " + appType + " " + params + " " + offset);
        bw.flush();
        BufferedReader br = new BufferedReader(new InputStreamReader(s
            .getInputStream(), Charset.forName("UTF-8")));
        String resp = br.readLine();
        if (resp != null) {
          String[] fields = resp.split(" ");
          if (fields[0].equals("OK")) {
            // agent replies "OK ... <id>"; last field is the assigned id
            id = fields[fields.length - 1];
          }
        }
      } finally {
        // close on every path; previously the socket leaked when an
        // IOException was thrown mid-conversation
        s.close();
      }
      return id;
    }

    /**
     * Asks the agent to shut this adaptor down; on a graceful "OK ... <offset>"
     * reply, records the final offset so the adaptor can later be resumed.
     *
     * @throws IOException if the agent cannot be reached
     */
    void unregister() throws IOException {
      Socket s = new Socket(hostname, portno);
      try {
        try {
          s.setSoTimeout(60000);
        } catch (SocketException e) {
          log.warn("Error while settin soTimeout to 60000");
        }
        PrintWriter bw = new PrintWriter(new OutputStreamWriter(s
            .getOutputStream(), Charset.forName("UTF-8")));
        bw.println("SHUTDOWN " + id);
        bw.flush();

        BufferedReader br = new BufferedReader(new InputStreamReader(s
            .getInputStream(), Charset.forName("UTF-8")));
        String resp = br.readLine();
        if (resp == null || !resp.startsWith("OK")) {
          log.error("adaptor unregister error, id: " + id);
        } else if (resp.startsWith("OK")) {
          String[] respSplit = resp.split(" ");
          String newOffset = respSplit[respSplit.length - 1];
          try {
            offset = Long.parseLong(newOffset);
          } catch (NumberFormatException nfe) {
            log.error("adaptor didn't shutdown gracefully.\n" + nfe);
          }
        }
      } finally {
        // always release the socket, even if readLine() failed
        s.close();
      }
    }

    public String toString() {
      String[] namePieces = className.split("\\.");
      String shortName = namePieces[namePieces.length - 1];
      return id + " " + shortName + " " + appType + " " + params + " " + offset;
    }
  }

  Map<String, ChukwaAgentController.Adaptor> runningAdaptors = new HashMap<String, Adaptor>();
  Map<String, ChukwaAgentController.Adaptor> runningInstanceAdaptors = new HashMap<String, Adaptor>();
  Map<String, ChukwaAgentController.Adaptor> pausedAdaptors;
  String hostname;
  int portno;

  /** Connects to the agent on localhost:9093 and syncs the adaptor list. */
  public ChukwaAgentController() {
    portno = DEFAULT_PORT;
    hostname = DEFAULT_HOST;
    pausedAdaptors = new HashMap<String, Adaptor>();

    syncWithAgent();
  }

  /**
   * Connects to the agent at the given host/port and syncs the adaptor list.
   *
   * @param hostname agent host
   * @param portno agent control port
   */
  public ChukwaAgentController(String hostname, int portno) {
    this.hostname = hostname;
    this.portno = portno;
    pausedAdaptors = new HashMap<String, Adaptor>();

    syncWithAgent();
  }

  // Refreshes runningAdaptors from the agent; on failure clears the local
  // view rather than keeping a stale one.
  private boolean syncWithAgent() {
    // set up adaptors by using list here
    try {
      runningAdaptors = list();
      return true;
    } catch (IOException e) {
      System.err.println("Error initializing ChukwaClient with list of "
          + "currently registered adaptors, clearing our local list of adaptors");
      // if we can't connect to the LocalAgent, reset/clear our local view of
      // the Adaptors.
      runningAdaptors = new HashMap<String, ChukwaAgentController.Adaptor>();
      return false;
    }
  }

  /**
   * Registers a new adaptor. Makes no guarantee about success. On failure, we
   * print a message to stderr and ignore silently so that an application
   * doesn't crash if it's attempt to register an adaptor fails. This call does
   * not retry a conection. for that use the overloaded version of this which
   * accepts a time interval and number of retries
   * @param adaptorName is adaptor class name
   * @param type is data type
   * @param params is adaptor specific parameters
   * @param offset is starting sequence id
   *
   * @return the id number of the adaptor, generated by the agent
   */
  public String add(String adaptorName, String type, String params, long offset) {
    // retry for five minutes, every fifteen seconds
    return addByName(null, adaptorName, type, params, offset, 20, 15 * 1000);
  }

  /**
   * Registers a new adaptor. Makes no guarantee about success. On failure, to
   * connect to server, will retry <code>numRetries</code> times, every
   * <code>retryInterval</code> milliseconds.
   * @param adaptorID is unique adaptor identifier
   * @param adaptorName is adaptor class name
   * @param type is user defined data type name
   * @param params is adaptor specific configuration
   * @param offset is starting sequence id
   * @param numRetries is number of retries
   * @param retryInterval is time between retries
   *
   * @return the id number of the adaptor, generated by the agent
   */
  public String addByName(String adaptorID, String adaptorName, String type, String params, long offset,
                          long numRetries, long retryInterval) {
    ChukwaAgentController.Adaptor adaptor = new ChukwaAgentController.Adaptor(
        adaptorName, type, params, offset);
    adaptor.id = adaptorID;
    if (numRetries >= 0) {
      try {
        adaptorID = adaptor.register();

        if (adaptorID != null) {
          runningAdaptors.put(adaptorID, adaptor);
          runningInstanceAdaptors.put(adaptorID, adaptor);
        } else {
          System.err.println("Failed to successfully add the adaptor in AgentClient, adaptorID returned by add() was null.");
        }
      } catch (IOException ioe) {
        log.warn("AgentClient failed to contact the agent ("
            + hostname + ":" + portno + ")");
        log.warn("Scheduling a agent connection retry for adaptor add() in another "
            + retryInterval
            + " milliseconds, "
            + numRetries
            + " retries remaining");

        Timer addFileTimer = new Timer();
        addFileTimer.schedule(new AddAdaptorTask(adaptorName, type, params,
            offset, numRetries - 1, retryInterval), retryInterval);
      }
    } else {
      System.err.println("Giving up on connecting to the local agent");
    }
    return adaptorID;
  }

  /**
   * Unregisters the adaptor with the given id and drops it from the local
   * running-adaptor view.
   *
   * @param adaptorID id assigned by the agent
   * @return the removed Adaptor, or null if it wasn't in our local view
   * @throws IOException if the agent cannot be reached
   */
  public synchronized ChukwaAgentController.Adaptor remove(String adaptorID)
      throws IOException {
    syncWithAgent();
    ChukwaAgentController.Adaptor a = runningAdaptors.remove(adaptorID);
    if ( a != null ) {
      a.unregister();
    }
    return a;
  }

  /**
   * Removes every running adaptor matching the given class, app type and
   * params (typically a filename).
   *
   * @throws IOException if the agent cannot be reached
   */
  public void remove(String className, String appType, String filename)
      throws IOException {
    syncWithAgent();
    // search for FileTail adaptor with string of this file name
    // get its id, tell it to unregister itself with the agent,
    // then remove it from the list of adaptors
    for (Adaptor a : runningAdaptors.values()) {
      if (a.className.equals(className) && a.params.equals(filename)
          && a.appType.equals(appType)) {
        remove(a.id);
      }
    }
  }

  /** Removes every adaptor the agent currently reports as running. */
  public void removeAll() {
    syncWithAgent();
    // copy the key set first: remove(id) mutates runningAdaptors while we
    // iterate
    ArrayList<String> keyset = new ArrayList<String>();
    keyset.addAll( runningAdaptors.keySet());

    for (String id : keyset) {
      try {
        remove(id);
      } catch (IOException ioe) {
        System.err.println("Error removing an adaptor in removeAll()");
        ioe.printStackTrace();
      }
      log.info("Successfully removed adaptor " + id);
    }
  }

  public void removeInstanceAdaptors() {
    // Remove adaptors created by this instance of chukwa agent controller.
    // Instead of removing using id, this is removed by using the stream name
    // and record type.  This prevents the system to shutdown the wrong
    // adaptor after agent crashes.
    for (Adaptor a : runningInstanceAdaptors.values()) {
      try {
        remove(a.className, a.appType, a.params);
      } catch (IOException ioe) {
        log.warn("Error removing an adaptor in removeInstanceAdaptors()");
        ioe.printStackTrace();
      }
    }
  }

  /**
   * Issues a LIST command to the agent and parses each response line
   * ("<id>) <class> <appType> <params...> <offset>") into an Adaptor.
   *
   * @return map from adaptor id to Adaptor
   * @throws IOException if the agent cannot be reached
   */
  Map<String, ChukwaAgentController.Adaptor> list() throws IOException {
    Socket s = new Socket(hostname, portno);
    try {
      try {
        s.setSoTimeout(60000);
      } catch (SocketException e) {
        log.warn("Error while settin soTimeout to 60000");
        e.printStackTrace();
      }
      PrintWriter bw = new PrintWriter(
          new OutputStreamWriter(s.getOutputStream(), Charset.forName("UTF-8")));

      bw.println("LIST");
      bw.flush();
      BufferedReader br = new BufferedReader(new InputStreamReader(s
          .getInputStream(), Charset.forName("UTF-8")));
      String ln;
      Map<String, Adaptor> listResult = new HashMap<String, Adaptor>();
      while ((ln = br.readLine()) != null) {
        if (ln.equals("")) {
          break;
        } else {
          String[] parts = ln.split("\\s+");
          if (parts.length >= 4) { // should have id, className appType, params,
                                   // offset
            // chop the trailing right-paren off the id token
            String id = parts[0].substring(0, parts[0].length() - 1);
            long offset = Long.parseLong(parts[parts.length - 1]);
            StringBuilder tmpParams = new StringBuilder();
            tmpParams.append(parts[3]);
            for (int i = 4; i < parts.length - 1; i++) {
              tmpParams.append(" ");
              tmpParams.append(parts[i]);
            }
            listResult.put(id, new Adaptor(id, parts[1], parts[2], tmpParams.toString(),
                offset));
          }
        }
      }
      return listResult;
    } finally {
      // close on every path; previously the socket leaked on I/O errors
      s.close();
    }
  }

  // ************************************************************************
  // The following functions are convenience functions, defining an easy
  // to use API for application developers to integrate chukwa into their app
  // ************************************************************************

  /**
   * Registers a new "LineFileTailUTF8" adaptor and starts it at offset 0.
   * Checks to see if the file is being watched already, if so, won't register
   * another adaptor with the agent. If you have run the tail adaptor on this
   * file before and rotated or emptied the file you should use
   * {@link ChukwaAgentController#pauseFile(String, String)} and
   * {@link ChukwaAgentController#resumeFile(String, String)} which will store
   * the adaptors metadata and re-use them to pick up where it left off.
   * @param appType is user defined name for the data stream
   *
   * @param filename of the file for the tail adaptor to start monitoring
   * @param numRetries is number of retries
   * @param retryInterval is time between retries
   * @return the id number of the adaptor, generated by the agent
   */
  public String addFile(String appType, String filename, long numRetries,
                        long retryInterval) {
    filename = new File(filename).getAbsolutePath();

    // NOTE(review): we deliberately do not check that the file exists here —
    // the agent may be running on a different machine.

    // check to see if this file is being watched already, if yes don't set up
    // another adaptor for it
    boolean isDuplicate = false;
    for (Adaptor a : runningAdaptors.values()) {
      if (a.className.equals(DEFAULT_FILE_TAILER) && a.appType.equals(appType)
          && a.params.endsWith(filename)) {
        isDuplicate = true;
      }
    }
    if (!isDuplicate) {
      return addByName(null, DEFAULT_FILE_TAILER, appType, 0L + " " + filename, 0L,
          numRetries, retryInterval);
    } else {
      log.info("An adaptor for filename \"" + filename
          + "\", type \"" + appType
          + "\", exists already, addFile() command aborted");
      return null;
    }
  }

  /** Same as {@link #addFile(String, String, long, long)} with no retries. */
  public String addFile(String appType, String filename) {
    return addFile(appType, filename, 0, 0);
  }

  /**
   * Pause all active adaptors of the default file tailing type who are tailing
   * this file This means we actually stop the adaptor and it goes away forever,
   * but we store it state so that we can re-launch a new adaptor with the same
   * state later.
   *
   * @param appType is application type
   * @param filename is file name suffix pattern
   * @return array of adaptorID numbers which have been created and assigned the
   *         state of the formerly paused adaptors
   * @throws IOException if error pausing adaptors
   */
  public Collection<String> pauseFile(String appType, String filename)
      throws IOException {
    syncWithAgent();
    // store the unique streamid of the file we are pausing.
    // search the list of adaptors for this filename
    // store the current offset for it
    List<String> results = new ArrayList<String>();
    for (Adaptor a : runningAdaptors.values()) {
      if (a.className.equals(DEFAULT_FILE_TAILER) && a.params.endsWith(filename)
          && a.appType.equals(appType)) {
        pausedAdaptors.put(a.id, a); // add it to our list of paused adaptors
        remove(a.id); // tell the agent to remove/unregister it
        results.add(a.id);
      }
    }
    return results;
  }

  /** @return true if some paused file-tail adaptor matches this file/type. */
  public boolean isFilePaused(String appType, String filename) {
    for (Adaptor a : pausedAdaptors.values()) {
      if (a.className.equals(DEFAULT_FILE_TAILER) && a.params.endsWith(filename)
          && a.appType.equals(appType)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Resume all adaptors for this filename that have been paused
   *
   * @param appType is application type
   * @param filename filename by which to lookup adaptors which are paused (and
   *        tailing this file)
   * @return an array of the new adaptor ID numbers which have resumed where the
   *         old adaptors left off
   * @throws IOException if unable to resume all adaptors
   */
  public Collection<String> resumeFile(String appType, String filename)
      throws IOException {
    syncWithAgent();
    // search for a record of this paused file
    List<String> results = new ArrayList<String>();
    for (Adaptor a : pausedAdaptors.values()) {
      if (a.className.equals(DEFAULT_FILE_TAILER) && a.params.endsWith(filename)
          && a.appType.equals(appType)) {
        String newID = add(DEFAULT_FILE_TAILER, a.appType, a.offset + " "
            + filename, a.offset);
        pausedAdaptors.remove(a.id);
        a.id = newID;
        results.add(a.id);
      }
    }
    return results;
  }

  /**
   * Removes every running default file-tail adaptor watching this file.
   *
   * @throws IOException if the agent cannot be reached
   */
  public void removeFile(String appType, String filename) throws IOException {
    syncWithAgent();
    // search for FileTail adaptor with string of this file name
    // get its id, tell it to unregister itself with the agent,
    // then remove it from the list of adaptors
    for (Adaptor a : runningAdaptors.values()) {
      if (a.className.equals(DEFAULT_FILE_TAILER) && a.params.endsWith(filename)
          && a.appType.equals(appType)) {
        remove(a.id);
      }
    }
  }

  // ************************************************************************
  // command line utilities
  // ************************************************************************

  public static void main(String[] args) {
    ChukwaAgentController c = getClient(args);
    // use equalsIgnoreCase consistently for all subcommands
    if (numArgs >= 3 && args[0].equalsIgnoreCase("addfile")) {
      doAddFile(c, args[1], args[2]);
    } else if (numArgs >= 3 && args[0].equalsIgnoreCase("removefile")) {
      doRemoveFile(c, args[1], args[2]);
    } else if (numArgs >= 1 && args[0].equalsIgnoreCase("list")) {
      doList(c);
    } else if (numArgs >= 1 && args[0].equalsIgnoreCase("removeall")) {
      doRemoveAll(c);
    } else {
      System.err.println("usage: ChukwaClient addfile <apptype> <filename> [-h hostname] [-p portnumber]");
      System.err.println("       ChukwaClient removefile adaptorID [-h hostname] [-p portnumber]");
      System.err.println("       ChukwaClient removefile <apptype> <filename> [-h hostname] [-p portnumber]");
      System.err.println("       ChukwaClient list [IP] [port]");
      System.err.println("       ChukwaClient removeAll [IP] [port]");
    }
  }

  // Parses -h/-p flags (and updates numArgs to exclude them) and builds the
  // controller to talk to.
  private static ChukwaAgentController getClient(String[] args) {
    int portno = 9093;
    String hostname = "localhost";

    numArgs = args.length;

    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-h") && args.length > i + 1) {
        hostname = args[i + 1];
        log.debug("Setting hostname to: " + hostname);
        numArgs -= 2; // subtract for the flag and value
      } else if (args[i].equals("-p") && args.length > i + 1) {
        portno = Integer.parseInt(args[i + 1]);
        log.debug("Setting portno to: " + portno);
        numArgs -= 2; // subtract for the flat, i.e. -p, and value
      }
    }
    return new ChukwaAgentController(hostname, portno);
  }

  private static String doAddFile(ChukwaAgentController c, String appType,
                                  String params) {
    log.info("Adding adaptor with filename: " + params);
    String adaptorID = c.addFile(appType, params);
    if (adaptorID != null) {
      log.info("Successfully added adaptor, id is:" + adaptorID);
    } else {
      log.error("Agent reported failure to add adaptor.");
    }
    return adaptorID;
  }

  private static void doRemoveFile(ChukwaAgentController c, String appType,
                                   String params) {
    try {
      log.debug("Removing adaptor with filename: " + params);
      c.removeFile(appType, params);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }

  private static void doList(ChukwaAgentController c) {
    try {
      Iterator<Adaptor> adptrs = c.list().values().iterator();
      while (adptrs.hasNext()) {
        log.debug(adptrs.next().toString());
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  private static void doRemoveAll(ChukwaAgentController c) {
    log.info("Removing all adaptors");
    c.removeAll();
  }
}
| 8,234 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/controller/ClientFinalizer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.controller;
import org.apache.log4j.helpers.LogLog;
public class ClientFinalizer extends Thread {

  /** Controller whose adaptors get cleaned up when this thread runs; may be null. */
  private ChukwaAgentController controller = null;

  public ClientFinalizer(ChukwaAgentController chukwaClient) {
    this.controller = chukwaClient;
  }

  /**
   * Removes the adaptors registered through the controller, swallowing any
   * Throwable so cleanup can never abort the caller (e.g. a shutdown
   * sequence); problems are only reported via LogLog.
   */
  public synchronized void run() {
    try {
      if (controller == null) {
        LogLog.warn("chukwaClient is null cannot do any cleanup");
        return;
      }
      controller.removeInstanceAdaptors();
    } catch (Throwable e) {
      LogLog.warn("closing the controller threw an exception:\n" + e);
    }
  }
}
| 8,235 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/SeqFileWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.Calendar;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.io.IOException;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Logger;
/**
* This class <b>is</b> thread-safe -- rotate() and save() both synchronize on
* this object.
*
*/
public class SeqFileWriter extends PipelineableWriter implements ChukwaWriter {
  static Logger log = Logger.getLogger(SeqFileWriter.class);
  // Test hook: when false, close() skips the final rename/delete of the sink file.
  private static boolean ENABLE_ROTATION_ON_CLOSE = true;

  // Seconds between stat-report log lines; overridden by STAT_PERIOD_OPT.
  protected int STAT_INTERVAL_SECONDS = 30;
  // Milliseconds between sink-file rotations; overridden by ROTATE_INTERVAL_OPT.
  private int rotateInterval = 1000 * 60 * 5;
  // Offset (ms) added past the rotate boundary in fixed-interval mode;
  // overridden by FIXED_INTERVAL_OFFSET_OPT.
  private int offsetInterval = 1000 * 30;
  // When true, rotations are aligned to fixed wall-clock boundaries plus
  // offsetInterval instead of being scheduled relative to the last rotation.
  private boolean if_fixed_interval = false;
  static final int ACQ_WAIT_ON_TERM = 500; //ms to wait for lock on a SIGTERM before aborting

  // Configuration keys read in init().
  public static final String STAT_PERIOD_OPT = "chukwaCollector.stats.period";
  public static final String ROTATE_INTERVAL_OPT = "chukwaCollector.rotateInterval";
  public static final String IF_FIXED_INTERVAL_OPT = "chukwaCollector.isFixedTimeRotatorScheme";
  public static final String FIXED_INTERVAL_OFFSET_OPT = "chukwaCollector.fixedTimeIntervalOffset";
  public static final String OUTPUT_DIR_OPT= "chukwaCollector.outputDir";
  // "_hostname_" fragment embedded in sink file names; "-NA-" if lookup fails.
  public String localHostAddr = null;

  // Guards the current sink file state (stream, path, name) across
  // add()/rotate()/close(); fair so a close() isn't starved by writers.
  protected final Semaphore lock = new Semaphore(1, true);

  protected FileSystem fs = null;
  protected Configuration conf = null;

  protected String outputDir = null;
  private Calendar calendar = Calendar.getInstance();

  protected Path currentPath = null;
  protected String currentFileName = null;
  protected FSDataOutputStream currentOutputStr = null;
  protected SequenceFile.Writer seqFileWriter = null;

  // Archive-key time partition (start of the current hour, ms since epoch)
  // and the timestamp at which it must be recomputed (start of next hour).
  protected long timePeriod = -1;
  protected long nextTimePeriodComputation = -1;

  protected Timer rotateTimer = null;
  protected Timer statTimer = null;

  // Byte counters: dataSize accumulates since the last stat report (reset by
  // StatReportingTask); bytesThisRotate accumulates since the last rotation.
  protected volatile long dataSize = 0;
  protected volatile long bytesThisRotate = 0;
  protected volatile boolean isRunning = false;

  /** Captures the local host name for use in generated sink file names. */
  public SeqFileWriter() {
    try {
      localHostAddr = "_" + InetAddress.getLocalHost().getHostName() + "_";
    } catch (UnknownHostException e) {
      localHostAddr = "-NA-";
    }
  }

  /**
   * @return bytes written since the last stat report (reset periodically by
   *         StatReportingTask; not a lifetime total).
   */
  public long getBytesWritten() {
    return dataSize;
  }

  /**
   * Reads rotation/stat configuration, connects to the target filesystem,
   * opens the first sink file via rotate(), and starts the stat timer.
   *
   * @param conf source of the chukwaCollector.* and filesystem settings
   * @throws WriterException if no filesystem name can be determined
   */
  public void init(Configuration conf) throws WriterException {
    outputDir = conf.get(OUTPUT_DIR_OPT, "/chukwa");

    this.conf = conf;

    rotateInterval = conf.getInt(ROTATE_INTERVAL_OPT,rotateInterval);
    if_fixed_interval = conf.getBoolean(IF_FIXED_INTERVAL_OPT,if_fixed_interval);
    offsetInterval = conf.getInt(FIXED_INTERVAL_OFFSET_OPT,offsetInterval);

    STAT_INTERVAL_SECONDS = conf.getInt(STAT_PERIOD_OPT, STAT_INTERVAL_SECONDS);

    // check if they've told us the file system to use
    String fsname = conf.get("writer.hdfs.filesystem");
    if (fsname == null || fsname.equals("")) {
      // otherwise try to get the filesystem from hadoop
      fsname = conf.get("fs.defaultFS");
    }

    log.info("rotateInterval is " + rotateInterval);
    if(if_fixed_interval)
      log.info("using fixed time interval scheme, " +
              "offsetInterval is " + offsetInterval);
    else
      log.info("not using fixed time interval scheme");
    log.info("outputDir is " + outputDir);
    log.info("fsname is " + fsname);
    log.info("filesystem type from core-default.xml is "
        + conf.get("fs.hdfs.impl"));

    if (fsname == null) {
      log.error("no filesystem name");
      throw new WriterException("no filesystem");
    }
    try {
      fs = FileSystem.get(new URI(fsname), conf);
      if (fs == null) {
        log.error("can't connect to HDFS.");
      }
    } catch (Throwable e) {
      // Deliberately non-fatal: rotate() below will log failures and retry on
      // its timer, so a late-starting HDFS does not kill the collector.
      log.error(
          "can't connect to HDFS, trying default file system instead (likely to be local)",
          e);
    }

    // Setup everything by rotating
    isRunning = true;
    rotate();

    statTimer = new Timer();
    statTimer.schedule(new StatReportingTask(), 1000,
        STAT_INTERVAL_SECONDS * 1000);
  }

  /** Periodically logs the bytes written since the last report, then resets the counter. */
  public class StatReportingTask extends TimerTask {
    private long lastTs = System.currentTimeMillis();

    public void run() {

      long time = System.currentTimeMillis();
      long currentDs = dataSize;
      dataSize = 0;

      long interval = time - lastTs;
      lastTs = time;

      long dataRate = 1000 * currentDs / interval; // bytes/sec
      log.info("stat:datacollection.writer.hdfs dataSize=" + currentDs
          + " dataRate=" + dataRate);
    }

    public StatReportingTask() {}
  };

  /**
   * Closes the current sink file -- renaming it to ".done" if it received any
   * bytes, deleting it if empty -- then opens a fresh ".chukwa" file and
   * schedules the next rotation. Failures are logged and retried on the next
   * timer tick rather than propagated.
   */
  void rotate() {
    if (rotateTimer != null) {
      rotateTimer.cancel();
    }

    if(!isRunning)
      return;

    calendar.setTimeInMillis(System.currentTimeMillis());

    // Build a unique file name: timestamp + host + RMI UID, stripped of
    // characters that are awkward in file names.
    String newName = new java.text.SimpleDateFormat("yyyyMMddHHmmssSSS")
        .format(calendar.getTime());
    newName += localHostAddr + new java.rmi.server.UID().toString();
    newName = newName.replace("-", "");
    newName = newName.replace(":", "");
    newName = newName.replace(".", "");
    newName = outputDir + "/" + newName.trim();

    try {
      lock.acquire();

      FSDataOutputStream previousOutputStr = currentOutputStr;
      Path previousPath = currentPath;
      String previousFileName = currentFileName;

      if (previousOutputStr != null) {
        boolean closed = false;
        try {
          log.info("closing sink file" + previousFileName);
          previousOutputStr.close();
          closed = true;
        }catch (Throwable e) {
          log.error("couldn't close file" + previousFileName, e);
          //we probably have an orphaned 0 byte file at this point due to an
          //intermitant HDFS outage. Once HDFS comes up again we'll be able to
          //close it, although it will be empty.
        }

        if (bytesThisRotate > 0) {
          if (closed) {
            log.info("rotating sink file " + previousPath);
            fs.rename(previousPath, new Path(previousFileName + ".done"));
          }
          else {
            log.warn(bytesThisRotate + " bytes potentially lost, since " +
                    previousPath + " could not be closed.");
          }
        } else {
          log.info("no chunks written to " + previousPath + ", deleting");
          fs.delete(previousPath, false);
        }
      }

      Path newOutputPath = new Path(newName + ".chukwa");
      FSDataOutputStream newOutputStr = fs.create(newOutputPath);
      // Uncompressed for now
      seqFileWriter = SequenceFile.createWriter(conf, newOutputStr,
          ChukwaArchiveKey.class, ChunkImpl.class,
          SequenceFile.CompressionType.NONE, null);

      // reset these once we know that seqFileWriter was created
      currentOutputStr = newOutputStr;
      currentPath = newOutputPath;
      currentFileName = newName;
      bytesThisRotate = 0;
    } catch (Throwable e) {
      log.warn("Got an exception trying to rotate. Will try again in " +
              rotateInterval/1000 + " seconds." ,e);
    } finally {
      lock.release();
    }

    // Schedule the next timer
    scheduleNextRotation();

  }

  /**
   * Schedules the rotate task using either a fixed time interval scheme or a
   * relative time interval scheme as specified by the
   * chukwaCollector.isFixedTimeRotatorScheme configuration parameter. If the
   * value of this parameter is true then next rotation will be scheduled at a
   * fixed offset from the current rotateInterval. This fixed offset is
   * provided by chukwaCollector.fixedTimeIntervalOffset configuration
   * parameter.
   */
  void scheduleNextRotation(){
    long delay = rotateInterval;
    if (if_fixed_interval) {
      long currentTime = System.currentTimeMillis();
      delay = getDelayForFixedInterval(currentTime, rotateInterval, offsetInterval);
    }
    rotateTimer = new Timer();
    rotateTimer.schedule(new TimerTask() {
      public void run() {
        rotate();
      }
    }, delay);
  }

  /**
   * Calculates delay for scheduling the next rotation in case of
   * FixedTimeRotatorScheme. This delay is the time difference between the
   * currentTimestamp (t1) and the next time the collector should rotate the
   * sequence files (t2). t2 is the time when the current rotateInterval ends
   * plus an offset (as set by chukwaCollector.FixedTimeIntervalOffset).
   * So, delay = t2 - t1
   *
   * @param currentTime - the current timestamp
   * @param rotateInterval - chukwaCollector.rotateInterval
   * @param offsetInterval - chukwaCollector.fixedTimeIntervalOffset
   * @return delay for scheduling next rotation
   */
  long getDelayForFixedInterval(long currentTime, long rotateInterval, long offsetInterval){
    // time since last rounded interval
    long remainder = (currentTime % rotateInterval);
    long prevRoundedInterval = currentTime - remainder;
    long nextRoundedInterval = prevRoundedInterval + rotateInterval;
    long delay = nextRoundedInterval - currentTime + offsetInterval;

    if (log.isInfoEnabled()) {
      log.info("currentTime="+currentTime+" prevRoundedInterval="+
             prevRoundedInterval+" nextRoundedInterval" +
            "="+nextRoundedInterval+" delay="+delay);
    }

    return delay;
  }

  /**
   * Rounds the current time down to the hour for the archive key's time
   * partition, and records the start of the next hour as the moment at which
   * the partition must be recomputed (checked in add()).
   */
  protected void computeTimePeriod() {
    synchronized (calendar) {
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      timePeriod = calendar.getTimeInMillis();
      calendar.add(Calendar.HOUR, 1);
      nextTimePeriodComputation = calendar.getTimeInMillis();
    }
  }

  /**
   * Appends each chunk to the current sequence file under a freshly-built
   * ChukwaArchiveKey, recording a (future-file-name, stream offset) pair per
   * chunk in the returned COMMIT_PENDING.
   *
   * @throws WriterException if the writer is not running
   * @return COMMIT_PENDING on success, COMMIT_FAIL on IOException; on other
   *         Throwables the writer marks itself stopped and still returns the
   *         partially-filled result.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    COMMIT_PENDING result = new COMMIT_PENDING(chunks.size());
    if (!isRunning) {
      log.info("Collector not ready");
      throw new WriterException("Collector not ready");
    }

    ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

    if (System.currentTimeMillis() >= nextTimePeriodComputation) {
      computeTimePeriod();
    }
    try {
      lock.acquire();
      for (Chunk chunk : chunks) {
        archiveKey.setTimePartition(timePeriod);
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
            + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());

        seqFileWriter.append(archiveKey, chunk);

        // compute size for stats only if append succeeded. Note though that
        // seqFileWriter.append can continue taking data for quite some time
        // after HDFS goes down while the client is trying to reconnect. Hence
        // these stats might not reflect reality during an HDFS outage.
        dataSize += chunk.getData().length;
        bytesThisRotate += chunk.getData().length;

        String futureName = currentPath.getName().replace(".chukwa", ".done");
        result.addPend(futureName, currentOutputStr.getPos());

      }
    }
    catch (IOException e) {
      log.error("IOException when trying to write a chunk, Collector will return error and keep running.", e);
      return COMMIT_FAIL;
    }
    catch (Throwable e) {
      // We don't want to loose anything
      log.fatal("IOException when trying to write a chunk, Collector is going to exit!", e);
      isRunning = false;
    } finally {
      lock.release();
    }
    return result;
  }

  /**
   * Stops timers and, if the sink lock can be acquired within
   * ACQ_WAIT_ON_TERM ms, closes and renames/deletes the current sink file.
   * If the lock cannot be acquired the file is left as-is.
   */
  public void close() {
    
    isRunning = false;

    if (statTimer != null) {
      statTimer.cancel();
    }

    if (rotateTimer != null) {
      rotateTimer.cancel();
    }

    // If we are here it's either because of an HDFS exception
    // or Collector has received a kill -TERM
    boolean gotLock = false;
    try {
      gotLock = lock.tryAcquire(ACQ_WAIT_ON_TERM, TimeUnit.MILLISECONDS);
      if(gotLock) {
        
        if (this.currentOutputStr != null) {
          this.currentOutputStr.close();
        }
        // NOTE: the else below binds to the inner if (bytesThisRotate > 0),
        // which is the intended pairing; braces would make this explicit.
        if(ENABLE_ROTATION_ON_CLOSE)
          if(bytesThisRotate > 0)
            fs.rename(currentPath, new Path(currentFileName + ".done"));
          else
            fs.delete(currentPath, false);
      }
    } catch (Throwable e) {
     log.warn("cannot rename dataSink file:" + currentPath,e);
    } finally {
      if(gotLock)
        lock.release();
    }
  }

  /** Test hook: disable the final rename/delete performed by close(). */
  public static void setEnableRotationOnClose(boolean b) {
    ENABLE_ROTATION_ON_CLOSE = b;
  }

}
| 8,236 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/SocketTeeWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ArrayBlockingQueue;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.util.Filter;
import org.apache.hadoop.chukwa.util.RegexUtil.CheckedPatternSyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.Charset;
import java.io.*;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
* Effectively a "Tee" in the writer pipeline.
* Accepts incoming connections on port specified by chukwaCollector.tee.port.
* Defaults to 9094
*
* Protocol is as follows:
* Client ---> TeeWriter "RAW | WRITABLE <filter>"
* as per DumpChunks.
*
* TeeWriter ---> Client "OK\n"
* In RAW mode
* TeeWriter ---> Client (length(int) byte[length])*
* An indefinite sequence of length, followed by byte array.
*
* In Writable mode
* TeeWriter ---> Client (Chunk serialized as Writable)*
* An indefinite sequence of serialized chunks
*
* In English: clients should connect and say either "RAW " or "WRITABLE "
* followed by a filter. (Note that the keyword is followed by exactly one space.)
* They'll then receive either a sequence of byte arrays or of writable-serialized.
*
* Option chukwaCollector.tee.keepalive controls using TCP keepalive. Defaults to true.
*
*/
public class SocketTeeWriter extends PipelineableWriter {

  public static final String WRITABLE = "WRITABLE";
  public static final String RAW = "RAW";
  public static final String ASCII_HEADER = "HEADER";

  // Wire formats a client may request; see the class comment for the protocol.
  static enum DataFormat {Raw, Writable, Header};

  static boolean USE_KEEPALIVE = true;
  public static final int DEFAULT_PORT = 9094;
  // Per-tee bounded queue depth; chunks are dropped (not blocked on) when full.
  static int QUEUE_LENGTH = 1000;

  static Logger log = Logger.getLogger(SocketTeeWriter.class);
  volatile boolean running = true;
  // NOTE(review): 'timeout' is never assigned, so setSoTimeout(0) disables the
  // socket read timeout -- confirm this is intended.
  int timeout;
//  private final ExecutorService pool;

  /**
   * Listens for incoming connections, spawns a Tee to deal with each.
   */
  class SocketListenThread extends Thread {
    ServerSocket s;

    public SocketListenThread(Configuration conf) throws IOException {
      int portno = conf.getInt("chukwa.tee.port", DEFAULT_PORT);
      USE_KEEPALIVE = conf.getBoolean("chukwa.tee.keepalive", true);
      s = new ServerSocket(portno);
      setDaemon(true);
    }

    public void run() {
      log.info("listen thread started");
      try{
        while(running) {
          Socket sock = s.accept();
          log.info("got connection from " + sock.getInetAddress());
          new Tee(sock);
        }
      } catch(IOException e) {
        // Expected on shutdown(): closing the server socket breaks accept().
        log.debug(ExceptionUtil.getStackTrace(e));
      }
    }

    public void shutdown() {
      try{
        //running was set to false by caller.
        s.close(); //to break out of run loop
        this.interrupt();
      } catch(IOException e) {
        log.debug(ExceptionUtil.getStackTrace(e));
      }
    }
  }

  /////////////////Internal class Tee//////////////////////

  /**
   * Manages a single socket connection
   */
  class Tee implements Runnable {
    Socket sock;
    BufferedReader in;
    DataOutputStream out;
    Filter rules;       // which chunks this client wants
    DataFormat fmt;     // how to serialize them
    final BlockingQueue<Chunk> sendQ;

    public Tee(Socket s) throws IOException {
      sock = s;
      //now initialize asynchronously
      sendQ = new ArrayBlockingQueue<Chunk>(QUEUE_LENGTH);

      Thread t = new Thread(this);
      t.setDaemon(true);
      t.start();
    }

    /**
     * Performs the protocol handshake, then drains sendQ to the client until
     * the connection drops, serializing chunks per the negotiated format.
     */
    public void run() {
      setup();
      try {
        while(sock.isConnected()) {
          Chunk c = sendQ.take();

          if(fmt == DataFormat.Raw) {
            byte[] data = c.getData();
            out.writeInt(data.length);
            out.write(data);
          } else if(fmt == DataFormat.Writable)
            c.write(out);
          else {
            byte[] data = c.getData();
            byte[] header = (c.getSource()+ " " + c.getDataType() + " " + c.getStreamName()+ " "+
                c.getSeqID()+"\n").getBytes(Charset.forName("UTF-8"));
            out.writeInt(data.length+ header.length);
            out.write(header);
            out.write(data);
          }
        }
        out.flush();
      } catch(IOException e) {
        log.info("lost tee: "+ e.toString());
        synchronized(tees) {
          tees.remove(this);
        }
      } catch(InterruptedException e) {
        //exit quietly, but preserve the interrupt status of this daemon thread
        Thread.currentThread().interrupt();
      }
    }

    /**
     * initializes the tee: reads and validates the "RAW|WRITABLE|HEADER filter"
     * command line, replies "OK\n" on success, and registers this tee.
     * On a bad command the error is written back and the socket is closed.
     */
    public void setup() {
      try {   //outer try catches IOExceptions
       try { //inner try catches bad command syntax errors
        sock.setSoTimeout(timeout);
        sock.setKeepAlive(USE_KEEPALIVE);
        in = new BufferedReader(new InputStreamReader(sock.getInputStream(), Charset.forName("UTF-8")));
        out = new DataOutputStream(sock.getOutputStream());
        String cmd = in.readLine();
        if(cmd==null) {
          throw new IllegalArgumentException("No input found.");
        }
        if(!cmd.contains(" ")) {
          throw new IllegalArgumentException(
              "command should be keyword pattern, but no ' ' seen: " + cmd);
        }
        String uppercased = cmd.substring(0, cmd.indexOf(' ')).toUpperCase();
        if(RAW.equals(uppercased))
          fmt = DataFormat.Raw;
        else if(WRITABLE.equals(uppercased))
          fmt = DataFormat.Writable;
        else if(ASCII_HEADER.equals(uppercased))
          fmt = DataFormat.Header;
        else {
          throw new IllegalArgumentException("bad command '" + uppercased+
              "' -- starts with neither '"+ RAW+ "' nor '"+ WRITABLE + " nor "
              + ASCII_HEADER+"':" + cmd);
        }

        String cmdAfterSpace = cmd.substring(cmd.indexOf(' ')+1);
        if(cmdAfterSpace.toLowerCase().equals("all"))
          rules = Filter.ALL;
        else
          try {
            rules = new Filter(cmdAfterSpace);
          } catch (CheckedPatternSyntaxException pse) {
            out.write("Error parsing command as a regex: ".getBytes(Charset.forName("UTF-8")));
            out.write(pse.getMessage().getBytes(Charset.forName("UTF-8")));
            out.writeByte('\n');
            out.close();
            in.close();
            sock.close();
            log.warn(pse);
            return; //abort initialization on unparseable filter
          }

          //now that we read everything OK we can add ourselves to list, and return.
        synchronized(tees) {
          tees.add(this);
        }
        out.write("OK\n".getBytes(Charset.forName("UTF-8")));
        log.info("tee to " + sock.getInetAddress() + " established");
      } catch(IllegalArgumentException e) {
          out.write(e.toString().getBytes(Charset.forName("UTF-8")));
          out.writeByte('\n');
          out.close();
          in.close();
          sock.close();
          log.warn(e);
        }//end inner catch
      } catch(IOException e) { //end outer catch
         log.warn(e);
      }
    }

    public void close() {
      try {
        out.close();
        in.close();
      } catch(Exception e) {
        log.debug(ExceptionUtil.getStackTrace(e));
      }
    }

    public void handle(Chunk c) {

      //don't ever block; just ignore this chunk if we don't have room for it.
      if(rules.matches(c)) {
        if(!sendQ.offer(c)) {
          log.debug("Queue is full.");
        }
      }
    }
  }

  /////////////////Main class SocketTeeWriter//////////////////////

  SocketListenThread listenThread;
  List<Tee> tees;

  @Override
  public void setNextStage(ChukwaWriter next) {
    this.next = next;
  }

  /**
   * Passes chunks to the next pipeline stage, then offers each chunk to every
   * connected tee (non-blocking; tees drop chunks when their queue is full).
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    CommitStatus rv = ChukwaWriter.COMMIT_OK;
    if (next != null)
      rv = next.add(chunks); //pass data through
    synchronized(tees) {
      Iterator<Tee> loop = tees.iterator();
      while(loop.hasNext()) {
        Tee t = loop.next();
        for(Chunk c: chunks) {
          t.handle(c);
        }
      }
    }
    return rv;
  }

  @Override
  public void close() throws WriterException {
    if (next != null)
      next.close();
    running = false;
    listenThread.shutdown();
  }

  @Override
  public void init(Configuration c) throws WriterException {
    // Create the tee list *before* accepting connections; previously the
    // listen thread was started first, so a client connecting immediately
    // could hit a null 'tees' in Tee.setup() and crash the handshake.
    tees = new ArrayList<Tee>();
    try {
      listenThread = new SocketListenThread(c);
      listenThread.start();
    } catch (IOException e) {
      throw new WriterException(e);
    }
  }
}
| 8,237 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/ExtractorWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.collector.servlet.LogDisplayServlet;
import org.apache.hadoop.conf.Configuration;
/**
 * Pipeline stage that copies every chunk to a {@link LogDisplayServlet} (if
 * one has registered via {@link #setRecipient}) before passing the chunks on
 * to the next stage.
 */
public class ExtractorWriter extends PipelineableWriter {

  // Shared across all instances; null until a servlet registers itself.
  private static LogDisplayServlet recipient;

  @Override
  public void close() throws WriterException {
    // Guard against a null next stage, consistent with add(); previously this
    // threw NullPointerException when the writer ended the pipeline.
    if (next != null)
      next.close();
  }

  @Override
  public void init(Configuration c) throws WriterException {
    // No configuration needed; the recipient is injected statically.
  }

  /**
   * Forwards chunks to the registered servlet (if any), then to the next
   * pipeline stage.
   *
   * @return the downstream stage's status, or COMMIT_OK at pipeline end
   */
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    if(recipient != null)
      recipient.add(chunks);
    if (next != null)
      return next.add(chunks); //pass data through
    else
      return ChukwaWriter.COMMIT_OK;
  }

  /** Registers (or clears, with null) the servlet that should see chunks. */
  public static void setRecipient(LogDisplayServlet logDisplayServlet) {
    recipient = logDisplayServlet;
  }
}
| 8,238 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/Dedup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.conf.Configuration;
/**
* Uses a fixed size cache to check for and filter out duplicate chunks.
* Duplicate detection uses chunk metadata, not content.
*
*/
/**
 * Uses a fixed size cache to check for and filter out duplicate chunks.
 * Duplicate detection uses chunk metadata (stream name + sequence id),
 * not content.
 */
public class Dedup extends PipelineableWriter {

  /** Identity of a chunk for duplicate detection: stream name plus sequence id. */
  static final class DedupKey {
    String name;
    long val; // sequence number

    public DedupKey(String n, long p) {
      name = n;
      val = p;
    }

    public int hashCode() {
      // Fold both halves of the long into the string hash.
      return (int) (name.hashCode() ^ val ^ (val >> 32));
    }

    public boolean equals(Object dk) {
      if (dk instanceof DedupKey)
        return name.equals(((DedupKey) dk).name) && val == ((DedupKey) dk).val;
      else
        return false;
    }
  }

  /**
   * A bounded set with FIFO eviction: once maxSize entries are cached, the
   * oldest entries are dropped to make room. A maxSize of 0 disables caching
   * entirely (nothing is ever considered a duplicate).
   */
  static class FixedSizeCache<EntryType> {
    final HashSet<EntryType> hs;
    final Queue<EntryType> toDrop; // insertion order; head is oldest
    final int maxSize;
    long dupchunks = 0; // duplicates detected so far, for diagnostics

    public FixedSizeCache(int size) {
      maxSize = size;
      hs = new HashSet<EntryType>(maxSize);
      toDrop = new ArrayDeque<EntryType>(maxSize);
    }

    public synchronized void add(EntryType t) {
      if (maxSize == 0) // caching disabled
        return;
      // Evict oldest entries until there is room (was wrapped in a redundant if).
      while (hs.size() >= maxSize) {
        EntryType td = toDrop.remove();
        hs.remove(td);
      }
      hs.add(t);
      toDrop.add(t);
    }

    /**
     * @return true if t was already cached (a duplicate); otherwise caches it
     *         and returns false.
     */
    private synchronized boolean addAndCheck(EntryType t) {
      if (maxSize == 0) // caching disabled; nothing is ever a duplicate
        return false;
      boolean seen = hs.contains(t);
      if (seen)
        dupchunks++;
      else {
        hs.add(t);
        toDrop.add(t);
      }
      return seen;
    }
  }

  FixedSizeCache<DedupKey> cache;

  /**
   * Forwards only chunks whose (stream, seqID) key has not been seen recently.
   *
   * @return the next stage's status; null when every chunk was a duplicate
   *         (historical contract, preserved for existing callers)
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    ArrayList<Chunk> passedThrough = new ArrayList<Chunk>();
    for (Chunk c : chunks)
      if (!cache.addAndCheck(new DedupKey(c.getStreamName(), c.getSeqID())))
        passedThrough.add(c);
    if (passedThrough.isEmpty())
      return null;
    if (next != null)
      return next.add(passedThrough);
    // End of pipeline: match PipelineableWriter's null-next behavior instead
    // of throwing NullPointerException.
    return COMMIT_OK;
  }

  @Override
  public void close() throws WriterException {
    // Guard against a null next stage (previously an unconditional NPE).
    if (next != null)
      next.close();
  }

  @Override
  public void init(Configuration c) throws WriterException {
    int csize = c.getInt("chukwaCollector.chunkSuppressBufferSize", 0);
    cache = new FixedSizeCache<DedupKey>(csize);
  }
}
| 8,239 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/InMemoryWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.io.*;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
/**
 * ChukwaWriter that accumulates serialized chunks in an in-memory buffer,
 * intended for tests: readOutChunk() lets a test retrieve what was written.
 */
public class InMemoryWriter implements ChukwaWriter {
  ByteArrayOutputStream buf;

  public void close() {
    buf.reset();
  }

  public void init(Configuration conf) throws WriterException {
    buf = new ByteArrayOutputStream();
  }

  /**
   * Serializes one chunk into the buffer and wakes any reader blocked in
   * readOutChunk() (previously readers were never notified and relied solely
   * on their timed wait expiring).
   *
   * @throws WriterException wrapping any serialization IOException
   */
  public void add(Chunk data) throws WriterException {
    DataOutputStream dos = new DataOutputStream(buf);
    try {
      data.write(dos);
    } catch (IOException e) {
      // Cause is preserved in the wrapper; no need to also printStackTrace.
      throw new WriterException(e);
    }
    synchronized (this) {
      notifyAll();
    }
  }

  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    for (Chunk chunk : chunks) {
      add(chunk);
    }
    return COMMIT_OK;
  }

  // Lazily-created snapshot of the buffer; note it is built once, so chunks
  // appended after the first read are not visible through it.
  DataInputStream dis = null;

  /**
   * Try to read bytes, waiting up to ms
   *
   * @param bytes amount to try to read
   * @param ms time to wait
   * @return a newly read-in chunk, or null if the timeout expired (or the
   *         calling thread was interrupted) before enough bytes arrived
   * @throws IOException if error reading data
   */
  public Chunk readOutChunk(int bytes, int ms) throws IOException {
    long readStartTime = System.currentTimeMillis();
    try {
      while (buf.size() < bytes) {
        long timeLeft = ms - System.currentTimeMillis() + readStartTime;
        if (timeLeft <= 0)
          return null; // honor the timeout (previously this spun forever)
        synchronized (this) {
          wait(timeLeft);
        }
      }
      if (dis == null)
        dis = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
      return ChunkImpl.read(dis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return null;
    }
  }
}
| 8,240 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/PipelineableWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
/**
 * Base class for writers that can be chained into a pipeline. Subclasses
 * typically override {@code add} to transform or observe chunks before
 * delegating to the next stage.
 */
public abstract class PipelineableWriter implements ChukwaWriter {
  // Downstream stage; null means this writer is the end of the pipeline.
  protected ChukwaWriter next;

  /** Wires up the stage that should receive chunks after this one. */
  public void setNextStage(ChukwaWriter next) {
    this.next = next;
  }

  /**
   * Default pass-through: hand the chunks to the downstream stage, or report
   * success immediately when there is no downstream stage.
   */
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    ChukwaWriter downstream = next;
    return (downstream == null) ? ChukwaWriter.COMMIT_OK : downstream.add(chunks);
  }
}
| 8,241 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/ConsoleWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.conf.Configuration;
/**
 * Debug writer that counts chunk payload bytes, periodically prints a data
 * rate to stdout, and (optionally) echoes each record's metadata and content.
 */
public class ConsoleWriter implements ChukwaWriter {
  boolean printData;             // echo chunk contents when true
  volatile long dataSize = 0;    // cumulative payload bytes seen
  final Timer statTimer;

  /** Prints the payload data rate (bytes/sec) observed since its last run. */
  private class StatReportingTask extends TimerTask {
    private long lastTs = System.currentTimeMillis();
    private long lastDataSize = 0;

    public void run() {
      long now = System.currentTimeMillis();
      long elapsedMs = now - lastTs;
      lastTs = now;

      long totalSoFar = dataSize;
      // refers only to data field, not including http or chukwa headers
      long bytesPerSec = 1000 * (totalSoFar - lastDataSize) / elapsedMs;
      lastDataSize = totalSoFar;

      System.out.println("stat=datacollection.writer.ConsoleWriter|dataRate="
          + bytesPerSec);
    }
  };

  public ConsoleWriter() {
    this(true);
  }

  public ConsoleWriter(boolean printData) {
    this.printData = printData;
    statTimer = new Timer();
  }

  public void close() {
    statTimer.cancel();
  }

  public void init(Configuration conf) throws WriterException {
    System.out.println("---- DUMMY HDFS WRITER IN USE ---");
    statTimer.schedule(new StatReportingTask(), 1000, 10 * 1000);
  }

  /** Counts the chunk's payload and, if enabled, prints each record in it. */
  public void add(Chunk data) throws WriterException {
    dataSize += data.getData().length;
    if (!printData) {
      return;
    }

    System.out.println(data.getData().length + " bytes of data in chunk");
    int recordStart = 0;
    for (int offset : data.getRecordOffsets()) {
      System.out.print(data.getStreamName());
      System.out.print(" ");
      System.out.print(data.getSource());
      System.out.print(" ");
      System.out.print(data.getDataType());
      System.out.print(") ");
      System.out.print(new String(data.getData(), recordStart, offset
          - recordStart + 1, Charset.forName("UTF-8")));
      recordStart = offset + 1;
    }
  }

  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    for (Chunk c : chunks) {
      add(c);
    }
    return COMMIT_OK;
  }
}
| 8,242 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/WriterException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.io.IOException;
/**
 * Signals a failure inside a Chukwa writer pipeline stage. Extends
 * {@code IOException} so existing I/O-oriented callers can catch it as such.
 */
public class WriterException extends IOException {

  private static final long serialVersionUID = -4207275200546397146L;

  /** Creates an exception with neither detail message nor cause. */
  public WriterException() {
  }

  /** Creates an exception carrying only a detail message. */
  public WriterException(String message) {
    super(message);
  }

  /** Creates an exception wrapping an underlying cause. */
  public WriterException(Throwable cause) {
    super(cause);
  }

  /** Creates an exception with both a detail message and an underlying cause. */
  public WriterException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 8,243 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/ClientAck.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import org.apache.log4j.Logger;
/**
 * A one-shot acknowledgement shared between a waiting client thread and the
 * server thread that resolves it. The client blocks in {@link #wait4Ack()}
 * until {@link #releaseLock(int, Throwable)} is called or an absolute
 * deadline passes.
 */
public class ClientAck {
  static Logger log = Logger.getLogger(ClientAck.class);

  // TODO move all constant to config
  public static final int OK = 100;
  public static final int KO = -100;
  public static final int KO_LOCK = -200;

  // Absolute deadline (ms since epoch) after which wait4Ack() gives up.
  private long ts = 0;
  private final Object lock = new Object();
  // 0 = pending; becomes OK / KO / KO_LOCK once resolved.
  private int status = 0;
  private Throwable exception = null;
  // Per-iteration wait bound; the loop rechecks the deadline on each wakeup.
  private int waitTime = 6 * 1000;// 6 secs
  private int timeOut = 15 * 1000;

  public ClientAck() {
    this.ts = System.currentTimeMillis() + timeOut;
  }

  public int getTimeOut() {
    return timeOut;
  }

  /**
   * Blocks until this ack is resolved by releaseLock(), or until the deadline
   * passes -- in which case status becomes KO_LOCK and exception is set to a
   * RuntimeException describing the timeout.
   */
  public void wait4Ack() {
    synchronized (lock) {
      while (this.status == 0) {
        try {
          lock.wait(waitTime);
        } catch (InterruptedException e) {
          // Preserve the interrupt status (it was previously swallowed);
          // the deadline check below still bounds how long we can loop.
          Thread.currentThread().interrupt();
        }
        long now = System.currentTimeMillis();
        if (now > ts) {
          this.status = KO_LOCK;
          this.exception = new RuntimeException("More than maximum time lock ["
              + this.toString() + "]");
        }
      }
    }
  }

  /**
   * Resolves this ack and wakes all threads blocked in wait4Ack(). The field
   * writes happen-before the waiters observe them because they precede this
   * thread's release of {@code lock}.
   *
   * @param status one of OK, KO, KO_LOCK
   * @param exception failure detail, or null on success
   */
  public void releaseLock(int status, Throwable exception) {
    this.exception = exception;
    this.status = status;
    synchronized (lock) {
      lock.notifyAll();
    }
  }

  public int getStatus() {
    return status;
  }

  public void setStatus(int status) {
    this.status = status;
  }

  public Throwable getException() {
    return exception;
  }

  public void setException(Throwable exception) {
    this.exception = exception;
  }
}
| 8,244 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/NullWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
* Minimal writer; does nothing with data.
*
* Useful primarily as an end-of-pipeline stage, if stuff in the middle
* is accomplishing something useful.
*
*/
public class NullWriter implements ChukwaWriter {
  private static final Logger log = Logger.getLogger(NullWriter.class);

  // Throttle rate in KB per second; zero or negative disables throttling.
  int maxDataRate = Integer.MAX_VALUE;
  public static final String RATE_OPT_NAME = "nullWriter.dataRate";

  /**
   * Discards every chunk, optionally sleeping to simulate a bounded write
   * bandwidth, and always reports a successful commit.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    try {
      int totalBytes = 0;
      for (Chunk chunk : chunks) {
        totalBytes += chunk.getData().length;
      }
      if (maxDataRate > 0) {
        Thread.sleep(totalBytes / maxDataRate);
      }
    } catch (Exception e) {
      log.debug(ExceptionUtil.getStackTrace(e));
    }
    return COMMIT_OK;
  }

  /** Nothing to release. */
  @Override
  public void close() throws WriterException {
  }

  /** Reads the configured data rate; defaults to 0, i.e. no throttling. */
  @Override
  public void init(Configuration c) throws WriterException {
    maxDataRate = c.getInt(RATE_OPT_NAME, 0);
  }
}
| 8,245 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/PipelineStageWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.log4j.Logger;
/**
* A pipeline of Pipelineable writers
* Controlled by option 'chukwaCollector.pipeline', which should be a comma-
* separated list of classnames.
*
*/
public class PipelineStageWriter implements ChukwaWriter {
  Logger log = Logger.getLogger(PipelineStageWriter.class);

  ChukwaWriter writer; // head of pipeline

  /** Builds the pipeline from the default Chukwa configuration. */
  public PipelineStageWriter() throws WriterException {
    Configuration conf = new ChukwaConfiguration();
    init(conf);
  }

  /** Builds the pipeline from the supplied configuration. */
  public PipelineStageWriter(Configuration conf) throws WriterException {
    init(conf);
  }

  /** Hands the chunks to the head of the pipeline. */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    return writer.add(chunks);
  }

  /** Closes the head of the pipeline (stages propagate close downstream). */
  @Override
  public void close() throws WriterException {
    writer.close();
  }

  /**
   * Instantiates and chains the writers named (comma-separated) in the
   * 'chukwa.pipeline' property. All stages except the last must be
   * PipelineableWriters; the last stage only needs to be a ChukwaWriter.
   *
   * @throws WriterException if the property is unset, a class cannot be
   *         loaded/instantiated, or a stage has the wrong type
   */
  @Override
  public void init(Configuration conf) throws WriterException {
    if (conf.get("chukwa.pipeline") != null) {
      String pipeline = conf.get("chukwa.pipeline");
      try {
        String[] classes = pipeline.split(",");
        log.info("using pipelined writers, pipe length is " + classes.length);
        PipelineableWriter lastWriter = null;
        if (classes.length > 1) {
          lastWriter = (PipelineableWriter) conf.getClassByName(classes[0])
              .newInstance();
          lastWriter.init(conf);
          writer = lastWriter;
        }
        for (int i = 1; i < classes.length - 1; ++i) {
          Class<?> stageClass = conf.getClassByName(classes[i]);
          // Fix: instantiate each stage exactly once (the original code
          // created one instance for the type check and a second, separate
          // instance for actual use), and fail fast with a WriterException
          // instead of falling through to an undeclared ClassCastException.
          Object st = stageClass.newInstance();
          if (!(st instanceof PipelineableWriter)) {
            log.error("class " + classes[i]
                + " in processing pipeline isn't a PipelineableWriter.");
            throw new WriterException("bad pipeline");
          }
          PipelineableWriter stage = (PipelineableWriter) st;
          stage.init(conf);
          // throws exception if types don't match or class not found; this is
          // OK.
          lastWriter.setNextStage(stage);
          lastWriter = stage;
        }
        // if authentication type is kerberos; login using the specified kerberos principal and keytab file
        for (int i = 0; i < classes.length; i++) {
          if (classes[i].contains("HBaseWriter")) {
            try {
              loginToKerberos(conf);
            } catch (IOException e) {
              throw new WriterException("Unable to login to Kerberos.");
            }
          }
        }
        Class<?> stageClass = conf.getClassByName(classes[classes.length - 1]);
        Object st = stageClass.newInstance();
        if (!(st instanceof ChukwaWriter)) {
          log.error("class " + classes[classes.length - 1]
              + " at end of processing pipeline isn't a ChukwaWriter");
          throw new WriterException("bad pipeline");
        } else {
          ((ChukwaWriter) st).init(conf);
          if (lastWriter != null)
            lastWriter.setNextStage((ChukwaWriter) st);
          else
            writer = (ChukwaWriter) st; // one stage pipeline
        }
        return;
      } catch (WriterException |
          ClassNotFoundException |
          IllegalAccessException |
          InstantiationException e) {
        // if anything went wrong (missing class, etc) we wind up here.
        log.error("failed to set up pipeline, defaulting to SeqFileWriter", e);
        // fall through to default case
        throw new WriterException("bad pipeline");
      }
    } else {
      throw new WriterException("must set chukwa.pipeline");
    }
  }

  /**
   * If authentication type is "kerberos", this method authenticates the Chukwa agent with Kerberized HBase, using the
   * Kerberos principal and keytab file specified in chukwa-agent-conf.xml config file.<br>
   * Does nothing for other authentication type.
   *
   * @throws IOException in event of login failure
   */
  private static void loginToKerberos(Configuration config) throws IOException {
    String agentAuthType = config.get("chukwaAgent.hadoop.authentication.type");
    if (null != agentAuthType && "kerberos".equalsIgnoreCase(agentAuthType)) {
      SecurityUtil.login(config, "chukwaAgent.hadoop.authentication.kerberos.keytab",
          "chukwaAgent.hadoop.authentication.kerberos.principal");
    }
  }
}
| 8,246 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/HttpWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
 * Pipeline stage that forwards whitelisted chunks over a plain TCP socket
 * as length-prefixed byte blobs, then passes all chunks to the next stage.
 */
public class HttpWriter extends PipelineableWriter {
  String _host;
  int _port;
  private Set<String> _whiteListSet = new HashSet<String>();
  private final Logger log = Logger.getLogger(HttpWriter.class);

  /**
   * Reads target host/port and the comma-separated stream-name whitelist
   * from configuration.
   */
  @Override
  public void init(Configuration c) throws WriterException {
    _host = c.get("chukwa.http.writer.host", "localhost");
    String port = c.get("chukwa.http.writer.port", "8802");
    String whiteListProp = c.get("chukwa.http.writer.whitelist", "STATUS");
    for (String entry : whiteListProp.split(",")) {
      _whiteListSet.add(entry.trim());
    }
    try {
      _port = Integer.parseInt(port);
    } catch (NumberFormatException e) {
      throw new WriterException(e);
    }
  }

  /** No persistent resources to release; sockets are per-add(). */
  @Override
  public void close() throws WriterException {
  }

  /**
   * Opens a fresh socket, writes each whitelisted chunk as
   * [int length][bytes], and always forwards the full chunk list to the
   * next stage — even when the socket write failed.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    CommitStatus result = ChukwaWriter.COMMIT_OK;
    Socket socket = null;
    DataOutputStream out = null;
    try {
      socket = new Socket(_host, _port);
      out = new DataOutputStream(socket.getOutputStream());
      for (Chunk chunk : chunks) {
        if (!_whiteListSet.contains(chunk.getStreamName())) {
          continue;
        }
        byte[] payload = chunk.getData();
        out.writeInt(payload.length);
        out.write(payload);
        out.flush();
      }
      log.info("Written chunks");
    } catch (Exception e) {
      throw new WriterException(e);
    } finally {
      // Close failures are logged, never thrown, matching original behavior.
      if (out != null) {
        try {
          out.close();
        } catch (IOException e) {
          log.error("Error closing dataoutput stream:" + e);
        }
      }
      if (socket != null) {
        try {
          socket.close();
        } catch (IOException e) {
          log.error("Error closing socket: " + e);
        }
      }
      if (next != null) {
        result = next.add(chunks); //pass data through
      }
    }
    return result;
  }
}
| 8,247 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/ChukwaWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer;
import java.util.List;
import java.util.ArrayList;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.conf.Configuration;
public interface ChukwaWriter {

  /**
   * Opaque result of an add() call. The three possible outcomes are the
   * COMMIT_OK / COMMIT_FAIL singletons below, or a COMMIT_PENDING instance
   * carrying per-chunk bookkeeping strings.
   */
  public static abstract class CommitStatus {}

  /** Data was durably accepted. */
  public static final CommitStatus COMMIT_OK = new CommitStatus() {};
  /** Data was rejected; the caller may retry. */
  public static final CommitStatus COMMIT_FAIL = new CommitStatus() {};

  /**
   * COMMIT_PENDING should be returned if a writer has written data, but
   * this data may ultimately disappear. Contains a list of strings, format
   * unspecified, that agents can use to find out, eventually, if their data
   * has committed. String <n> corresponds to the nth chunk passed to add().
   *
   * At present, the format is <sinkfilename> <offset>,
   * where sinkfilename is the name of a sinkfile, without directory but with
   * .done suffix, and offset is the last byte of the associated chunk.
   */
  public static class COMMIT_PENDING extends CommitStatus {
    // One entry per pending chunk, in the same order as the add() list.
    public List<String> pendingEntries;

    /**
     * @param entries expected number of entries (capacity hint only)
     */
    public COMMIT_PENDING(int entries) {
      pendingEntries = new ArrayList<String>(entries);
    }

    /**
     * Records one pending chunk as "<filename> <offset>\n".
     */
    public void addPend(String currentFileName, long dataSize) {
      pendingEntries.add(currentFileName+ " " + dataSize+"\n");
    }
  }

  /**
   * Called once to initialize this writer.
   *
   * @param c is Chukwa configuration
   * @throws WriterException if error writing data
   */
  public void init(Configuration c) throws WriterException;

  /**
   * Called repeatedly with data that should be serialized.
   *
   * Subclasses may assume that init() will be called before any calls to
   * add(), and that add() won't be called after close().
   *
   * @param chunks is a list of data to send
   * @return CommitStatus
   * @throws WriterException if error writing data
   */
  public CommitStatus add(List<Chunk> chunks) throws WriterException;

  /**
   * Called once, indicating that the writer should close files and prepare
   * to exit.
   * @throws WriterException if error writing data
   */
  public void close() throws WriterException;
}
| 8,248 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/localfs/LocalToRemoteHdfsMover.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.localfs;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.concurrent.BlockingQueue;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.util.CopySequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
/**
* This class is used by LocalWriter.java.
*
* The only role of this class is to move dataSink files
* from the local file system to the remote HDFS.
*
* Those 2 classes are using a blockingQueue to exchange
* information.
*
* This class will also take care of moving all existing
* done dataSink files (.done) and any dataSink file that
* has not been changed for at least (rotatePeriod+2min).
*
*/
public class LocalToRemoteHdfsMover extends Thread {
  static Logger log = Logger.getLogger(LocalToRemoteHdfsMover.class);

  private FileSystem remoteFs = null;
  private FileSystem localFs = null;
  private Configuration conf = null;
  private String fsname = null;
  private String localOutputDir = null;
  private String remoteOutputDir = null;
  private boolean exitIfHDFSNotavailable = false;
  // Queue of local .done file paths produced by LocalWriter.
  private BlockingQueue<String> fileQueue = null;
  private volatile boolean isRunning = true;

  /**
   * Starts the mover as a daemon thread immediately.
   *
   * @param fileQueue queue of local .done file paths to ship to HDFS
   * @param conf Chukwa/Hadoop configuration
   */
  public LocalToRemoteHdfsMover(BlockingQueue<String> fileQueue ,Configuration conf) {
    this.fileQueue = fileQueue;
    this.conf = conf;
    this.setDaemon(true);
    this.setName("LocalToRemoteHdfsMover");
    this.start();
  }

  /**
   * Resolves the remote and local filesystems and the input/output
   * directories from configuration. Called lazily from run() so a failure
   * here is retried with backoff rather than killing the thread.
   */
  protected void init() throws Throwable {
    // check if they've told us the file system to use
    fsname = conf.get("writer.hdfs.filesystem");
    if (fsname == null || fsname.equals("")) {
      // otherwise try to get the filesystem from hadoop
      fsname = conf.get("fs.defaultFS");
    }

    if (fsname == null) {
      log.error("no filesystem name");
      throw new RuntimeException("no filesystem");
    }

    log.info("remote fs name is " + fsname);
    exitIfHDFSNotavailable = conf.getBoolean(
        "localToRemoteHdfsMover.exitIfHDFSNotavailable", false);

    remoteFs = FileSystem.get(new URI(fsname), conf);
    if (remoteFs == null && exitIfHDFSNotavailable) {
      log.error("can't connect to HDFS.");
      throw new WriterException("can't connect to HDFS.");
    }

    localFs = FileSystem.getLocal(conf);

    remoteOutputDir = conf.get("chukwaCollector.outputDir", "/chukwa/logs/");
    if (!remoteOutputDir.endsWith("/")) {
      remoteOutputDir += "/";
    }

    localOutputDir = conf.get("chukwaCollector.localOutputDir",
        "/chukwa/datasink/");
    if (!localOutputDir.endsWith("/")) {
      localOutputDir += "/";
    }
  }

  /**
   * Copies one local .done file to the remote HDFS output directory.
   * The file is first copied as .chukwa, then atomically renamed to .done,
   * and the local copy is deleted only after the rename succeeds.
   *
   * @param filePath full local path of the .done file
   */
  protected void moveFile(String filePath) throws Exception{
    // strip directory and extension: /local/dir/name.done -> name
    String remoteFilePath = filePath.substring(filePath.lastIndexOf("/")+1,filePath.lastIndexOf("."));
    remoteFilePath = remoteOutputDir + remoteFilePath;
    try {
      Path pLocalPath = new Path(filePath);
      Path pRemoteFilePath = new Path(remoteFilePath + ".chukwa");
      remoteFs.copyFromLocalFile(false, true, pLocalPath, pRemoteFilePath);
      Path pFinalRemoteFilePath = new Path(remoteFilePath + ".done");
      if ( remoteFs.rename(pRemoteFilePath, pFinalRemoteFilePath)) {
        localFs.delete(pLocalPath,false);
        log.info("move done deleting from local: " + pLocalPath);
      } else {
        throw new RuntimeException("Cannot rename remote file, " + pRemoteFilePath + " to " + pFinalRemoteFilePath);
      }
    }catch(FileNotFoundException ex) {
      log.debug("File not found: " + remoteFilePath);
      //do nothing since if the file is no longer there it's
      // because it has already been moved over by the cleanup task.
    }
    catch (Exception e) {
      log.warn("Cannot copy to the remote HDFS",e);
      throw e;
    }
  }

  /**
   * Sweeps the local output directory for leftovers of a previous run:
   * deletes stale .recover files, promotes .recoverDone files to .done
   * (removing duplicate .chukwa twins), ships .done files, and repairs
   * .chukwa files that have been idle longer than rotateInterval + 2min
   * (a sign the previous process died mid-write).
   */
  protected void cleanup() throws Exception{
    try {
      int rotateInterval = conf.getInt("chukwaCollector.rotateInterval",
          1000 * 60 * 5);// defaults to 5 minutes
      Path pLocalOutputDir = new Path(localOutputDir);
      FileStatus[] files = localFs.listStatus(pLocalOutputDir);
      String fileName = null;
      for (FileStatus file: files) {
        fileName = file.getPath().getName();

        if (fileName.endsWith(".recover")) {
          //.recover files indicate a previously failed copying attempt of .chukwa files
          Path recoverPath= new Path(localOutputDir+fileName);
          localFs.delete(recoverPath, false);
          log.info("Deleted .recover file, " + localOutputDir + fileName);
        } else if (fileName.endsWith(".recoverDone")) {
          //.recoverDone files are valid sink files that have not been renamed to .done
          // First, check if there are still any .chukwa files with the same name
          String chukwaFileName= fileName.replace(".recoverDone", ".chukwa");
          Boolean fileNotFound=true;
          int i=0;
          while (i<files.length && fileNotFound) {
            String currentFileName = files[i].getPath().getName();
            if (currentFileName.equals(chukwaFileName)){
              //Remove the .chukwa file found as it has already been recovered
              fileNotFound = false;
              Path chukwaFilePath = new Path(localOutputDir+chukwaFileName);
              localFs.delete(chukwaFilePath,false);
              log.info(".recoverDone file exists, deleted duplicate .chukwa file, "
                  + localOutputDir + fileName);
            }
            i++;
          }
          //Finally, rename .recoverDone file to .done
          String doneFileName= fileName.replace(".recoverDone", ".done");
          Path donePath= new Path(localOutputDir+doneFileName);
          Path recoverDonePath= new Path(localOutputDir+fileName);
          localFs.rename(recoverDonePath, donePath);
          log.info("Renamed .recoverDone file to .done, "+ localOutputDir + fileName);
        } else if (fileName.endsWith(".done")) {
          moveFile(localOutputDir + fileName);
        } else if (fileName.endsWith(".chukwa")) {
          long lastPeriod = System.currentTimeMillis() - rotateInterval - (2*60*1000);
          if (file.getModificationTime() < lastPeriod) {
            //. chukwa file has not modified for some time, may indicate collector had previously crashed
            log.info("Copying .chukwa file to valid sink file before moving, " + localOutputDir + fileName);
            CopySequenceFile.createValidSequenceFile(conf,localOutputDir,fileName,localFs);
          }
        }
      }
    } catch (Exception e) {
      log.warn("Cannot copy to the remote HDFS",e);
      throw e;
    }
  }

  @Override
  public void run() {
    boolean inError = true;
    String filePath = null;

    while (isRunning) {
      try {
        if (inError) {
          init();
          cleanup();
          inError = false;
        }

        // Blocks until LocalWriter hands over another .done file.
        filePath = fileQueue.take();
        moveFile(filePath);
        cleanup();
        filePath = null;
      } catch (Throwable e) {
        // Fix: shutdown() interrupts this thread to break it out of the
        // blocking take(); in that case exit at once instead of entering
        // the 60-second error backoff.
        if (!isRunning) {
          break;
        }
        log.warn("Error in LocalToHdfsMover", e);
        inError = true;
        try {
          log.info("Got an exception going to sleep for 60 secs");
          Thread.sleep(60000);
        } catch (Throwable e2) {
          log.warn("Exception while sleeping", e2);
        }
      }
    }
    log.info(Thread.currentThread().getName() + " is exiting.");
  }

  /**
   * Requests the mover thread to stop. Interrupts the thread so that a
   * blocked fileQueue.take() (or the error backoff sleep) returns promptly;
   * previously the flag alone left the thread parked until the next file
   * arrived.
   */
  public void shutdown() {
    this.isRunning = false;
    this.interrupt();
  }
}
| 8,249 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/localfs/LocalWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.localfs;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.Calendar;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.datacollection.writer.parquet.ChukwaAvroSchema;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
/**
* <p>This class <b>is</b> thread-safe -- rotate() and save() both synchronize on
* lock object.
* </p>
* <p>
* Write data to a local fileSystem then move it to the remote HDFS
* <br>
* Warning:
* <br>
* There's no lock/waiting time for the remote client.
* The connection is released as soon as the last append is done,
* so therefore there is no guarantee that this class will not loose
* any data.
* <br>
* This class has been designed this way for performance reason.
* </p>
* <p>
* In order to use this class, you need to define some parameters,
* in chukwa-collector-conf.xml
* <p>
* <br>
* <property><br>
* <name>chukwaCollector.localOutputDir</name><br>
* <value>/grid/0/gs/chukwa/chukwa-0.1.2/dataSink/</value><br>
* <description>Chukwa data sink directory</description><br>
* </property><br>
*<br>
* <property><br>
* <name>chukwaCollector.writerClass</name><br>
* <value>org.apache.hadoop.chukwa.datacollection.writer.localfs.LocalWriter</value><br>
* <description>Local chukwa writer</description><br>
* </property><br>
* <br>
*/
public class LocalWriter implements ChukwaWriter {
  static Logger log = Logger.getLogger(LocalWriter.class);
  static final int STAT_INTERVAL_SECONDS = 30;
  static String localHostAddr = null;

  // Parquet layout tuning: row-group (block) size and page size in bytes.
  private int blockSize = 128 * 1024 * 1024;
  private int pageSize = 1 * 1024 * 1024;

  // Guards the parquet writer and all rotation state (currentPath etc.).
  private final Object lock = new Object();
  // Completed .done files, consumed by the LocalToRemoteHdfsMover thread.
  private BlockingQueue<String> fileQueue = null;

  @SuppressWarnings("unused")
  private LocalToRemoteHdfsMover localToRemoteHdfsMover = null;
  private FileSystem fs = null;
  private Configuration conf = null;

  private String localOutputDir = null;
  private Calendar calendar = Calendar.getInstance();

  // Full local path of the file being written (with .chukwa suffix) and the
  // same path without any extension.
  private Path currentPath = null;
  private String currentFileName = null;
  private AvroParquetWriter<GenericRecord> parquetWriter = null;

  private int rotateInterval = 1000 * 60;

  // Bytes accepted since the last stat report; reset by StatReportingTask.
  private volatile long dataSize = 0;
  // False until the first rotate() succeeds; add() refuses before that.
  private volatile boolean isRunning = false;

  private Timer rotateTimer = null;
  private Timer statTimer = null;

  private Schema avroSchema = null;

  private int initWriteChunkRetries = 10;
  private int writeChunkRetries = initWriteChunkRetries;
  private boolean chunksWrittenThisRotate = false;

  // Hour-aligned time partition for archive keys, recomputed hourly.
  private long timePeriod = -1;
  private long nextTimePeriodComputation = -1;

  // rotate() fails when free disk drops below this percentage of total.
  private int minPercentFreeDisk = 20;

  static {
    try {
      localHostAddr = "_" + InetAddress.getLocalHost().getHostName() + "_";
    } catch (UnknownHostException e) {
      localHostAddr = "-NA-";
    }
  }

  public LocalWriter(Configuration conf) throws WriterException {
    setup(conf);
  }

  public void init(Configuration conf) throws WriterException {
  }

  /**
   * Reads configuration, validates/creates the local data sink directory,
   * starts the rotate and stat timers, and launches the HDFS mover thread.
   *
   * @throws WriterException if the sink directory cannot be created or is
   *         not a directory
   */
  public void setup(Configuration conf) throws WriterException {
    this.conf = conf;

    // load Chukwa Avro schema
    avroSchema = ChukwaAvroSchema.getSchema();

    try {
      fs = FileSystem.getLocal(conf);
      localOutputDir = conf.get("chukwaCollector.localOutputDir",
          "/chukwa/datasink/");
      if (!localOutputDir.endsWith("/")) {
        localOutputDir += "/";
      }
      Path pLocalOutputDir = new Path(localOutputDir);
      if (!fs.exists(pLocalOutputDir)) {
        boolean exist = fs.mkdirs(pLocalOutputDir);
        if (!exist) {
          throw new WriterException("Cannot create local dataSink dir: "
              + localOutputDir);
        }
      } else {
        FileStatus fsLocalOutputDir = fs.getFileStatus(pLocalOutputDir);
        if (!fsLocalOutputDir.isDir()) {
          throw new WriterException("local dataSink dir is not a directory: "
              + localOutputDir);
        }
      }
    } catch (Throwable e) {
      log.fatal("Cannot initialize LocalWriter", e);
      throw new WriterException(e);
    }

    minPercentFreeDisk = conf.getInt("chukwaCollector.minPercentFreeDisk",20);

    rotateInterval = conf.getInt("chukwaCollector.rotateInterval",
        1000 * 60 * 5);// defaults to 5 minutes

    initWriteChunkRetries = conf
        .getInt("chukwaCollector.writeChunkRetries", 10);
    writeChunkRetries = initWriteChunkRetries;

    log.info("rotateInterval is " + rotateInterval);
    log.info("outputDir is " + localOutputDir);
    log.info("localFileSystem is " + fs.getUri().toString());
    log.info("minPercentFreeDisk is " + minPercentFreeDisk);

    if(rotateTimer==null) {
      rotateTimer = new Timer();
      rotateTimer.schedule(new RotateTask(), 0,
          rotateInterval);
    }
    if(statTimer==null) {
      statTimer = new Timer();
      statTimer.schedule(new StatReportingTask(), 0,
          STAT_INTERVAL_SECONDS * 1000);
    }
    fileQueue = new LinkedBlockingQueue<String>();
    localToRemoteHdfsMover = new LocalToRemoteHdfsMover(fileQueue, conf);
  }

  /** Periodic task driving rotate() every rotateInterval ms. */
  private class RotateTask extends TimerTask {
    public void run() {
      try {
        rotate();
      } catch(WriterException e) {
        log.error(ExceptionUtil.getStackTrace(e));
      }
    };
  }

  /** Logs throughput (bytes and KB/sec) every STAT_INTERVAL_SECONDS. */
  private class StatReportingTask extends TimerTask {
    private long lastTs = System.currentTimeMillis();

    public void run() {
      long time = System.currentTimeMillis();
      long currentDs = dataSize;
      dataSize = 0;
      long interval = time - lastTs;
      lastTs = time;
      if(interval <= 0) {
        interval = 1;
      }
      long dataRate = 1000 * currentDs / interval; // kb/sec
      log.info("stat:datacollection.writer.local.LocalWriter dataSize="
          + currentDs + " dataRate=" + dataRate);
    }
  };

  /**
   * Recomputes the hour-aligned time partition and the instant at which it
   * must next be refreshed (top of the next hour).
   */
  protected void computeTimePeriod() {
    synchronized (calendar) {
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      timePeriod = calendar.getTimeInMillis();
      calendar.add(Calendar.HOUR, 1);
      nextTimePeriodComputation = calendar.getTimeInMillis();
    }
  }

  /**
   * Best effort, there's no guarantee that chunks
   * have really been written to disk
   */
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    if (!isRunning) {
      throw new WriterException("Writer not yet ready");
    }
    long now = System.currentTimeMillis();
    if (chunks != null) {
      try {
        chunksWrittenThisRotate = true;
        ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();

        synchronized (lock) {
          if (System.currentTimeMillis() >= nextTimePeriodComputation) {
            computeTimePeriod();
          }

          for (Chunk chunk : chunks) {
            archiveKey.setTimePartition(timePeriod);
            archiveKey.setDataType(chunk.getDataType());
            archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
                + "/" + chunk.getStreamName());
            archiveKey.setSeqId(chunk.getSeqID());
            GenericRecord record = new GenericData.Record(avroSchema);
            record.put("dataType", chunk.getDataType());
            record.put("data", ByteBuffer.wrap(chunk.getData()));
            record.put("tags", chunk.getTags());
            record.put("seqId", chunk.getSeqID());
            record.put("source", chunk.getSource());
            record.put("stream", chunk.getStreamName());
            parquetWriter.write(record);
            // compute size for stats
            dataSize += chunk.getData().length;
          }
        }// End synchro
        long end = System.currentTimeMillis();
        if (log.isDebugEnabled()) {
          log.debug("duration=" + (end-now) + " size=" + chunks.size());
        }
      } catch (IOException e) {
        writeChunkRetries--;
        log.error("Could not save the chunk. ", e);

        if (writeChunkRetries < 0) {
          log
              .fatal("Too many IOException when trying to write a chunk, Collector is going to exit!");
        }
        throw new WriterException(e);
      }
    }
    return COMMIT_OK;
  }

  /**
   * Builds a unique full path (without extension) inside localOutputDir,
   * combining a timestamp, the local host name and a JVM-unique UID.
   */
  protected String getNewFileName() {
    calendar.setTimeInMillis(System.currentTimeMillis());
    String newName = new java.text.SimpleDateFormat("yyyyddHHmmssSSS")
        .format(calendar.getTime());
    newName += localHostAddr + new java.rmi.server.UID().toString();
    newName = newName.replace("-", "");
    newName = newName.replace(":", "");
    newName = newName.replace(".", "");
    newName = localOutputDir + "/" + newName.trim();
    return newName;
  }

  /**
   * Closes the current sink file, publishes it to the mover queue as .done
   * (or deletes it if empty), and opens a fresh .chukwa file.
   *
   * Fixes over the previous version:
   * 1. the old parquet writer is now closed before the file is renamed;
   *    without the close, the parquet footer is never written and the
   *    shipped file is unreadable;
   * 2. the rename target previously was previousPath.getName() with
   *    ".chukwa" already replaced by ".done" PLUS another ".done" appended,
   *    and had its directory component stripped — producing a relative
   *    "name.done.done" path. It is now the full local path with a single
   *    ".done" suffix, which is what LocalToRemoteHdfsMover expects.
   *
   * @throws WriterException if free disk falls below minPercentFreeDisk
   */
  protected void rotate() throws WriterException {
    isRunning = true;
    log.info("start Date [" + calendar.getTime() + "]");
    log.info("Rotate from " + Thread.currentThread().getName());

    String newName = getNewFileName();

    synchronized (lock) {
      try {
        if (currentPath != null) {
          Path previousPath = currentPath;
          // Close the writer first so the parquet footer is flushed.
          if (parquetWriter != null) {
            parquetWriter.close();
          }
          if (chunksWrittenThisRotate) {
            String previousDoneName =
                previousPath.toString().replace(".chukwa", ".done");
            if (fs.exists(previousPath)) {
              fs.rename(previousPath, new Path(previousDoneName));
            }
            fileQueue.add(previousDoneName);
          } else {
            log.info("no chunks written to " + previousPath + ", deleting");
            fs.delete(previousPath, false);
          }
        }

        Path newOutputPath = new Path(newName + ".chukwa");
        while(fs.exists(newOutputPath)) {
          newName = getNewFileName();
          newOutputPath = new Path(newName + ".chukwa");
        }

        currentPath = newOutputPath;
        currentFileName = newName;
        chunksWrittenThisRotate = false;
        parquetWriter = new AvroParquetWriter<GenericRecord>(newOutputPath, avroSchema, CompressionCodecName.SNAPPY, blockSize, pageSize);
      } catch (IOException e) {
        log.fatal("IO Exception in rotate: ", e);
      }
    }

    // Check for disk space
    File directory4Space = new File(localOutputDir);
    long totalSpace = directory4Space.getTotalSpace();
    long freeSpace = directory4Space.getFreeSpace();
    long minFreeAvailable = (totalSpace * minPercentFreeDisk) /100;

    if (log.isDebugEnabled()) {
      log.debug("Directory: " + localOutputDir + ", totalSpace: " + totalSpace
          + ", freeSpace: " + freeSpace + ", minFreeAvailable: " + minFreeAvailable
          + ", percentFreeDisk: " + minPercentFreeDisk);
    }

    if (freeSpace < minFreeAvailable) {
      log.fatal("No space left on device.");
      throw new WriterException("No space left on device.");
    }

    log.debug("finished rotate()");
  }

  /**
   * Cancels the timers, closes the parquet writer, stops the mover thread
   * and renames the last sink file to .done.
   */
  public void close() {
    synchronized (lock) {
      if (rotateTimer != null) {
        rotateTimer.cancel();
      }
      if (statTimer != null) {
        statTimer.cancel();
      }
      try {
        if (parquetWriter != null) {
          parquetWriter.close();
        }
        if (localToRemoteHdfsMover != null) {
          localToRemoteHdfsMover.shutdown();
        }
        fs.rename(currentPath, new Path(currentFileName + ".done"));
      } catch (IOException e) {
        log.error("failed to close and rename stream", e);
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.solr;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineableWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrInputDocument;
/**
 * Pipeline writer that indexes Chukwa chunks as documents in a Solr Cloud
 * collection. The Solr address is read from "solr.cloud.address" and the
 * collection from "solr.collection" (default "logs"). Indexing is
 * best-effort: a chunk that fails to index is logged and skipped so one bad
 * chunk does not block the rest of the pipeline.
 */
public class SolrWriter extends PipelineableWriter {
  private static Logger log = Logger.getLogger(SolrWriter.class);
  // Shared across all SolrWriter instances; created lazily in init().
  private static CloudSolrClient client;
  private final static String ID = "id";
  private final static String SEQ_ID = "seqId";
  private final static String DATA_TYPE = "type";
  private final static String STREAM_NAME = "stream";
  private final static String TAGS = "tags";
  private final static String SOURCE = "source";
  private final static String DATA = "data";
  private final static String USER = "user";
  private final static String SERVICE = "service";
  private final static String DATE = "date";
  // Extracts the user name from audit-style log lines, e.g. "user=foo,".
  private final static Pattern userPattern = Pattern.compile("user=(.+?)[, ]");
  // NOTE(review): SimpleDateFormat is not thread-safe; confirm add() is never
  // invoked concurrently on the same writer instance.
  private SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");

  public SolrWriter() throws WriterException {
    init(ChukwaAgent.getStaticConfiguration());
  }

  /**
   * Connects to the configured Solr Cloud address and selects the target
   * collection. The shared client is only created on first use.
   *
   * @throws WriterException if "solr.cloud.address" is not configured.
   */
  @Override
  public void init(Configuration c) throws WriterException {
    String serverName = c.get("solr.cloud.address");
    if (serverName == null) {
      throw new WriterException("Solr server address is not defined.");
    }
    String collection = c.get("solr.collection", "logs");
    if(client == null) {
      client = new CloudSolrClient(serverName);
      client.setDefaultCollection(collection);
    }
  }

  /** No per-writer resources to release; the client is shared and static. */
  @Override
  public void close() throws WriterException {
  }

  /**
   * Converts each chunk into a SolrInputDocument (id, tags, stream, source,
   * sequence id, type and UTF-8 payload), heuristically tags the originating
   * user and service from the payload text, commits the batch, and passes the
   * chunks to the next writer in the pipeline.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    if(client == null) {
      init(ChukwaAgent.getStaticConfiguration());
    }
    CommitStatus rv = ChukwaWriter.COMMIT_OK;
    for(Chunk chunk : chunks) {
      try {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField(ID, chunk.getSource() + "_" + chunk.getSeqID());
        doc.addField(TAGS, chunk.getTags());
        doc.addField(STREAM_NAME, chunk.getStreamName());
        doc.addField(SOURCE, chunk.getSource());
        doc.addField(SEQ_ID, chunk.getSeqID());
        doc.addField(DATA_TYPE, chunk.getDataType());
        doc.addField(DATA, new String(chunk.getData(), Charset.forName("UTF-8")));
        // TODO: improve parsing logic for more sophisticated tagging
        String data = new String(chunk.getData(), Charset.forName("UTF-8"));
        Matcher m = userPattern.matcher(data);
        if(m.find()) {
          doc.addField(USER, m.group(1));
        } else {
          doc.addField(USER, "Unclassified");
        }
        if(data.contains("hdfs")) {
          doc.addField(SERVICE, "hdfs");
        } else if(data.contains("yarn")) {
          doc.addField(SERVICE, "yarn");
        } else if(data.contains("mapreduce")) {
          // Bug fix: previously matched the typo "mapredice", so MapReduce
          // logs were always tagged "Unclassified".
          doc.addField(SERVICE, "mapreduce");
        } else if(data.contains("hbase")) {
          doc.addField(SERVICE, "hbase");
        } else {
          doc.addField(SERVICE, "Unclassified");
        }
        try {
          Date d = sdf.parse(data);
          // Dropped the deprecated 1.0f boost argument; 1.0 is the default.
          doc.addField(DATE, d);
        } catch(ParseException e) {
          // Best effort: chunks that do not begin with a log4j-style
          // timestamp are simply indexed without a date field.
        }
        client.add(doc);
      } catch (Exception e) {
        log.warn("Failed to store data to Solr Cloud.");
        log.warn(ExceptionUtil.getStackTrace(e));
      }
    }
    try {
      if(client != null) {
        client.commit();
      }
    } catch (Exception e) {
      log.warn("Failed to store data to Solr Cloud.");
      log.warn(ExceptionUtil.getStackTrace(e));
    }
    if (next != null) {
      rv = next.add(chunks); //pass data through
    }
    return rv;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package org.apache.hadoop.chukwa.datacollection.writer.gora;
/**
* Chukwa Adaptors emit data in Chunks. A Chunk is a sequence of bytes, with
* some metadata. Several of these are set automatically by the Agent or
* Adaptors. Two of them require user intervention: cluster name and datatype.
* Cluster name is specified in conf/chukwa-agent-conf.xml, and is global to
* each Agent process. Datatype describes the expected format of the data
* collected by an Adaptor instance, and it is specified when that instance is
* started.
*/
@SuppressWarnings("all")
public class ChukwaChunk extends org.apache.gora.persistency.impl.PersistentBase
implements org.apache.avro.specific.SpecificRecord,
org.apache.gora.persistency.Persistent {
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser()
.parse(
"{\"type\":\"record\",\"name\":\"ChukwaChunk\",\"namespace\":\"org.apache.hadoop.chukwa.datacollection.writer.gora\",\"doc\":\"Chukwa Adaptors emit data in Chunks. A Chunk is a sequence of bytes, with some metadata. Several of these are set automatically by the Agent or Adaptors. Two of them require user intervention: cluster name and datatype. Cluster name is specified in conf/chukwa-agent-conf.xml, and is global to each Agent process. Datatype describes the expected format of the data collected by an Adaptor instance, and it is specified when that instance is started. \",\"fields\":[{\"name\":\"source\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"tags\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"datatype\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"sequenceID\",\"type\":[\"null\",\"long\"],\"default\":null},{\"name\":\"name\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"data\",\"type\":[\"null\",\"bytes\"],\"default\":null}]}");
/** Enum containing all data bean's fields. */
public static enum Field {
SOURCE(0, "source"), TAGS(1, "tags"), DATATYPE(2, "datatype"), SEQUENCE_ID(
3, "sequenceID"), NAME(4, "name"), DATA(5, "data"),;
/**
* Field's index.
*/
private int index;
/**
* Field's name.
*/
private String name;
/**
* Field's constructor
*
* @param index
* field's index.
* @param name
* field's name.
*/
Field(int index, String name) {
this.index = index;
this.name = name;
}
/**
* Gets field's index.
*
* @return int field's index.
*/
public int getIndex() {
return index;
}
/**
* Gets field's name.
*
* @return String field's name.
*/
public String getName() {
return name;
}
/**
* Gets field's attributes to string.
*
* @return String field's attributes to string.
*/
public String toString() {
return name;
}
};
public static final String[] _ALL_FIELDS = { "source", "tags", "datatype",
"sequenceID", "name", "data", };
/**
* Gets the total field count.
*
* @return int field count
*/
public int getFieldsCount() {
return ChukwaChunk._ALL_FIELDS.length;
}
private java.lang.CharSequence source;
private java.lang.CharSequence tags;
private java.lang.CharSequence datatype;
private java.lang.Long sequenceID;
private java.lang.CharSequence name;
private java.nio.ByteBuffer data;
public org.apache.avro.Schema getSchema() {
return SCHEMA$;
}
// Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0:
return source;
case 1:
return tags;
case 2:
return datatype;
case 3:
return sequenceID;
case 4:
return name;
case 5:
return data;
default:
throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
// Used by DatumReader. Applications should not call.
@SuppressWarnings(value = "unchecked")
public void put(int field$, java.lang.Object value) {
switch (field$) {
case 0:
source = (java.lang.CharSequence) (value);
break;
case 1:
tags = (java.lang.CharSequence) (value);
break;
case 2:
datatype = (java.lang.CharSequence) (value);
break;
case 3:
sequenceID = (java.lang.Long) (value);
break;
case 4:
name = (java.lang.CharSequence) (value);
break;
case 5:
data = (java.nio.ByteBuffer) (value);
break;
default:
throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
/**
* Gets the value of the 'source' field.
*
* @return source
*/
public java.lang.CharSequence getSource() {
return source;
}
/**
* Sets the value of the 'source' field.
*
* @param value is the value to set.
*/
public void setSource(java.lang.CharSequence value) {
this.source = value;
setDirty(0);
}
/**
* Checks the dirty status of the 'source' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if a sourcefield has not been written to database
*/
public boolean isSourceDirty() {
return isDirty(0);
}
/**
* Gets the value of the 'tags' field.
*
* @return value of tags field
*/
public java.lang.CharSequence getTags() {
return tags;
}
/**
* Sets the value of the 'tags' field.
*
* @param value is the value to set.
*/
public void setTags(java.lang.CharSequence value) {
this.tags = value;
setDirty(1);
}
/**
* Checks the dirty status of the 'tags' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if tags field has not been written to database
*/
public boolean isTagsDirty() {
return isDirty(1);
}
/**
* Gets the value of the 'datatype' field.
*
* @return datatype field
*/
public java.lang.CharSequence getDatatype() {
return datatype;
}
/**
* Sets the value of the 'datatype' field.
*
* @param value is the value to set.
*/
public void setDatatype(java.lang.CharSequence value) {
this.datatype = value;
setDirty(2);
}
/**
* Checks the dirty status of the 'datatype' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if datatype field has not been written to database
*/
public boolean isDatatypeDirty() {
return isDirty(2);
}
/**
* Gets the value of the 'sequenceID' field.
*
* @return sequenceID
*/
public java.lang.Long getSequenceID() {
return sequenceID;
}
/**
* Sets the value of the 'sequenceID' field.
*
* @param value is the value to set.
*/
public void setSequenceID(java.lang.Long value) {
this.sequenceID = value;
setDirty(3);
}
/**
* Checks the dirty status of the 'sequenceID' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if sequenceID has not been commit to database
*/
public boolean isSequenceIDDirty() {
return isDirty(3);
}
/**
* Gets the value of the 'name' field.
*
* @return name
*/
public java.lang.CharSequence getName() {
return name;
}
/**
* Sets the value of the 'name' field.
*
* @param value is the value to set.
*/
public void setName(java.lang.CharSequence value) {
this.name = value;
setDirty(4);
}
/**
* Checks the dirty status of the 'name' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if name has not been committed to database
*/
public boolean isNameDirty() {
return isDirty(4);
}
/**
* Gets the value of the 'data' field.
*
* @return data field
*/
public java.nio.ByteBuffer getData() {
return data;
}
/**
* Sets the value of the 'data' field.
*
* @param value is a string
* the value to set.
*/
public void setData(java.nio.ByteBuffer value) {
this.data = value;
setDirty(5);
}
/**
* Checks the dirty status of the 'data' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*
* @return true if data field has not been committed to database
*/
public boolean isDataDirty() {
return isDirty(5);
}
/**
* Creates a new ChukwaChunk RecordBuilder
*
* @return RecordBuilder
*/
public static org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder newBuilder() {
return new org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder();
}
/**
* Creates a new ChukwaChunk RecordBuilder by copying an existing Builder
*
* @param other is Chukwa chunk
* @return RecordBuilder
*/
public static org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder newBuilder(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder other) {
return new org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder(
other);
}
/**
* Creates a new ChukwaChunk RecordBuilder by copying an existing ChukwaChunk
* instance
*
* @param other is Chukwa chunk
* @return RecordBuilder
*/
public static org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder newBuilder(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk other) {
return new org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder(
other);
}
  /**
   * Returns a read-only deep copy of {@code input}, restoring the original
   * position, limit and mark on both the input and the copy before returning.
   * NOTE(review): the first {@code input.reset()} below requires that a mark
   * has already been set on {@code input}; otherwise it throws
   * {@code InvalidMarkException}. Confirm every caller marks the buffer
   * first. (Gora/Avro-generated code — file header says "DO NOT EDIT
   * DIRECTLY", so this is flagged rather than changed.)
   */
  private static java.nio.ByteBuffer deepCopyToReadOnlyBuffer(
      java.nio.ByteBuffer input) {
    java.nio.ByteBuffer copy = java.nio.ByteBuffer.allocate(input.capacity());
    int position = input.position();
    // Jump back to the mark so its offset can be captured as an int.
    input.reset();
    int mark = input.position();
    int limit = input.limit();
    // Copy the buffer's full backing contents [0, capacity).
    input.rewind();
    input.limit(input.capacity());
    copy.put(input);
    input.rewind();
    copy.rewind();
    // Re-establish the mark at the same offset on both buffers...
    input.position(mark);
    input.mark();
    copy.position(mark);
    copy.mark();
    // ...then restore the original position and limit on both.
    input.position(position);
    copy.position(position);
    input.limit(limit);
    copy.limit(limit);
    return copy.asReadOnlyBuffer();
  }
/**
* RecordBuilder for ChukwaChunk instances.
*/
public static class Builder
extends org.apache.avro.specific.SpecificRecordBuilderBase<ChukwaChunk>
implements org.apache.avro.data.RecordBuilder<ChukwaChunk> {
private java.lang.CharSequence source;
private java.lang.CharSequence tags;
private java.lang.CharSequence datatype;
private java.lang.Long sequenceID;
private java.lang.CharSequence name;
private java.nio.ByteBuffer data;
/** Creates a new Builder */
private Builder() {
super(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.SCHEMA$);
}
/** Creates a Builder by copying an existing Builder */
private Builder(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder other) {
super(other);
}
/** Creates a Builder by copying an existing ChukwaChunk instance */
private Builder(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk other) {
super(
org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.SCHEMA$);
if (isValidValue(fields()[0], other.source)) {
this.source = (java.lang.CharSequence) data()
.deepCopy(fields()[0].schema(), other.source);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.tags)) {
this.tags = (java.lang.CharSequence) data()
.deepCopy(fields()[1].schema(), other.tags);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.datatype)) {
this.datatype = (java.lang.CharSequence) data()
.deepCopy(fields()[2].schema(), other.datatype);
fieldSetFlags()[2] = true;
}
if (isValidValue(fields()[3], other.sequenceID)) {
this.sequenceID = (java.lang.Long) data().deepCopy(fields()[3].schema(),
other.sequenceID);
fieldSetFlags()[3] = true;
}
if (isValidValue(fields()[4], other.name)) {
this.name = (java.lang.CharSequence) data()
.deepCopy(fields()[4].schema(), other.name);
fieldSetFlags()[4] = true;
}
if (isValidValue(fields()[5], other.data)) {
this.data = (java.nio.ByteBuffer) data().deepCopy(fields()[5].schema(),
other.data);
fieldSetFlags()[5] = true;
}
}
/**
* Gets the value of the 'source' field
*
* @return source field
*/
public java.lang.CharSequence getSource() {
return source;
}
/**
* Sets the value of the 'source' field
*
* @param value is a string
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setSource(
java.lang.CharSequence value) {
validate(fields()[0], value);
this.source = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'source' field has been set
*
* @return true if source field has been set
*/
public boolean hasSource() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'source' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearSource() {
source = null;
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'tags' field
*
* @return tags field
*/
public java.lang.CharSequence getTags() {
return tags;
}
/**
* Sets the value of the 'tags' field
*
* @param value is a string
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setTags(
java.lang.CharSequence value) {
validate(fields()[1], value);
this.tags = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'tags' field has been set
*
* @return true if tags has been set
*/
public boolean hasTags() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'tags' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearTags() {
tags = null;
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'datatype' field
*
* @return datatype field
*/
public java.lang.CharSequence getDatatype() {
return datatype;
}
/**
* Sets the value of the 'datatype' field
*
* @param value is a string
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setDatatype(
java.lang.CharSequence value) {
validate(fields()[2], value);
this.datatype = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'datatype' field has been set
*
* @return true if datatype field has been set
*/
public boolean hasDatatype() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'datatype' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearDatatype() {
datatype = null;
fieldSetFlags()[2] = false;
return this;
}
/**
* Gets the value of the 'sequenceID' field
*
* @return sequenceID
*/
public java.lang.Long getSequenceID() {
return sequenceID;
}
/**
* Sets the value of the 'sequenceID' field
*
* @param value is a string
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setSequenceID(
java.lang.Long value) {
validate(fields()[3], value);
this.sequenceID = value;
fieldSetFlags()[3] = true;
return this;
}
/**
* Checks whether the 'sequenceID' field has been set
*
* @return true if sequenceID has been set
*/
public boolean hasSequenceID() {
return fieldSetFlags()[3];
}
/**
* Clears the value of the 'sequenceID' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearSequenceID() {
sequenceID = null;
fieldSetFlags()[3] = false;
return this;
}
/**
* Gets the value of the 'name' field
*
* @return name
*/
public java.lang.CharSequence getName() {
return name;
}
/**
* Sets the value of the 'name' field
*
* @param value is a string
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setName(
java.lang.CharSequence value) {
validate(fields()[4], value);
this.name = value;
fieldSetFlags()[4] = true;
return this;
}
/**
* Checks whether the 'name' field has been set
*
* @return true if name field has been set
*/
public boolean hasName() {
return fieldSetFlags()[4];
}
/**
* Clears the value of the 'name' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearName() {
name = null;
fieldSetFlags()[4] = false;
return this;
}
/**
* Gets the value of the 'data' field
*
* @return data
*/
public java.nio.ByteBuffer getData() {
return data;
}
/**
* Sets the value of the 'data' field
*
* @param value is a string
* @return RecordBudiler
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder setData(
java.nio.ByteBuffer value) {
validate(fields()[5], value);
this.data = value;
fieldSetFlags()[5] = true;
return this;
}
/**
* Checks whether the 'data' field has been set
*
* @return true if data field has been set
*/
public boolean hasData() {
return fieldSetFlags()[5];
}
/**
* Clears the value of the 'data' field
*
* @return RecordBuilder
*/
public org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk.Builder clearData() {
data = null;
fieldSetFlags()[5] = false;
return this;
}
@Override
public ChukwaChunk build() {
try {
ChukwaChunk record = new ChukwaChunk();
record.source = fieldSetFlags()[0] ? this.source
: (java.lang.CharSequence) defaultValue(fields()[0]);
record.tags = fieldSetFlags()[1] ? this.tags
: (java.lang.CharSequence) defaultValue(fields()[1]);
record.datatype = fieldSetFlags()[2] ? this.datatype
: (java.lang.CharSequence) defaultValue(fields()[2]);
record.sequenceID = fieldSetFlags()[3] ? this.sequenceID
: (java.lang.Long) defaultValue(fields()[3]);
record.name = fieldSetFlags()[4] ? this.name
: (java.lang.CharSequence) defaultValue(fields()[4]);
record.data = fieldSetFlags()[5] ? this.data
: (java.nio.ByteBuffer) defaultValue(fields()[5]);
return record;
} catch (Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
public ChukwaChunk.Tombstone getTombstone() {
return TOMBSTONE;
}
public ChukwaChunk newInstance() {
return newBuilder().build();
}
private static final Tombstone TOMBSTONE = new Tombstone();
public static final class Tombstone extends ChukwaChunk
implements org.apache.gora.persistency.Tombstone {
private Tombstone() {
}
/**
* Gets the value of the 'source' field.
*/
public java.lang.CharSequence getSource() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'source' field.
*
* @param value
* the value to set.
*/
public void setSource(java.lang.CharSequence value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'source' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isSourceDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
/**
* Gets the value of the 'tags' field.
*/
public java.lang.CharSequence getTags() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'tags' field.
*
* @param value
* the value to set.
*/
public void setTags(java.lang.CharSequence value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'tags' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isTagsDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
/**
* Gets the value of the 'datatype' field.
*/
public java.lang.CharSequence getDatatype() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'datatype' field.
*
* @param value
* the value to set.
*/
public void setDatatype(java.lang.CharSequence value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'datatype' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isDatatypeDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
/**
* Gets the value of the 'sequenceID' field.
*/
public java.lang.Long getSequenceID() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'sequenceID' field.
*
* @param value
* the value to set.
*/
public void setSequenceID(java.lang.Long value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'sequenceID' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isSequenceIDDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
/**
* Gets the value of the 'name' field.
*/
public java.lang.CharSequence getName() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'name' field.
*
* @param value
* the value to set.
*/
public void setName(java.lang.CharSequence value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'name' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isNameDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
/**
* Gets the value of the 'data' field.
*/
public java.nio.ByteBuffer getData() {
throw new java.lang.UnsupportedOperationException(
"Get is not supported on tombstones");
}
/**
* Sets the value of the 'data' field.
*
* @param value
* the value to set.
*/
public void setData(java.nio.ByteBuffer value) {
throw new java.lang.UnsupportedOperationException(
"Set is not supported on tombstones");
}
/**
* Checks the dirty status of the 'data' field. A field is dirty if it
* represents a change that has not yet been written to the database.
*/
public boolean isDataDirty() {
throw new java.lang.UnsupportedOperationException(
"IsDirty is not supported on tombstones");
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.gora;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.gora.store.DataStore;
import org.apache.gora.store.DataStoreFactory;
import org.apache.gora.util.GoraException;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineableWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.datacollection.writer.solr.SolrWriter;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* This class leverages <a href="http://gora.apache.org">Apache Gora</a>
* as a pipeline writer implementation for mapping Chukwa data chunks and
* metadata as {@link org.apache.hadoop.chukwa.datacollection.writer.gora.ChukwaChunk}'s.
*
*/
public class GoraWriter extends PipelineableWriter {
  // Bug fix: the logger was registered under SolrWriter.class (copy/paste
  // slip), mislabeling every log line emitted by this writer.
  private static Logger log = Logger.getLogger(GoraWriter.class);
  
  // Gora datastore mapping a String key to a serialized ChukwaChunk bean.
  DataStore<String, ChukwaChunk> chunkStore;
  
  /**
   * Default constructor for this class.
   * @throws WriterException if error writing
   */
  public GoraWriter() throws WriterException {
    log.debug("Initializing configuration for GoraWriter pipeline...");
    init(ChukwaAgent.getStaticConfiguration());
  }

  /**
   * {@link org.apache.gora.store.DataStore} objects are created from a factory. It is necessary to
   * provide the key and value class. The datastore class parameters is optional,
   * and if not specified it will be read from the <code>gora.properties</code> file.
   * @throws WriterException if the datastore cannot be created; previously the
   *         error was swallowed, leaving a null chunkStore that failed later
   *         with a NullPointerException.
   * @see org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter#init(org.apache.hadoop.conf.Configuration)
   */
  @Override
  public void init(Configuration c) throws WriterException {
    try {
      chunkStore = DataStoreFactory.getDataStore(String.class, ChukwaChunk.class, c);
    } catch (GoraException e) {
      log.error(ExceptionUtil.getStackTrace(e));
      throw new WriterException("Failed to create Gora datastore: " + e.getMessage());
    }
  }

  /**
   * Flushes any deferred writes and closes the datastore, releasing the
   * resources held by the implementation.
   * Bug fix: the original null-check was inverted — its else branch invoked
   * close() on a null chunkStore, guaranteeing a NullPointerException, and a
   * non-null store was flushed but never closed.
   * @see org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter#close()
   */
  @Override
  public void close() throws WriterException {
    if (chunkStore != null) {
      chunkStore.flush();
      chunkStore.close();
    }
    log.debug("Gora datastore successfully closed.");
  }

  /**
   * Maps each Chunk onto a ChukwaChunk bean and persists it to the Gora
   * datastore, then passes the chunks on to the next writer in the pipeline.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    CommitStatus cStatus = ChukwaWriter.COMMIT_OK;
    for(Chunk chunk : chunks) {
      try {
        ChukwaChunk chukwaChunk = ChukwaChunk.newBuilder().build();
        chukwaChunk.setSource(chunk.getSource());
        chukwaChunk.setDatatype(chunk.getDataType());
        chukwaChunk.setSequenceID(chunk.getSeqID());
        chukwaChunk.setName(chunk.getStreamName());
        chukwaChunk.setTags(chunk.getTags());
        chukwaChunk.setData(ByteBuffer.wrap(chunk.getData()));
        // Bug fix: the original populated the bean but never persisted it.
        // Key by source + sequence id, mirroring SolrWriter's document ids.
        chunkStore.put(chunk.getSource() + "_" + chunk.getSeqID(), chukwaChunk);
      } catch (Exception e) {
        log.error(ExceptionUtil.getStackTrace(e));
        // Was "Failed to store data to Solr Cloud." — copied from SolrWriter.
        throw new WriterException("Failed to store data to Gora datastore.");
      }
    }
    if (next != null) {
      cStatus = next.add(chunks); //pass data through
    }
    return cStatus;
  }
}
| 8,253 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/gora/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package dedicated to using <a href="http://gora.apache.org">Apache Gora</a>
* as a pipeline writer implementation.
*/
package org.apache.hadoop.chukwa.datacollection.writer.gora; | 8,254 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/hbase/HBaseWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.hbase;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineableWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.extraction.hbase.AbstractProcessor;
import org.apache.hadoop.chukwa.extraction.hbase.ProcessorFactory;
import org.apache.hadoop.chukwa.extraction.hbase.UnknownRecordTypeException;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.log4j.Logger;
/**
 * Pipeline writer that demuxes each {@link Chunk} with a per-datatype
 * {@link AbstractProcessor} and writes the resulting Puts into the "chukwa"
 * HBase table, with metadata going to "chukwa_meta". Chunks are always passed
 * through to the next pipeline stage regardless of HBase success.
 */
public class HBaseWriter extends PipelineableWriter {
  static Logger log = Logger.getLogger(HBaseWriter.class);
  private static final String CHUKWA_TABLE = "chukwa";
  private static final String CHUKWA_META_TABLE = "chukwa_meta";
  // When true, a timer logs the observed data rate once every 10 seconds.
  boolean reportStats;
  // Total bytes of chunk payload processed; read by StatReportingTask.
  volatile long dataSize = 0;
  final Timer statTimer;
  // Reused Put buffer; guarded by synchronized(this) in add().
  private ArrayList<Put> output;
  private Reporter reporter;
  private ChukwaConfiguration conf;
  private Configuration hconf;
  // Fallback processor class when no mapping exists for a chunk's datatype.
  String defaultProcessor;
  // NOTE(review): shared across all HBaseWriter instances in the JVM and
  // re-created without synchronization in several places; confirm only one
  // writer instance is active at a time.
  private static Connection connection;

  /** Periodically logs the payload throughput since the previous run. */
  private class StatReportingTask extends TimerTask {
    private long lastTs = System.currentTimeMillis();
    private long lastDataSize = 0;
    public void run() {
      long time = System.currentTimeMillis();
      long interval = time - lastTs;
      lastTs = time;
      long ds = dataSize;
      long dataRate = 1000 * (ds - lastDataSize) / interval; // bytes/sec
      // refers only to data field, not including http or chukwa headers
      lastDataSize = ds;
      log.info("stat=HBaseWriter|dataRate="
          + dataRate);
    }
  };

  public HBaseWriter() throws IOException {
    this(true);
  }

  public HBaseWriter(boolean reportStats) throws IOException {
    /* HBase Version >= 0.89.x */
    this(reportStats, new ChukwaConfiguration(), HBaseConfiguration.create());
  }

  public HBaseWriter(ChukwaConfiguration conf, Configuration hconf) throws IOException {
    this(true, conf, hconf);
  }

  /**
   * Core constructor: wires configuration, optional stat reporting, the
   * reusable Put buffer, and the shared HBase connection.
   * @throws IOException if the hashing algorithm or HBase connection fails
   */
  private HBaseWriter(boolean reportStats, ChukwaConfiguration conf, Configuration hconf) throws IOException {
    this.reportStats = reportStats;
    this.conf = conf;
    this.hconf = hconf;
    this.statTimer = new Timer();
    this.defaultProcessor = conf.get(
      "chukwa.demux.mapper.default.processor",
      "org.apache.hadoop.chukwa.extraction.hbase.DefaultProcessor");
    log.info("hbase.zookeeper.quorum: " + hconf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + hconf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
    if (reportStats) {
      // First report after 1s, then every 10s.
      statTimer.schedule(new StatReportingTask(), 1000, 10 * 1000);
    }
    output = new ArrayList<Put>();
    try {
      reporter = new Reporter();
    } catch (NoSuchAlgorithmException e) {
      // NOTE(review): the cause 'e' is dropped here; consider chaining it.
      throw new IOException("Can not register hashing algorithm.");
    }
    if (connection == null || connection.isClosed()) {
      connection = ConnectionFactory.createConnection(hconf);
    }
  }

  /** Stops the stat timer. Does not close the shared HBase connection. */
  public void close() {
    if (reportStats) {
      statTimer.cancel();
    }
  }

  /**
   * (Re)establishes the shared HBase connection if needed.
   * @throws WriterException if HBase is unreachable
   */
  public void init(Configuration conf) throws WriterException {
    if (connection == null || connection.isClosed()) {
      try {
        connection = ConnectionFactory.createConnection(hconf);
      } catch (IOException e) {
        throw new WriterException("HBase is offline, retry later...");
      }
    }
  }

  /**
   * Processes and writes each chunk to HBase. Per-chunk failures are logged
   * and skipped; connection-level failures close the shared connection.
   * Chunks are always forwarded to the next stage.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    CommitStatus rv = ChukwaWriter.COMMIT_OK;
    Table hbase;
    Table meta;
    try {
      if (connection == null || connection.isClosed()) {
        try {
          connection = ConnectionFactory.createConnection(hconf);
        } catch (IOException e) {
          throw new WriterException("HBase is offline, retry later...");
        }
      }
      hbase = connection.getTable(TableName.valueOf(CHUKWA_TABLE));
      meta = connection.getTable(TableName.valueOf(CHUKWA_META_TABLE));
      for(Chunk chunk : chunks) {
        // Guards the shared output/reporter buffers against concurrent add().
        synchronized (this) {
          try {
            AbstractProcessor processor = getProcessor(chunk.getDataType());
            processor.process(chunk, output, reporter);
            hbase.put(output);
            meta.put(reporter.getInfo());
          } catch (Throwable e) {
            // Deliberate best-effort: a bad chunk must not block the stream.
            log.warn("Unable to process data:");
            // NOTE(review): uses the platform default charset to render the
            // payload; chunk data is expected to be UTF-8 — confirm.
            log.warn(new String(chunk.getData()));
            log.warn(ExceptionUtil.getStackTrace(e));
          }
          dataSize += chunk.getData().length;
          output.clear();
          reporter.clear();
        }
      }
      hbase.close();
      meta.close();
    } catch (Exception e) {
      log.error(ExceptionUtil.getStackTrace(e));
      if(connection != null) {
        try {
          connection.close();
        } catch(IOException e2) {
          connection = null;
          throw new WriterException("HBase connection maybe leaking.");
        }
      }
    }
    if (next != null) {
      rv = next.add(chunks); //pass data through
    }
    return rv;
  }

  /**
   * Resolves the processor instance for a datatype, falling back to the
   * configured default processor.
   * @throws UnknownRecordTypeException if the class cannot be instantiated
   */
  private AbstractProcessor getProcessor(String dataType) throws UnknownRecordTypeException {
    String processorClass = findProcessor(conf.get(dataType, defaultProcessor), defaultProcessor);
    return ProcessorFactory.getProcessor(processorClass);
  }

  /**
   * Look for mapper parser class in the demux configuration.
   * Demux configuration has been changed since CHUKWA-581 to
   * support mapping of both mapper and reducer, and this utility
   * class is to detect the mapper class and return the mapper
   * class only.
   *
   * @param processors comma-separated "mapper,reducer" configuration value
   * @param defaultProcessor fallback when no mapper is configured
   * @return the mapper class name only
   */
  private String findProcessor(String processors, String defaultProcessor) {
    if(processors.startsWith(",")) {
      // No mapper class defined.
      return defaultProcessor;
    } else if(processors.contains(",")) {
      // Both mapper and reducer defined.
      String[] parsers = processors.split(",");
      return parsers[0];
    }
    // No reducer defined.
    return processors;
  }
}
| 8,255 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/hbase/Reporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.hbase;
import java.lang.reflect.Type;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.client.Put;
import org.mortbay.log.Log;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
/**
 * Collects HBase metadata {@link Put}s describing the sources, metrics and
 * clusters seen while demuxing chunks. Each entry stores a JSON document of
 * the form {@code {"sig": <5-byte-md5>, "type": <kind>}} keyed by datatype.
 * Not thread-safe; HBaseWriter serializes access externally.
 */
public class Reporter {
  /** Accumulated metadata Puts, drained by the writer via {@link #getInfo()}. */
  private ArrayList<Put> meta = new ArrayList<Put>();
  private MessageDigest md5 = null;
  private final static Charset UTF8 = Charset.forName("UTF-8");

  public Reporter() throws NoSuchAlgorithmException {
    md5 = MessageDigest.getInstance("md5");
  }

  /**
   * Serializes the common {"sig", "type"} metadata map to JSON.
   * Extracted from putSource/putMetric/putClusterName, which previously
   * triplicated this logic (and putMetric used new String(pk, "UTF-8"),
   * needlessly routing through a checked UnsupportedEncodingException).
   * @param sig truncated hash identifying the entity
   * @param kind one of "source", "metric" or "cluster"
   * @return the JSON document
   */
  private String encodeMeta(byte[] sig, String kind) {
    Type metaType = new TypeToken<Map<String, String>>(){}.getType();
    // Renamed from "meta", which shadowed the Put-list field above.
    Map<String, String> values = new HashMap<String, String>();
    values.put("sig", new String(sig, UTF8));
    values.put("type", kind);
    return new Gson().toJson(values, metaType);
  }

  /** Records a source entity of the given datatype. */
  public void putSource(String type, String source) {
    try {
      String buffer = encodeMeta(getHash(source), "source");
      put(type.getBytes(UTF8), source.getBytes(UTF8), buffer.getBytes(UTF8));
    } catch (Exception e) {
      // Best-effort: a metadata encoding failure must not abort the write.
      Log.warn("Error encoding metadata.");
      Log.warn(e);
    }
  }

  /** Records a metric entity; the signature hashes "type.metric". */
  public void putMetric(String type, String metric) {
    String buf = new StringBuilder(type).append(".").append(metric).toString();
    try {
      String buffer = encodeMeta(getHash(buf), "metric");
      put(type.getBytes(UTF8), metric.getBytes(UTF8), buffer.getBytes(UTF8));
    } catch (Exception e) {
      Log.warn("Error encoding metadata.");
      Log.warn(e);
    }
  }

  public void put(String key, String source, String info) {
    put(key.getBytes(UTF8), source.getBytes(UTF8), info.getBytes(UTF8));
  }

  /** Queues one metadata cell under column family "k". */
  public void put(byte[] key, byte[] source, byte[] info) {
    Put put = new Put(key);
    put.addColumn("k".getBytes(UTF8), source, info);
    meta.add(put);
  }

  /** Discards all queued metadata Puts. */
  public void clear() {
    meta.clear();
  }

  /** @return the queued metadata Puts (live list, not a copy). */
  public List<Put> getInfo() {
    return meta;
  }

  /**
   * @return the first 5 bytes of the MD5 digest of the key's UTF-8 encoding.
   */
  private byte[] getHash(String key) {
    byte[] hash = new byte[5];
    System.arraycopy(md5.digest(key.getBytes(UTF8)), 0, hash, 0, 5);
    return hash;
  }

  /** Records a cluster entity of the given datatype. */
  public void putClusterName(String type, String clusterName) {
    String buffer = encodeMeta(getHash(clusterName), "cluster");
    put(type.getBytes(UTF8), clusterName.getBytes(UTF8), buffer.getBytes(UTF8));
  }
}
| 8,256 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/hbase/Annotation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.hbase;
import java.lang.annotation.*;
/**
 * Container for the HBase-mapping annotations used by demux processors to
 * declare which HBase table and column family their output targets.
 */
public class Annotation {
  /** Groups multiple {@link Table} declarations on one processor class. */
  @Retention(RetentionPolicy.RUNTIME)
  @Target({ElementType.TYPE})
  public @interface Tables {
    Table[] annotations();
  }
  /** Declares the HBase table and column family a processor writes to. */
  @Retention(RetentionPolicy.RUNTIME)
  @Target({ElementType.TYPE})
  public @interface Table {
    String name();
    String columnFamily();
  }
  /** Marks a method or field as supplying the column family. */
  @Retention(RetentionPolicy.RUNTIME)
  @Target({ElementType.METHOD,ElementType.FIELD})
  public @interface ColumnFamily {
  }
  /** Marks a local variable as holding the HBase row key. */
  @Retention(RetentionPolicy.RUNTIME)
  @Target({ElementType.LOCAL_VARIABLE})
  public @interface RowKey {
  }
}
| 8,257 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/parquet/ChukwaParquetWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.parquet;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.Calendar;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineableWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
/**
 * Pipeline writer that serializes chunks as Avro records into Parquet files
 * on HDFS. Files are written under {@code chukwaCollector.outputDir} with a
 * timestamped, host-unique name ending in ".chukwa", and renamed to ".done"
 * when rotated. Rotation occurs after {@code chukwaCollector.rotateInterval}
 * milliseconds of writing.
 */
public class ChukwaParquetWriter extends PipelineableWriter {
  private static Logger LOG = Logger.getLogger(ChukwaParquetWriter.class);
  public static final String OUTPUT_DIR_OPT= "chukwaCollector.outputDir";
  // Parquet row-group size; overridden from dfs.blocksize in setup().
  private int blockSize = 128 * 1024 * 1024;
  // Parquet page size, fixed at 1 MB.
  private int pageSize = 1 * 1024 * 1024;
  private Schema avroSchema = null;
  private AvroParquetWriter<GenericRecord> parquetWriter = null;
  protected String outputDir = null;
  // Used only to format file names; not thread-safe, single writer assumed.
  private Calendar calendar = Calendar.getInstance();
  private String localHostAddr = null;
  private long rotateInterval = 300000L;
  // Wall-clock time the current file was opened; drives rotation.
  private long startTime = 0;
  private Path previousPath = null;
  private String previousFileName = null;
  private FileSystem fs = null;

  public ChukwaParquetWriter() throws WriterException {
    this(ChukwaAgent.getStaticConfiguration());
  }

  public ChukwaParquetWriter(Configuration c) throws WriterException {
    setup(c);
  }

  // Intentionally empty: all initialization happens in setup() from the
  // constructor, before the pipeline calls init().
  @Override
  public void init(Configuration c) throws WriterException {
  }

  /**
   * Reads configuration, opens the filesystem, loads the Avro schema and
   * opens the first output file via rotate().
   * @throws WriterException if the filesystem or first file cannot be opened
   */
  private void setup(Configuration c) throws WriterException {
    try {
      localHostAddr = "_" + InetAddress.getLocalHost().getHostName() + "_";
    } catch (UnknownHostException e) {
      localHostAddr = "-NA-";
    }
    outputDir = c.get(OUTPUT_DIR_OPT, "/chukwa/logs");
    blockSize = c.getInt("dfs.blocksize", 128 * 1024 * 1024);
    rotateInterval = c.getLong("chukwaCollector.rotateInterval", 300000L);
    if(fs == null) {
      try {
        fs = FileSystem.get(c);
      } catch (IOException e) {
        throw new WriterException(e);
      }
    }
    // load Chukwa Avro schema
    avroSchema = ChukwaAvroSchema.getSchema();
    // generate the corresponding Parquet schema
    rotate();
  }

  /**
   * Closes the current file and marks it done.
   * NOTE(review): this renames to previousFileName + ".done", yielding
   * "xxx.chukwa.done", whereas rotate() strips the ".chukwa" suffix first and
   * yields "xxx.done" — confirm which form downstream consumers expect.
   */
  @Override
  public void close() throws WriterException {
    try {
      parquetWriter.close();
      fs.rename(previousPath, new Path(previousFileName + ".done"));
    } catch (IOException e) {
      throw new WriterException(e);
    }
  }

  /**
   * Writes each chunk as one Avro record into the current Parquet file,
   * rotating the file once the rotate interval has elapsed. Write failures
   * are logged and skipped; chunks always flow to the next stage.
   */
  @Override
  public CommitStatus add(List<Chunk> chunks) throws WriterException {
    long elapsedTime = 0;
    CommitStatus rv = ChukwaWriter.COMMIT_OK;
    for(Chunk chunk : chunks) {
      try {
        GenericRecord record = new GenericData.Record(avroSchema);
        record.put("dataType", chunk.getDataType());
        record.put("data", ByteBuffer.wrap(chunk.getData()));
        record.put("tags", chunk.getTags());
        record.put("seqId", chunk.getSeqID());
        record.put("source", chunk.getSource());
        record.put("stream", chunk.getStreamName());
        parquetWriter.write(record);
        elapsedTime = System.currentTimeMillis() - startTime;
        if(elapsedTime > rotateInterval) {
          rotate();
        }
      } catch (IOException e) {
        LOG.warn("Failed to store data to HDFS.");
        LOG.warn(ExceptionUtil.getStackTrace(e));
      }
    }
    if (next != null) {
      rv = next.add(chunks); //pass data through
    }
    return rv;
  }

  /**
   * Finalizes the current file (renaming "xxx.chukwa" to "xxx.done") and
   * opens a fresh Parquet file with a timestamped, host-unique name.
   * @throws WriterException if the new file cannot be created
   */
  private void rotate() throws WriterException {
    if(parquetWriter!=null) {
      try {
        parquetWriter.close();
        // Strip the 7-character ".chukwa" suffix before appending ".done".
        String newFileName = previousFileName.substring(0, previousFileName.length() - 7);
        fs.rename(previousPath, new Path(newFileName + ".done"));
      } catch (IOException e) {
        LOG.warn("Fail to close Chukwa write ahead log.");
      }
    }
    startTime = System.currentTimeMillis();
    calendar.setTimeInMillis(startTime);
    String newName = new java.text.SimpleDateFormat("yyyyMMddHHmmssSSS")
      .format(calendar.getTime());
    // UID guarantees uniqueness across JVMs on the same host; strip the
    // characters that are illegal or noisy in file names.
    newName += localHostAddr + new java.rmi.server.UID().toString();
    newName = newName.replace("-", "");
    newName = newName.replace(":", "");
    newName = newName.replace(".", "");
    newName = outputDir + "/" + newName.trim() + ".chukwa";
    LOG.info("writing: "+newName);
    Path path = new Path(newName);
    try {
      parquetWriter = new AvroParquetWriter<GenericRecord>(path, avroSchema, CompressionCodecName.SNAPPY, blockSize, pageSize);
      previousPath = path;
      previousFileName = newName;
    } catch (IOException e) {
      throw new WriterException(e);
    }
  }

  /**
   * Calculates delay for scheduling the next rotation in case of
   * FixedTimeRotatorScheme. This delay is the time difference between the
   * currentTimestamp (t1) and the next time the collector should rotate the
   * sequence files (t2). t2 is the time when the current rotateInterval ends
   * plus an offset (as set by chukwaCollector.FixedTimeIntervalOffset).
   * So, delay = t2 - t1
   *
   * @param currentTime - the current timestamp
   * @param rotateInterval - chukwaCollector.rotateInterval
   * @param offsetInterval - chukwaCollector.fixedTimeIntervalOffset
   * @return delay for scheduling next rotation
   */
  public long getDelayForFixedInterval(long currentTime, long rotateInterval, long offsetInterval){
    // time since last rounded interval
    long remainder = (currentTime % rotateInterval);
    long prevRoundedInterval = currentTime - remainder;
    long nextRoundedInterval = prevRoundedInterval + rotateInterval;
    long delay = nextRoundedInterval - currentTime + offsetInterval;
    if (LOG.isInfoEnabled()) {
      LOG.info("currentTime="+currentTime+" prevRoundedInterval="+
        prevRoundedInterval+" nextRoundedInterval" +
        "="+nextRoundedInterval+" delay="+delay);
    }
    return delay;
  }
}
| 8,258 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/writer/parquet/ChukwaAvroSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.parquet;
import org.apache.avro.Schema;
/**
 * Provides the Avro schema describing a Chukwa chunk record (datatype, raw
 * payload, source, stream, tags and an optional sequence id).
 */
public class ChukwaAvroSchema {
  // Avro Schema objects are immutable and parsing is not free, so parse the
  // JSON definition once instead of on every getSchema() call as before.
  private static final Schema SCHEMA = new Schema.Parser().parse(
      "{\"namespace\": \"chukwa.apache.org\"," +
      "\"type\": \"record\"," +
      "\"name\": \"Chunk\"," +
      "\"fields\": [" +
      "{\"name\": \"dataType\", \"type\": \"string\"}," +
      "{\"name\": \"data\", \"type\": \"bytes\"}," +
      "{\"name\": \"source\", \"type\": \"string\"}," +
      "{\"name\": \"stream\", \"type\": \"string\"}," +
      "{\"name\": \"tags\", \"type\": \"string\"}," +
      "{\"name\": \"seqId\", \"type\": [\"long\", \"null\"]}" +
      "]"+
      "}");

  /**
   * @return the cached, immutable Chukwa chunk schema
   */
  public static Schema getSchema() {
    return SCHEMA;
  }
}
| 8,259 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/connector/ChunkCatcherConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.connector;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.*;
import java.util.*;
/**
 * Test/utility connector that pulls chunks one at a time from the global
 * chunk queue, optionally bounded by a timeout implemented via thread
 * interruption.
 */
public class ChunkCatcherConnector implements Connector {
  ChunkQueue eq;
  // Shared timer driving waitForAChunk timeouts.
  Timer tm;

  /** Timer task that interrupts the waiting thread unless deactivated. */
  static class Interruptor extends TimerTask {
    Thread targ;
    // Set (under the task's own lock) once the wait has completed, so a
    // late-firing task does not interrupt a thread that already succeeded.
    volatile boolean deactivate = false;
    Interruptor(Thread t) {
      targ =t;
    }
    public synchronized void run() {
      if(!deactivate)
        targ.interrupt();
    }
  };

  public void start() {
    eq = DataFactory.getInstance().getEventQueue();
    tm = new Timer();
  }

  /**
   * Waits for a single chunk from the queue.
   * @param ms timeout in milliseconds; 0 or negative waits forever
   * @return the chunk, or null if the wait timed out (was interrupted)
   */
  public Chunk waitForAChunk(long ms) {
    ArrayList<Chunk> chunks = new ArrayList<Chunk>();
    Interruptor i = new Interruptor(Thread.currentThread());
    if(ms > 0)
      tm.schedule(i, ms);
    try {
      eq.collect(chunks, 1);
      synchronized(i) {
        i.deactivate = true;
      }
      // Bug fix: the task was never cancelled, so it stayed queued in the
      // timer until it fired; cancelling also removes any chance of a stray
      // interrupt racing the deactivate flag above.
      i.cancel();
    } catch(InterruptedException e) {
      // Timeout path: clear the interrupt status and report "no chunk".
      Thread.interrupted();
      return null;
    }
    return chunks.get(0);
  }

  /** Waits forever for a chunk. */
  public Chunk waitForAChunk() throws InterruptedException {
    return this.waitForAChunk(0);//wait forever by default
  }

  public void shutdown() {
    tm.cancel();
  }

  @Override
  public void reloadConfiguration() {
    System.out.println("reloadConfiguration");
  }

  /** Drains any queued chunks, discarding them. */
  public void clear() throws InterruptedException {
    ArrayList<Chunk> list = new ArrayList<Chunk>();
    while(eq.size() > 0)
      eq.collect(list, 1);
  }
}
| 8,260 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/connector/Connector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.connector;
/**
* This class is responsible for setting up a long living process that
* repeatedly calls the <code>send</code> function of a Sender.
*/
public interface Connector {
  // Field indices used when parsing proxy/adaptor/log records.
  static final int proxyTimestampField = 0;
  /**
   * Index of the proxy URI field.
   */
  static final int proxyURIField = 1;
  static final int proxyRetryField = 2;
  static final int adaptorTimestampField = 3;
  static final int adaptorURIField = 4;
  static final int logTimestampField = 5;
  static final int logSourceField = 6;
  static final int logApplicationField = 7;
  static final int logEventField = 8;

  /** Starts the connector's send loop. */
  public void start();

  /** Stops the connector and releases its resources. */
  public void shutdown();

  /** Re-reads the connector's configuration without restarting it. */
  public void reloadConfiguration();
}
| 8,261 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/connector/PipelineConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.connector;
/**
* This class is responsible for setting up connections with configured
* storage writers base on configuration of chukwa_agent.xml.
*
* On error, tries the list of available storage writers, pauses for a minute,
* and then repeats.
*
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter.CommitStatus;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
 * Connector that repeatedly drains the agent's chunk queue and pushes the
 * chunks into the configured {@link PipelineStageWriter}, acknowledging
 * committed chunks back to their adaptors. A timer logs the number of chunks
 * sent once per minute.
 */
public class PipelineConnector implements Connector, Runnable {
  static Logger log = Logger.getLogger(PipelineConnector.class);
  Timer statTimer = null;
  // Chunks sent since the last stat report; reset by the timer task.
  volatile int chunkCount = 0;
  int MAX_SIZE_PER_POST = 2 * 1024 * 1024;
  int MIN_POST_INTERVAL = 5 * 1000;
  public static final String MIN_POST_INTERVAL_OPT = "pipelineConnector.minPostInterval";
  public static final String MAX_SIZE_PER_POST_OPT = "pipelineConnector.maxPostSize";
  public static final String ASYNC_ACKS_OPT = "pipelineConnector.asyncAcks";
  ChunkQueue chunkQueue;
  private ChukwaAgent agent = null;
  private volatile boolean stopMe = false;
  protected ChukwaWriter writers = null;

  public PipelineConnector() {
    //instance initializer block
    statTimer = new Timer();
    statTimer.schedule(new TimerTask() {
      public void run() {
        int count = chunkCount;
        chunkCount = 0;
        log.info("# Data chunks sent since last report: " + count);
      }
    }, 100, 60 * 1000);
  }

  /**
   * Reads the post-size/interval configuration, builds the pipeline writer
   * and starts the connector thread. Initialization errors are logged, not
   * thrown, so the agent keeps running.
   */
  public void start() {
    chunkQueue = DataFactory.getInstance().getEventQueue();
    agent = ChukwaAgent.getAgent();
    Configuration conf = agent.getConfiguration();
    MAX_SIZE_PER_POST = conf.getInt(MAX_SIZE_PER_POST_OPT, MAX_SIZE_PER_POST);
    MIN_POST_INTERVAL = conf.getInt(MIN_POST_INTERVAL_OPT, MIN_POST_INTERVAL);
    try {
      writers = new PipelineStageWriter(conf);
      (new Thread(this, "Pipeline connector thread")).start();
    } catch(Exception e) {
      log.error("Pipeline initialization error: ", e);
    }
  }

  /** Signals the run loop to exit and closes the pipeline writers. */
  public void shutdown() {
    stopMe = true;
    try {
      writers.close();
    } catch (WriterException e) {
      log.warn("Shutdown error: ",e);
    }
  }

  /**
   * Main loop: collect up to MAX_SIZE_PER_POST bytes of chunks, write them
   * through the pipeline, ack commits, and pace posts to at least
   * MIN_POST_INTERVAL ms apart.
   */
  public void run() {
    log.info("PipelineConnector started at time:" + System.currentTimeMillis());
    try {
      long lastPost = System.currentTimeMillis();
      while (!stopMe) {
        List<Chunk> newQueue = new ArrayList<Chunk>();
        try {
          // get all ready chunks from the chunkQueue to be sent
          chunkQueue.collect(newQueue, MAX_SIZE_PER_POST);
          CommitStatus result = writers.add(newQueue);
          if(result.equals(ChukwaWriter.COMMIT_OK)) {
            // Bug fix: was "chunkCount = newQueue.size()", which overwrote
            // the running total so the per-minute stat only ever reflected
            // the final batch instead of all chunks since the last report.
            chunkCount += newQueue.size();
            for (Chunk c : newQueue) {
              agent.reportCommit(c.getInitiator(), c.getSeqID());
            }
          }
        } catch (WriterException e) {
          log.warn("PipelineStageWriter Exception: ", e);
        } catch (InterruptedException e) {
          log.warn("thread interrupted during addChunks(ChunkQueue)");
          Thread.currentThread().interrupt();
          break;
        }
        long now = System.currentTimeMillis();
        long delta = MIN_POST_INTERVAL - now + lastPost;
        if(delta > 0) {
          Thread.sleep(delta); // wait for stuff to accumulate
        }
        lastPost = now;
      } // end of try forever loop
      log.info("received stop() command so exiting run() loop to shutdown connector");
    } catch (OutOfMemoryError e) {
      log.warn("Bailing out", e);
      throw new RuntimeException("Shutdown pipeline connector.");
    } catch (InterruptedException e) {
      // do nothing, let thread die.
      log.warn("Bailing out", e);
      throw new RuntimeException("Shutdown pipeline connector.");
    } catch (Throwable e) {
      log.error("connector failed; shutting down agent: ", e);
      throw new RuntimeException("Shutdown pipeline connector.");
    }
  }

  // No dynamic configuration is supported by this connector.
  @Override
  public void reloadConfiguration() {
  }
}
| 8,262 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/connector | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/connector/http/HttpConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.connector.http;
/**
 * This class is responsible for setting up a {@link HttpConnectorClient} with a collector
 * and then repeatedly calling its send function, which encapsulates the work of setting up the
 * connection with the appropriate collector and then collecting and sending the {@link Chunk}s
 * from the global {@link ChunkQueue} which were added by {@link Adaptors}. We want to separate
 * the long-lived (i.e. looping) behavior from the ConnectorClient because we also want to be able
 * to use the HttpConnectorClient for its add and send API in arbitrary applications that want to send
 * chunks without a {@link LocalAgent} daemon.
 *
 * <p>
 * On error, tries the list of available collectors, pauses for a minute, and then repeats.
 * </p>
 * <p> Will wait forever for collectors to come up. </p>
 */
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import org.apache.hadoop.chukwa.datacollection.sender.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
 * HTTP connector: drains chunks from the agent's global {@link ChunkQueue}
 * and posts them to a collector over HTTP, acknowledging committed chunks
 * back to the agent. Collector URLs come either from the conf/collectors
 * file or from an explicit destination passed on the agent command line.
 */
public class HttpConnector implements Connector, Runnable {

  static Logger log = Logger.getLogger(HttpConnector.class);

  Timer statTimer = null;
  AtomicInteger chunkCount = new AtomicInteger();
  int MAX_SIZE_PER_POST = 2 * 1024 * 1024;
  int MIN_POST_INTERVAL = 5 * 1000;
  public static final String MIN_POST_INTERVAL_OPT = "httpConnector.minPostInterval";
  public static final String MAX_SIZE_PER_POST_OPT = "httpConnector.maxPostSize";
  public static final String ASYNC_ACKS_OPT = "httpConnector.asyncAcks";
  boolean ASYNC_ACKS = false;

  ChunkQueue chunkQueue;

  ChukwaAgent agent;
  String argDestination = null;

  private volatile boolean stopMe = false;
  private Iterator<String> collectors = null;
  protected ChukwaSender connectorClient = null;

  { // instance initializer block: report ACK throughput once a minute
    statTimer = new Timer();
    statTimer.schedule(new TimerTask() {
      public void run() {
        // getAndSet is a single atomic read-and-reset, so ACKs arriving
        // between the read and the reset are no longer lost (the previous
        // separate get()/set(0) pair raced with incrementing threads).
        int count = chunkCount.getAndSet(0);
        log.info("# http chunks ACK'ed since last report: " + count);
      }
    }, 100, 60 * 1000);
  }

  public HttpConnector(ChukwaAgent agent) {
    this.agent = agent;
  }

  /**
   * @param agent owning agent, used for configuration and commit reports
   * @param destination explicit collector URL, overriding conf/collectors
   */
  public HttpConnector(ChukwaAgent agent, String destination) {
    this.agent = agent;
    this.argDestination = destination;

    log.info("Setting HTTP Connector URL manually using arg passed to Agent: "
        + destination);
  }

  /** Reads tuning options from the agent configuration and starts the post thread. */
  public void start() {

    chunkQueue = DataFactory.getInstance().getEventQueue();
    Configuration conf = agent.getConfiguration();
    MAX_SIZE_PER_POST = conf.getInt(MAX_SIZE_PER_POST_OPT, MAX_SIZE_PER_POST);
    MIN_POST_INTERVAL = conf.getInt(MIN_POST_INTERVAL_OPT, MIN_POST_INTERVAL);
    ASYNC_ACKS = conf.getBoolean(ASYNC_ACKS_OPT, ASYNC_ACKS);

    (new Thread(this, "HTTP post thread")).start();
  }

  public void shutdown() {
    stopMe = true;
    connectorClient.stop();
  }

  /**
   * Post loop: resolves the collector list, builds the sender (async-ack or
   * plain HTTP), then repeatedly collects chunks from the queue, sends them,
   * and reports commits back to the agent, throttled to MIN_POST_INTERVAL.
   */
  public void run() {
    log.info("HttpConnector started at time:" + System.currentTimeMillis());

    // build a list of our destinations from collectors
    try {
      if (collectors == null)
        collectors = DataFactory.getInstance().getCollectorURLs(agent.getConfiguration());
    } catch (IOException e) {
      log.error("Failed to retrieve list of collectors from "
          + "conf/collectors file", e);
    }

    if (ASYNC_ACKS) {
      try {
        connectorClient = new AsyncAckSender(agent.getConfiguration(), agent);
      } catch (IOException e) {
        log.fatal("can't read AsycAck hostlist file, exiting");
        agent.shutdown(true);
      }
    } else
      connectorClient = new ChukwaHttpSender(agent.getConfiguration());

    if (argDestination != null) {
      // an explicit runtime destination overrides the collectors file
      ArrayList<String> tmp = new ArrayList<String>();
      tmp.add(argDestination);
      collectors = tmp.iterator();
      log.info("using collector specified at agent runtime: " + argDestination);
    } else
      log.info("using collectors from collectors file");

    if (collectors == null || !collectors.hasNext()) {
      log.error("No collectors specified, exiting (and taking agent with us).");
      agent.shutdown(true);// error is unrecoverable, so stop hard.
      return;
    }

    connectorClient.setCollectors(collectors);

    try {
      long lastPost = System.currentTimeMillis();
      while (!stopMe) {
        List<Chunk> newQueue = new ArrayList<Chunk>();
        try {
          // get all ready chunks from the chunkQueue to be sent
          chunkQueue.collect(newQueue, MAX_SIZE_PER_POST); // FIXME: should
          // really do this by size
        } catch (InterruptedException e) {
          // log via log4j for consistency with the rest of this class
          // (was System.out.println)
          log.warn("thread interrupted during addChunks(ChunkQueue)");
          Thread.currentThread().interrupt();
          break;
        }
        List<ChukwaHttpSender.CommitListEntry> results = connectorClient
            .send(newQueue);
        // checkpoint the chunks which were committed
        for (ChukwaHttpSender.CommitListEntry cle : results) {
          agent.reportCommit(cle.adaptor, cle.uuid);
          // atomic increment; the old get()+set() pair could drop counts
          // when racing with the stat timer's reset
          chunkCount.incrementAndGet();
        }

        // throttle: sleep out the remainder of MIN_POST_INTERVAL since lastPost
        long now = System.currentTimeMillis();
        long delta = MIN_POST_INTERVAL - now + lastPost;
        if (delta > 0) {
          Thread.sleep(delta); // wait for stuff to accumulate
        }
        lastPost = now;
      } // end of try forever loop
      log.info("received stop() command so exiting run() loop to shutdown connector");
    } catch (OutOfMemoryError e) {
      log.warn("Bailing out", e);
    } catch (InterruptedException e) {
      // do nothing, let thread die.
      log.warn("Bailing out", e);
    } catch (java.io.IOException e) {
      log.error("connector failed; shutting down agent");
      agent.shutdown(true);
    }
  }

  @Override
  public void reloadConfiguration() {
    Iterator<String> destinations = null;

    // build a list of our destinations from collectors
    try {
      destinations = DataFactory.getInstance().getCollectorURLs(agent.getConfiguration());
    } catch (IOException e) {
      log.error("Failed to retreive list of collectors from conf/collectors file", e);
    }
    if (destinations != null && destinations.hasNext()) {
      collectors = destinations;
      connectorClient.setCollectors(collectors);
      log.info("Resetting collectors");
    }
  }

  public ChukwaSender getSender() {
    return connectorClient;
  }

  public void setCollectors(Iterator<String> list) {
    collectors = list;
  }
}
| 8,263 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/tools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/tools/backfilling/BackfillingLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.tools.backfilling;
import java.io.File;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.adaptor.*;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorFactory;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorManager;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
public class BackfillingLoader {
  static Logger log = Logger.getLogger(BackfillingLoader.class);

  protected Configuration conf = null;
  protected ChunkQueue queue = null;
  protected Connector connector = null;

  private String cluster = null;
  private String machine = null;
  private String adaptorName = null;
  private String recordType = null;
  private String logFile = null;

  /**
   * Replays a single log file through a Chukwa adaptor, pushing the
   * resulting chunks straight into the writer pipeline, tagged with the
   * supplied cluster and host names.
   */
  public BackfillingLoader(Configuration conf, String cluster, String machine,
      String adaptorName, String recordType, String logFile) {

    this.conf = conf;
    this.cluster = cluster.trim();
    this.machine = machine.trim();
    this.adaptorName = adaptorName;
    this.recordType = recordType;
    this.logFile = logFile;

    log.info("cluster >>>" + cluster) ;
    log.info("machine >>>" + machine) ;
    log.info("adaptorName >>>" + adaptorName) ;
    log.info("recordType >>>" + recordType) ;
    log.info("logFile >>>" + logFile) ;

    // Set the right cluster and machine information
    DataFactory.getInstance().addDefaultTag("cluster=\"" + this.cluster + "\"");
    ChunkImpl.setHostAddress(this.machine);

    queue = DataFactory.getInstance().getEventQueue();
    connector = new QueueToWriterConnector(conf, true);
  }

  /**
   * Runs the backfill: starts the connector, streams the file through the
   * adaptor until fully consumed, shuts everything down, then renames the
   * input file to "&lt;name&gt;.sav" so it is not replayed.
   * @throws AdaptorException if the adaptor cannot be created or started
   */
  public void process() throws AdaptorException {
    File sourceFile = new File(logFile);
    connector.start();
    Adaptor adaptor = AdaptorFactory.createAdaptor(adaptorName);
    adaptor.parseArgs(recordType, "0 " + sourceFile.getAbsolutePath(), AdaptorManager.NULL);
    adaptor.start("", recordType, 0l, queue);
    // blocks until the adaptor has emitted the whole file
    adaptor.shutdown(AdaptorShutdownPolicy.WAIT_TILL_FINISHED);
    connector.shutdown();
    if (!sourceFile.renameTo(new File(logFile + ".sav"))) {
      System.err.println("Error in renaming " + logFile + " to " + logFile + ".sav");
    }
  }

  public static void usage() {
    System.out.println("java org.apache.hadoop.chukwa.tools.backfilling.BackfillingLoader <cluster> <machine> <adaptorName> <recordType> <logFile>");
  }

  /**
   * @param args is command line parameters
   * @throws Exception if problem loading data to HDFS
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 5) {
      usage();
      return;
    }
    BackfillingLoader loader = new BackfillingLoader(
        new ChukwaConfiguration(), args[0], args[1], args[2], args[3], args[4]);
    loader.process();
  }
}
| 8,264 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/tools | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/tools/backfilling/QueueToWriterConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.tools.backfilling;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.parquet.ChukwaParquetWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
 * Connector that drains the global chunk queue directly into a ChukwaWriter
 * on a dedicated thread, bypassing HTTP. Used by the backfilling tools.
 */
public class QueueToWriterConnector implements Connector, Runnable {
  static Logger log = Logger.getLogger(QueueToWriterConnector.class);
  static final int MAX_SIZE_PER_POST = 2 * 1024 * 1024;

  protected Configuration conf = null;
  protected volatile boolean isRunning = true;
  protected ChunkQueue chunkQueue = DataFactory.getInstance().getEventQueue();

  protected ChukwaWriter writer = null;
  protected Thread runner = null;
  protected boolean isBackfilling = false;

  /**
   * @param conf Chukwa configuration; supplies the writer class name
   * @param isBackfilling if true, the run loop polls and sleeps while the
   *        queue is empty instead of blocking in collect()
   */
  public QueueToWriterConnector(Configuration conf, boolean isBackfilling) {
    this.conf = conf;
    this.isBackfilling = isBackfilling;
  }

  @Override
  public void reloadConfiguration() {
    // do nothing here
  }

  /**
   * Stops the run loop, waits for the worker thread to drain the queue,
   * then closes the writer. Close failures are logged, not propagated.
   */
  @Override
  public void shutdown() {
    isRunning = false;
    log.info("Shutdown in progress ...");

    // best-effort wait for the worker to finish; an interrupt here is
    // deliberately ignored so shutdown still waits for a clean drain
    while (isAlive()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }

    try {
      if (writer != null) {
        writer.close();
      }
    } catch(Exception e) {
      log.warn("Exception while closing writer: ", e);
    }
    log.info("Shutdown done.");
  }

  @Override
  public void start() {
    log.info("Starting QueueToWriterConnector thread");
    runner = new Thread(this, "QueueToWriterConnectorThread");
    runner.start();
  }

  protected boolean isAlive() {
    return this.runner.isAlive();
  }

  /**
   * Worker loop: instantiates the configured writer, then repeatedly
   * collects chunks from the queue and hands them to the writer until
   * shutdown() is called and the queue is drained. On any write failure
   * the same chunks are retried after a 5-second backoff.
   */
  @Override
  public void run() {
    log.info("initializing QueueToWriterConnector");
    try {
      String writerClassName = conf.get("chukwaCollector.writerClass",
          ChukwaParquetWriter.class.getCanonicalName());
      Class<?> writerClass = Class.forName(writerClassName);
      if (writerClass != null
          && ChukwaWriter.class.isAssignableFrom(writerClass)) {
        writer = (ChukwaWriter) writerClass.getDeclaredConstructor(Configuration.class).newInstance(conf);
      } else {
        throw new RuntimeException("Wrong class type");
      }
    } catch (Throwable e) {
      log.warn("failed to use user-chosen writer class, Bail out!", e);
      throw new RuntimeException("Bail out!");
    }

    List<Chunk> chunks = new LinkedList<Chunk>();
    ChukwaAgent agent = null;// ChukwaAgent.getAgent();

    log.info("processing data for QueueToWriterConnector");

    // keep going until stopped AND both the queue and the retry batch are empty
    while ( isRunning || chunkQueue.size() != 0 || chunks.size() != 0) {
      try {
        if (chunks.size() == 0) {
          if (isBackfilling && chunkQueue.size() == 0) {
            Thread.sleep(300);
            continue;
          }
          chunkQueue.collect(chunks, MAX_SIZE_PER_POST);
          log.info("Got " + chunks.size() + " chunks back from the queue");
        }
        writer.add(chunks);

        if (agent != null) {
          for(Chunk chunk: chunks) {
            agent.reportCommit(chunk.getInitiator(), chunk.getSeqID());
          }
        }
        // chunks were written successfully; do not retry them
        chunks.clear();
      }
      catch (Throwable e) {
        // log the stack trace through log4j instead of printStackTrace(),
        // so the failure lands in the log file with the other messages
        log.warn("Could not save some chunks", e);
        try {
          Thread.sleep(5000); // back off before retrying the same chunks
        } catch (InterruptedException ignored) {}
      }
    }
  }
}
| 8,265 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/visualization/Swimlanes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.visualization;
import prefuse.data.io.sql.*;
import prefuse.data.expression.parser.*;
import prefuse.data.expression.*;
import prefuse.data.column.*;
import prefuse.data.query.*;
import prefuse.data.*;
import prefuse.action.*;
import prefuse.action.layout.*;
import prefuse.action.assignment.*;
import prefuse.visual.expression.*;
import prefuse.visual.*;
import prefuse.render.*;
import prefuse.util.collections.*;
import prefuse.util.*;
import prefuse.*;
import org.apache.hadoop.chukwa.hicc.OfflineTimeHandler;
import org.apache.hadoop.chukwa.hicc.TimeHandler;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.database.Macro;
import org.apache.hadoop.chukwa.util.XssFilter;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import javax.servlet.http.*;
import java.sql.*;
import java.util.*;
import java.awt.Font;
import java.awt.geom.Rectangle2D;
/**
* Static image generation for Swimlanes visualization for scalable
* rendering on front-end client (web-browser)
* Handles database data retrieval, transforming data to form for
* visualization elements, and initializing and calling visualization
* elements
*/
public class Swimlanes {
private static Log log = LogFactory.getLog(Swimlanes.class);
int SIZE_X=1600, SIZE_Y=1600;
final int [] BORDER = {50,50,50,50};
final int LEGEND_X_OFFSET = 50;
final int LEGEND_Y_OFFSET = 25;
final int LEGEND_TEXT_OFFSET = 20;
final int LEGEND_FONT_SIZE = 18;
final int AXIS_NAME_FONT_SIZE = 24;
protected boolean offline_use = true;
protected HttpServletRequest request;
protected String abc;
/**
* Modifier for generic Swimlanes plots to plot shuffle, sort, and reducer
* states of same reduce on same line
*/
protected static class MapReduceSwimlanes {
protected Table plot_tab;
protected HashMap<String, ArrayList<Tuple> > reducepart_hash;
protected boolean collate_reduces = false;
public MapReduceSwimlanes() {
this.plot_tab = new Table();
this.plot_tab.addColumn("ycoord",float.class);
this.plot_tab.addColumn("state_name",String.class);
this.plot_tab.addColumn("hostname",String.class);
this.plot_tab.addColumn("friendly_id",String.class);
this.plot_tab.addColumn(START_FIELD_NAME,double.class);
this.plot_tab.addColumn(END_FIELD_NAME,double.class);
this.plot_tab.addColumn(PolygonRenderer.POLYGON,float[].class);
this.reducepart_hash = new HashMap<String, ArrayList<Tuple> >();
}
public void populateTable_OneLinePerState(Table orig_tab) {
IntIterator rownumiter;
int newrownum, origrownum;
rownumiter = orig_tab.rows(); // iterate over everything
while (rownumiter.hasNext()) {
origrownum = ((Integer)rownumiter.next()).intValue();
newrownum = this.plot_tab.addRow();
this.plot_tab.set(newrownum, "state_name", orig_tab.getString(origrownum, "state_name"));
this.plot_tab.set(newrownum, "ycoord", orig_tab.getInt(origrownum, "seqno"));
this.plot_tab.set(newrownum,"hostname",orig_tab.getString(origrownum,"hostname"));
this.plot_tab.set(newrownum,"friendly_id",orig_tab.getString(origrownum,"friendly_id"));
this.plot_tab.set(newrownum,START_FIELD_NAME, orig_tab.getDouble(origrownum,START_FIELD_NAME));
this.plot_tab.set(newrownum,END_FIELD_NAME, orig_tab.getDouble(origrownum,END_FIELD_NAME));
}
}
public void populateTable_CollateReduces(Table orig_tab) {
IntIterator rownumiter;
int newrownum, origrownum;
this.collate_reduces = true;
// add maps normally
rownumiter = orig_tab.rows(
(Predicate) ExpressionParser.parse("[state_name] == 'map' " +
"OR [state_name] == 'shuffle_local' " +
"OR [state_name] == 'shuffle_remote'")
);
while (rownumiter.hasNext()) {
origrownum = ((Integer)rownumiter.next()).intValue();
newrownum = this.plot_tab.addRow();
this.plot_tab.set(newrownum, "state_name", orig_tab.getString(origrownum, "state_name"));
this.plot_tab.set(newrownum, "ycoord", orig_tab.getInt(origrownum, "seqno"));
this.plot_tab.set(newrownum,"hostname",orig_tab.getString(origrownum,"hostname"));
this.plot_tab.set(newrownum,"friendly_id",orig_tab.getString(origrownum,"friendly_id"));
this.plot_tab.set(newrownum,START_FIELD_NAME, orig_tab.getDouble(origrownum,START_FIELD_NAME));
this.plot_tab.set(newrownum,END_FIELD_NAME, orig_tab.getDouble(origrownum,END_FIELD_NAME));
}
// special breakdown for reduces
IntIterator rownumiter3 = orig_tab.rows(
(Predicate) ExpressionParser.parse("[state_name] == 'reduce_reducer' " +
"OR [state_name] == 'reduce_shufflewait' " +
"OR [state_name] == 'reduce_sort' " +
"OR [state_name] == 'reduce'")
);
ArrayList<Tuple> tuple_array;
while (rownumiter3.hasNext()) {
origrownum = ((Integer)rownumiter3.next()).intValue();
if (orig_tab.getString(origrownum,"state_name").equals("reduce")) {
continue; // do NOT add reduces
}
String curr_reduce = orig_tab.getString(origrownum, "friendly_id");
newrownum = this.plot_tab.addRow();
this.plot_tab.set(newrownum, "state_name", orig_tab.getString(origrownum, "state_name"));
this.plot_tab.set(newrownum, "ycoord", orig_tab.getInt(origrownum, "seqno"));
this.plot_tab.set(newrownum,"hostname",orig_tab.getString(origrownum,"hostname"));
this.plot_tab.set(newrownum,"friendly_id",orig_tab.getString(origrownum,"friendly_id"));
this.plot_tab.set(newrownum,START_FIELD_NAME, orig_tab.getDouble(origrownum,START_FIELD_NAME));
this.plot_tab.set(newrownum,END_FIELD_NAME, orig_tab.getDouble(origrownum,END_FIELD_NAME));
tuple_array = this.reducepart_hash.get(curr_reduce);
if (tuple_array == null) {
tuple_array = new ArrayList<Tuple>();
tuple_array.add(this.plot_tab.getTuple(newrownum));
this.reducepart_hash.put(curr_reduce, tuple_array);
} else {
tuple_array.add(this.plot_tab.getTuple(newrownum));
}
}
}
public void populateTable_MapsReducesOnly(Table orig_tab) {
IntIterator rownumiter;
int newrownum, origrownum;
rownumiter = orig_tab.rows(
(Predicate) ExpressionParser.parse("[state_name] == 'map' OR [state_name] == 'reduce'")
);
while (rownumiter.hasNext()) {
origrownum = ((Integer)rownumiter.next()).intValue();
newrownum = this.plot_tab.addRow();
this.plot_tab.set(newrownum, "state_name", orig_tab.getString(origrownum, "state_name"));
this.plot_tab.set(newrownum, "ycoord", orig_tab.getInt(origrownum, "seqno"));
this.plot_tab.set(newrownum,"hostname",orig_tab.getString(origrownum,"hostname"));
this.plot_tab.set(newrownum,"friendly_id",orig_tab.getString(origrownum,"friendly_id"));
this.plot_tab.set(newrownum,START_FIELD_NAME, orig_tab.getDouble(origrownum,START_FIELD_NAME));
this.plot_tab.set(newrownum,START_FIELD_NAME, orig_tab.getDouble(origrownum,END_FIELD_NAME));
}
}
/**
* Reassigns Y coord values to group by state
*/
public void groupByState() {
int counter, rownum;
int rowcount = this.plot_tab.getRowCount();
HashSet<String> states = new HashSet<String>();
String curr_state = null;
Iterator<String> state_iter;
IntIterator rownumiter;
for (int i = 0; i < rowcount; i++) {
states.add(this.plot_tab.getString(i,"state_name"));
}
state_iter = states.iterator();
counter = 1;
while (state_iter.hasNext()) {
curr_state = state_iter.next();
if (this.collate_reduces && ((curr_state.equals("reduce_reducer") || curr_state.equals("reduce_sort")))) {
continue;
}
rownumiter = this.plot_tab.rows(
(Predicate) ExpressionParser.parse("[state_name] == '"+curr_state+"'")
);
if (this.collate_reduces && curr_state.equals("reduce_shufflewait")) {
while (rownumiter.hasNext()) {
rownum = ((Integer)rownumiter.next()).intValue();
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
ArrayList<Tuple> alt = this.reducepart_hash.get(this.plot_tab.getString(rownum,"friendly_id"));
Object [] tarr = alt.toArray();
for (int i = 0; i < tarr.length; i++) ((Tuple)tarr[i]).setFloat("ycoord",(float)counter);
counter++;
}
} else {
while (rownumiter.hasNext()) {
rownum = ((Integer)rownumiter.next()).intValue();
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
counter++;
}
}
}
}
public void groupByStartTime() {
int counter, rownum;
String curr_state = null;
IntIterator rownumiter;
rownumiter = this.plot_tab.rowsSortedBy(START_FIELD_NAME, true);
counter = 1;
while (rownumiter.hasNext()) {
rownum = ((Integer)rownumiter.next()).intValue();
curr_state = this.plot_tab.getString(rownum, "state_name");
if (this.collate_reduces && curr_state.equals("reduce_shufflewait")) {
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
ArrayList<Tuple> alt = this.reducepart_hash.get(this.plot_tab.getString(rownum,"friendly_id"));
Object [] tarr = alt.toArray();
for (int i = 0; i < tarr.length; i++) ((Tuple)tarr[i]).setFloat("ycoord",(float)counter);
counter++;
} else if (!curr_state.equals("reduce_sort") && !curr_state.equals("reduce_reducer")) {
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
counter++;
}
}
}
public void groupByEndTime() {
int counter, rownum;
String curr_state = null;
IntIterator rownumiter;
rownumiter = this.plot_tab.rowsSortedBy(END_FIELD_NAME, true);
counter = 1;
while (rownumiter.hasNext()) {
rownum = ((Integer)rownumiter.next()).intValue();
curr_state = this.plot_tab.getString(rownum, "state_name");
if (this.collate_reduces && curr_state.equals("reduce_reducer")) {
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
ArrayList<Tuple> alt = this.reducepart_hash.get(this.plot_tab.getString(rownum,"friendly_id"));
Object [] tarr = alt.toArray();
for (int i = 0; i < tarr.length; i++) ((Tuple)tarr[i]).setFloat("ycoord",(float)counter);
counter++;
} else if (!curr_state.equals("reduce_sort") && !curr_state.equals("reduce_shufflewait")) {
this.plot_tab.setFloat(rownum,"ycoord",(float)counter);
counter++;
}
}
}
public VisualTable addToVisualization(Visualization viz, String groupname) {
return viz.addTable(groupname, this.plot_tab);
}
}
/**
* Provide constant mapping between state names and colours
* so that even if particular states are missing, the colours are fixed
* for each state
*/
public static class SwimlanesStatePalette {
protected final String [] states = {"map","reduce","reduce_shufflewait","reduce_sort","reduce_reducer","shuffle"};
HashMap<String,Integer> colourmap;
protected int [] palette;
public SwimlanesStatePalette() {
palette = ColorLib.getCategoryPalette(states.length);
colourmap = new HashMap<String,Integer>();
for (int i = 0; i < states.length; i++) {
colourmap.put(states[i], Integer.valueOf(palette[i]));
}
}
public int getColour(String state_name) {
Integer val = colourmap.get(state_name);
if (val == null) {
return ColorLib.color(java.awt.Color.BLACK);
} else {
return val.intValue();
}
}
public int getNumStates() {
return this.states.length;
}
public String [] getStates() {
return this.states.clone();
}
}
/**
* Provides convenient rescaling of raw values to be plotted to
* actual pixels for plotting on image
*/
public static class CoordScaler {
double x_pixel_size, y_pixel_size;
double x_max_value, y_max_value, x_min_value, y_min_value;
double x_start, y_start;
public CoordScaler() {
this.x_pixel_size = 0.0;
this.y_pixel_size = 0.0;
this.x_max_value = 1.0;
this.y_max_value = 1.0;
this.x_min_value = 0.0;
this.y_min_value = 0.0;
this.x_start = 0.0;
this.y_start = 0.0;
}
public void set_pixel_start(double x, double y) {
this.x_start = x;
this.y_start = y;
}
public void set_pixel_size(double x, double y) {
this.x_pixel_size = x;
this.y_pixel_size = y;
}
public void set_value_ranges(double x_min, double y_min, double x_max, double y_max) {
this.x_max_value = x_max;
this.y_max_value = y_max;
this.x_min_value = x_min;
this.y_min_value = y_min;
}
public double get_x_coord(double x_value) {
return x_start+(((x_value - x_min_value) / (x_max_value-x_min_value)) * x_pixel_size);
}
public double get_y_coord(double y_value) {
// this does "inverting" to shift the (0,0) point from top-right to bottom-right
return y_start+(y_pixel_size - ((((y_value - y_min_value) / (y_max_value-y_min_value)) * y_pixel_size)));
}
}
/**
* Prefuse action for plotting a line for each state
*/
  public static class SwimlanesStateAction extends GroupAction {
    // scaler mapping state times / row numbers to pixel coordinates
    protected CoordScaler cs;
    public SwimlanesStateAction() {
      super();
    }
    /**
     * @param group prefuse visual group whose items this action lays out
     * @param cs scaler used to convert times and row numbers to pixels
     */
    public SwimlanesStateAction(String group, CoordScaler cs) {
      super(group);
      this.cs = cs;
    }
    /**
     * Lays out every item in the group as a horizontal line from its start
     * time to its finish time at the item's ycoord row, stroked in the
     * palette colour for its state name.
     */
    public void run (double frac) {
      VisualItem item = null;
      SwimlanesStatePalette pal = new SwimlanesStatePalette();
      Iterator<?> curr_group_items = this.m_vis.items(this.m_group);
      while (curr_group_items.hasNext()) {
        item = (VisualItem) curr_group_items.next();
        double start_time = item.getDouble(START_FIELD_NAME);
        double finish_time = item.getDouble(END_FIELD_NAME);
        item.setShape(Constants.POLY_TYPE_LINE);
        item.setX(0.0);
        item.setY(0.0);
        // polygon coords are (x1,y1,x2,y2); both endpoints share the row's ycoord
        float [] coords = new float[4];
        coords[0] = (float) cs.get_x_coord(start_time);
        coords[1] = (float) cs.get_y_coord((double)item.getInt("ycoord"));
        coords[2] = (float) cs.get_x_coord(finish_time);
        coords[3] = (float) cs.get_y_coord((double)item.getInt("ycoord"));
        item.set(VisualItem.POLYGON,coords);
        item.setStrokeColor(pal.getColour(item.getString("state_name")));
      }
    }
  } // SwimlanesStateAction
// keys that need to be filled:
// period (last1/2/3/6/12/24hr,last7d,last30d), time_type (range/last), start, end
protected HashMap<String, String> param_map;
protected String cluster;
protected String timezone;
protected String shuffle_option;
protected final String table = "mapreduce_fsm";
protected boolean plot_legend = true;
protected String jobname = null;
protected Display dis;
protected Visualization viz;
protected Rectangle2D dataBound = new Rectangle2D.Double();
protected Rectangle2D xlabBound = new Rectangle2D.Double();
protected Rectangle2D ylabBound = new Rectangle2D.Double();
protected Rectangle2D labelBottomBound = new Rectangle2D.Double();
static final String START_FIELD_NAME = "start_time_num";
static final String END_FIELD_NAME = "finish_time_num";
/* Different group names allow control of what Renderers to use */
final String maingroup = "Job";
final String othergroup = "Misc";
final String labelgroup = "Label";
final String legendgroup = "Legend";
final String legendshapegroup = "LegendShape";
public Swimlanes() {
this.cluster = "";
this.timezone = "";
this.shuffle_option = "";
param_map = new HashMap<String, String>();
}
/**
* Constructor for Swimlanes visualization object
* @param timezone Timezone string from environment
* @param cluster Cluster name from environment
* @param event_type Whether to display shuffles or not
* @param valmap HashMap of key/value pairs simulating parameters from a HttpRequest
*/
public Swimlanes
(String timezone, String cluster, String event_type,
HashMap<String, String> valmap)
{
this.cluster = cluster;
if (timezone != null) {
this.timezone = timezone;
} else {
this.timezone = null;
}
this.shuffle_option = event_type;
/* This should "simulate" an HttpServletRequest
* Need to have "start" and "end" in seconds since Epoch
*/
this.param_map = valmap;
}
public Swimlanes
(String timezone, String cluster, String event_type,
HashMap<String, String> valmap, int width, int height)
{
this.cluster = cluster;
if (timezone != null) {
this.timezone = timezone;
} else {
this.timezone = null;
}
this.shuffle_option = event_type;
/* This should "simulate" an HttpServletRequest
* Need to have "start" and "end" in seconds since Epoch
*/
this.param_map = valmap;
this.SIZE_X = width;
this.SIZE_Y = height;
}
public Swimlanes
(String timezone, String cluster, String event_type,
HashMap<String, String> valmap, int width, int height,
String legend_opt)
{
this.cluster = cluster;
if (timezone != null) {
this.timezone = timezone;
} else {
this.timezone = null;
}
this.shuffle_option = event_type;
/* This should "simulate" an HttpServletRequest
* Need to have "start" and "end" in seconds since Epoch
*/
this.param_map = valmap;
this.SIZE_X = width;
this.SIZE_Y = height;
if (legend_opt.equals("nolegend")) {
this.plot_legend = false;
}
}
/**
 * Online-mode constructor: pulls cluster and timezone from the HTTP session
 * and the shuffle display option from the XSS-filtered request parameters.
 * NOTE(review): session.getAttribute("cluster") and ("time_zone") are
 * dereferenced without null checks -- this throws NPE if the session has
 * not been populated; confirm all callers set those attributes first.
 */
public Swimlanes(HttpServletRequest request) {
XssFilter xf = new XssFilter(request);
this.offline_use = false;
this.request = request;
HttpSession session = request.getSession();
this.cluster = session.getAttribute("cluster").toString();
String evt_type = xf.getParameter("event_type");
if (evt_type != null) {
this.shuffle_option = evt_type;
} else {
// default: hide shuffle states when the parameter is absent
this.shuffle_option = "noshuffle";
}
this.timezone = session.getAttribute("time_zone").toString();
}
/**
 * Set job ID to filter results on
 * Call before calling @see #run
 * @param s job name
 */
public void setJobName(String s) {
// value is later interpolated into a SQL LIKE clause in getData()
this.jobname = s;
}
/**
 * Set dimensions of image to be generated
 * Call before calling @see #run
 * @param width image width in pixels
 * @param height image height in pixels
 */
public void setDimensions(int width, int height) {
this.SIZE_X=width;
this.SIZE_Y=height;
}
/**
 * Specify whether to print legend of states
 * Advisable to not print legend for excessively small images since
 * legend has fixed point size
 * Call before calling @see #run
 * @param legendopt parameter to turn on legends
 */
public void setLegend(boolean legendopt) {
  // direct assignment replaces the original's redundant if/else
  this.plot_legend = legendopt;
}
/**
 * Generates image in specified format, and writes image as binary
 * output to supplied output stream
 * @param output output stream of image
 * @param img_fmt image format
 * @param scale image scaling factor
 * @return true if image is saved
 */
public boolean getImage(java.io.OutputStream output, String img_fmt, double scale) {
// wrap the prepared Visualization in an off-screen prefuse Display
dis = new Display(this.viz);
dis.setSize(SIZE_X,SIZE_Y);
dis.setHighQuality(true);
dis.setFont(new Font(Font.SANS_SERIF,Font.PLAIN,24));
return dis.saveImage(output, img_fmt, scale);
}
/**
 * Adds a column to given table holding the full event time in epoch
 * MILLISECONDS: Timestamp.getTime() of the source column plus the stored
 * millisecond component from the companion column.
 * (Previous javadoc said "seconds since epoch"; the code sums milliseconds.)
 *
 * @param origTable Table to add to
 * @param srcFieldName Name of column containing timestamp
 * @param srcMillisecondFieldName Name of column containing millisecond value of time
 * @param dstFieldName Name of new column to add
 *
 * @return Modified table with added column (same instance, mutated in place)
 */
protected Table addTimeCol
(Table origTable, String srcFieldName,
String srcMillisecondFieldName, String dstFieldName)
{
origTable.addColumn(dstFieldName, long.class);
int total_rows = origTable.getRowCount();
for (int curr_row_num = 0; curr_row_num < total_rows; curr_row_num++) {
// full time = timestamp (ms) + separately-stored millisecond remainder
origTable.setLong(curr_row_num, dstFieldName,
((Timestamp)origTable.get(curr_row_num, srcFieldName)).getTime() +
origTable.getLong(curr_row_num, srcMillisecondFieldName)
);
}
return origTable;
}
/**
 * Adds a column with number of seconds of timestamp elapsed since lowest
 * start time; allows times to be plotted as a delta of the start time
 *
 * @param origTable Table to add column to
 * @param srcFieldName Name of column containing timestamp
 * @param srcMillisecondFieldName Name of column containing millisecond value of time
 * @param dstFieldName Name of new column to add
 * @param timeOffset epoch-millisecond base to subtract; 0 means use the
 *   minimum of the newly built "_fulltime" column instead
 *
 * @return Modified table with added column
 */
protected Table addTimeOffsetCol
(Table origTable, String srcFieldName,
String srcMillisecondFieldName, String dstFieldName,
long timeOffset)
{
// first materialize the absolute time in ms as "<dstFieldName>_fulltime"
Table newtable = addTimeCol(origTable, srcFieldName,
srcMillisecondFieldName, dstFieldName + "_fulltime");
ColumnMetadata dstcol = newtable.getMetadata(dstFieldName + "_fulltime");
long mintime = newtable.getLong(dstcol.getMinimumRow(), dstFieldName + "_fulltime");
// derived column is a prefuse expression: seconds relative to the base
if (timeOffset == 0) {
newtable.addColumn(dstFieldName, "ROUND((["+dstFieldName+"_fulltime] - " + mintime +"L) / 1000L)");
} else {
newtable.addColumn(dstFieldName, "ROUND((["+dstFieldName+"_fulltime] - " + timeOffset +"L) / 1000L)");
}
return newtable;
}
/**
 * Installs a RendererFactory that maps each visualization group to its
 * renderer: axes to AxisRenderers, swimlane data to a line-polygon
 * renderer, labels/legend text to LabelRenderers, legend swatches to a
 * large shape renderer, and everything else to a default shape renderer.
 */
protected void setupRenderer() {
this.viz.setRendererFactory(new RendererFactory(){
AbstractShapeRenderer sr = new ShapeRenderer();
ShapeRenderer sr_big = new ShapeRenderer(20);
Renderer arY = new AxisRenderer(Constants.LEFT, Constants.TOP);
Renderer arX = new AxisRenderer(Constants.CENTER, Constants.BOTTOM);
// POLY_TYPE_LINE: swimlane entries are drawn as open polylines
PolygonRenderer pr = new PolygonRenderer(Constants.POLY_TYPE_LINE);
LabelRenderer lr = new LabelRenderer("label");
LabelRenderer lr_legend = new LabelRenderer("label");
public Renderer getRenderer(VisualItem item) {
// alignments are (re)applied on every call; cheap and idempotent
lr.setHorizontalAlignment(Constants.CENTER);
lr.setVerticalAlignment(Constants.TOP);
lr_legend.setHorizontalAlignment(Constants.LEFT);
lr_legend.setVerticalAlignment(Constants.CENTER);
if (item.isInGroup("ylab")) {
return arY;
} else if (item.isInGroup("xlab")) {
return arX;
} else if (item.isInGroup(maingroup)) {
return pr;
} else if (item.isInGroup(labelgroup)) {
return lr;
} else if (item.isInGroup(legendgroup)) {
return lr_legend;
} else if (item.isInGroup(legendshapegroup)) {
return sr_big;
} else {
return sr;
}
}
});
}
// setup columns: add additional time fields
/**
 * Fetches query results and augments the table with derived columns:
 * a row sequence number, relative start/end time columns (seconds from the
 * earliest start), and a polygon column for the line renderer.
 * @return augmented table, or null if getData() failed
 */
protected Table setupDataTable() {
Table res_tab = this.getData();
if (res_tab == null) {
// propagate query failure; NOTE(review): run() does not null-check this
return res_tab;
}
res_tab.addColumn("seqno","ROW()");
res_tab = addTimeOffsetCol(res_tab, "start_time", "start_time_millis", START_FIELD_NAME, 0);
ColumnMetadata dstcol = res_tab.getMetadata(START_FIELD_NAME);
// NOTE(review): mintime here is the raw timestamp without the millis
// column added, unlike the "_fulltime" minimum used inside
// addTimeOffsetCol -- confirm this off-by-millis base is intended
long mintime = ((Timestamp)res_tab.get(dstcol.getMinimumRow(), "start_time")).getTime();
res_tab = addTimeOffsetCol(res_tab, "finish_time", "finish_time_millis", END_FIELD_NAME, mintime);
res_tab.addColumn(PolygonRenderer.POLYGON,float[].class);
log.debug("After adding seqno: #cols: " + res_tab.getColumnCount() + "; #rows: " + res_tab.getRowCount());
return res_tab;
}
/**
 * Adds the x-axis title ("Time/s") as a one-row label table positioned
 * horizontally centered, just below the plot area.
 */
protected void addAxisNames() {
Table textlabels_table = new Table();
textlabels_table.addColumn("label",String.class);
textlabels_table.addColumn("type",String.class);
textlabels_table.addRow();
textlabels_table.setString(0,"label","Time/s");
textlabels_table.setString(0,"type","xaxisname");
VisualTable textlabelsviz = this.viz.addTable(labelgroup, textlabels_table);
// centered horizontally; 10% of the bottom border below the data area
textlabelsviz.setX(0,SIZE_X/2d);
textlabelsviz.setY(0,SIZE_Y - BORDER[2] + (BORDER[2]*0.1));
textlabelsviz.setTextColor(0,ColorLib.color(java.awt.Color.GRAY));
textlabelsviz.setFont(0,new Font(Font.SANS_SERIF,Font.PLAIN,AXIS_NAME_FONT_SIZE));
}
/**
 * Builds the state legend: one coloured swatch (legendshapegroup) plus one
 * text label (legendgroup) per state in the SwimlanesStatePalette, laid out
 * in a vertical strip inside the top-left border area.
 */
protected void addLegend() {
SwimlanesStatePalette ssp = new SwimlanesStatePalette();
Table shapes_table = new Table();
shapes_table.addColumn(VisualItem.X,float.class);
shapes_table.addColumn(VisualItem.Y,float.class);
Table legend_labels_table = new Table();
legend_labels_table.addColumn("label",String.class);
// add labels
int num_states = ssp.getNumStates();
String [] state_names = ssp.getStates();
legend_labels_table.addRows(num_states);
shapes_table.addRows(num_states);
for (int i = 0; i < num_states; i++) {
legend_labels_table.setString(i,"label",state_names[i]);
}
// add legend shapes, manipulate visualitems to set colours
VisualTable shapes_table_viz = viz.addTable(legendshapegroup, shapes_table);
float start_x = BORDER[0] + LEGEND_X_OFFSET;
float start_y = BORDER[1] + LEGEND_Y_OFFSET;
float incr = (float) 30.0; // vertical spacing between legend entries
for (int i = 0; i < num_states; i++) {
shapes_table_viz.setFillColor(i, ssp.getColour(state_names[i]));
shapes_table_viz.setFloat(i, VisualItem.X, start_x);
shapes_table_viz.setFloat(i, VisualItem.Y, start_y + (i * incr));
}
// add legend labels, manipulate visualitems to set font
VisualTable legend_labels_table_viz = this.viz.addTable(legendgroup, legend_labels_table);
for (int i = 0; i < num_states; i++) {
// text sits to the right of its swatch, vertically aligned with it
legend_labels_table_viz.setFloat(i, VisualItem.X, start_x + LEGEND_TEXT_OFFSET);
legend_labels_table_viz.setFloat(i, VisualItem.Y, start_y + (i * incr));
legend_labels_table_viz.setTextColor(i,ColorLib.color(java.awt.Color.BLACK));
legend_labels_table_viz.setFont(i,new Font(Font.SANS_SERIF,Font.PLAIN,LEGEND_FONT_SIZE));
}
}
/**
 * Builds the complete swimlanes visualization: computes layout bounds,
 * fetches and reshapes the data, wires up axes / legend / custom swimlane
 * line action, and runs the prefuse action lists so the Visualization is
 * ready for getImage().
 */
public void run() {
// setup bounds
this.dataBound.setRect(BORDER[0],BORDER[1],SIZE_X-BORDER[2]-BORDER[0],SIZE_Y-BORDER[3]-BORDER[1]);
this.xlabBound.setRect(BORDER[0],BORDER[1],SIZE_X-BORDER[2]-BORDER[0],SIZE_Y-BORDER[3]-BORDER[1]);
this.ylabBound.setRect(BORDER[0],BORDER[1],SIZE_X-BORDER[2]-BORDER[0],SIZE_Y-BORDER[3]-BORDER[1]);
this.labelBottomBound.setRect(BORDER[0],SIZE_X-BORDER[2],SIZE_Y-BORDER[0]-BORDER[1],BORDER[3]);
// setup visualization
this.viz = new Visualization();
this.setupRenderer();
// add table to visualization
// NOTE(review): setupDataTable() can return null on query failure and is
// not checked here; populateTable_CollateReduces would then throw NPE
Table raw_data_tab = this.setupDataTable();
MapReduceSwimlanes mrs = new MapReduceSwimlanes();
mrs.populateTable_CollateReduces(raw_data_tab);
mrs.groupByState();
VisualTable maindatatable = mrs.addToVisualization(this.viz, maingroup);
addAxisNames();
if (plot_legend) {
addLegend();
}
// plot swimlanes lines: setup axes, call custom action
ActionList draw = new ActionList();
{
// setup axes
AxisLayout xaxis = new AxisLayout(maingroup, START_FIELD_NAME, Constants.X_AXIS, VisiblePredicate.TRUE);
AxisLayout yaxis = new AxisLayout(maingroup, "ycoord", Constants.Y_AXIS, VisiblePredicate.FALSE);
xaxis.setLayoutBounds(dataBound);
yaxis.setLayoutBounds(dataBound);
// pin both axis ranges to the observed data extremes
ColumnMetadata starttime_meta = maindatatable.getMetadata(START_FIELD_NAME);
ColumnMetadata finishtime_meta = maindatatable.getMetadata(END_FIELD_NAME);
ColumnMetadata ycoord_meta = maindatatable.getMetadata("ycoord");
long x_min = (long) ((Double)maindatatable.get(starttime_meta.getMinimumRow(), START_FIELD_NAME)).doubleValue();
long x_max = (long) ((Double)maindatatable.get(finishtime_meta.getMaximumRow(), END_FIELD_NAME)).doubleValue();
xaxis.setRangeModel(new NumberRangeModel(x_min,x_max,x_min,x_max));
float y_max = maindatatable.getFloat(ycoord_meta.getMaximumRow(),"ycoord");
yaxis.setRangeModel(new NumberRangeModel(0,y_max,0,y_max));
// call custom action to plot actual swimlanes lines
CoordScaler cs = new CoordScaler();
cs.set_pixel_size(SIZE_X-BORDER[0]-BORDER[2], SIZE_Y-BORDER[1]-BORDER[3]);
cs.set_pixel_start(BORDER[0],BORDER[1]);
cs.set_value_ranges(x_min,0,x_max,y_max);
//SwimlanesStateAction swimlaneslines = new SwimlanesStateAction(maingroup, cs);
SwimlanesStateAction swimlaneslines = new SwimlanesStateAction(maingroup, cs);
// add everything to the plot
draw.add(xaxis);
draw.add(yaxis);
draw.add(swimlaneslines);
AxisLabelLayout xlabels = new AxisLabelLayout("xlab", xaxis, xlabBound);
this.viz.putAction("xlabels",xlabels);
AxisLabelLayout ylabels = new AxisLabelLayout("ylab", yaxis, ylabBound);
this.viz.putAction("ylabels",ylabels);
}
// add axes names
{
SpecifiedLayout sl = new SpecifiedLayout(labelgroup, VisualItem.X, VisualItem.Y);
ActionList labeldraw = new ActionList();
labeldraw.add(sl);
this.viz.putAction(labelgroup, labeldraw);
}
// add legend
if (plot_legend) {
ShapeAction legend_sa = new ShapeAction(legendshapegroup);
SpecifiedLayout legendlabels_sl = new SpecifiedLayout(legendgroup, VisualItem.X, VisualItem.Y);
ActionList legenddraw = new ActionList();
legenddraw.add(legend_sa);
this.viz.putAction(legendshapegroup, legenddraw);
ActionList legendlabelsdraw = new ActionList();
legendlabelsdraw.add(legendlabels_sl);
this.viz.putAction(legendgroup,legendlabelsdraw);
}
// draw everything else
this.viz.putAction("draw",draw);
// finally draw
this.viz.run("draw");
this.viz.run("xlabels");
this.viz.run("ylabels");
}
/**
 * Queries the configured FSM state table (this.table) for state events that
 * finished in the selected time window, optionally excluding shuffle states
 * and/or restricting to one job ID. The time window comes from param_map
 * (offline use) or the HttpServletRequest (online use).
 *
 * @return prefuse Table of results ordered by start_time, or null on error
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
"SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
justification = "Dynamic based upon tables in the database")
public Table getData() {
  // preliminary setup: resolve the query time window
  OfflineTimeHandler time_offline;
  TimeHandler time_online;
  long start, end;
  if (offline_use) {
    time_offline = new OfflineTimeHandler(param_map, this.timezone);
    start = time_offline.getStartTime();
    end = time_offline.getEndTime();
  } else {
    time_online = new TimeHandler(this.request, this.timezone);
    start = time_online.getStartTime();
    end = time_online.getEndTime();
  }
  DatabaseWriter dbw = new DatabaseWriter(this.cluster);
  String query;
  // setup query; [start]/[end] markers are substituted by Macro below
  if (this.shuffle_option != null && this.shuffle_option.equals("shuffles")) {
    query = "select job_id,friendly_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname from ["+this.table+"] where finish_time between '[start]' and '[end]'";
  } else {
    query = "select job_id,friendly_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname from ["+this.table+"] where finish_time between '[start]' and '[end]' and not state_name like 'shuffle_local' and not state_name like 'shuffle_remote'";
  }
  if (this.jobname != null) {
    // FIXME(security): jobname is concatenated straight into the SQL text;
    // callers must only pass trusted values until this is parameterized
    query = query + " and job_id like '"+ this.jobname +"'";
  }
  Macro mp = new Macro(start,end,query);
  query = mp.toString() + " order by start_time";
  Table rs_tab = null;
  DatabaseDataSource dds;
  log.debug("Query: " + query);
  // execute query
  try {
    dds = ConnectionFactory.getDatabaseConnection(dbw.getConnection());
    rs_tab = dds.getData(query);
  } catch (prefuse.data.io.DataIOException e) {
    System.err.println("prefuse data IO error: " + e);
    log.warn("prefuse data IO error: " + e);
    return null;
  } catch (SQLException e) {
    System.err.println("Error in SQL: " + e + " in statement: " + query);
    log.warn("Error in SQL: " + e + " in statement: " + query);
    return null;
  }
  // tally rows per state; a single conditional put replaces the original's
  // redundant remove()-then-put() sequence
  HashMap<String, Integer> state_counts = new HashMap<String, Integer>();
  for (int i = 0; i < rs_tab.getRowCount(); i++) {
    String curr_state = rs_tab.getString(i, "state_name");
    Integer cnt = state_counts.get(curr_state);
    state_counts.put(curr_state,
        cnt == null ? Integer.valueOf(1) : Integer.valueOf(cnt.intValue() + 1));
  }
  log.info("Search complete: #cols: " + rs_tab.getColumnCount() + "; #rows: " + rs_tab.getRowCount());
  return rs_tab;
}
}
| 8,266 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/visualization/Heatmap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.visualization;
import prefuse.data.*;
import prefuse.action.*;
import prefuse.action.layout.*;
import prefuse.action.assignment.*;
import prefuse.visual.*;
import prefuse.render.*;
import prefuse.util.*;
import prefuse.*;
import org.apache.hadoop.chukwa.hicc.OfflineTimeHandler;
import org.apache.hadoop.chukwa.hicc.TimeHandler;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.database.Macro;
import org.apache.hadoop.chukwa.util.XssFilter;
import javax.servlet.http.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.sql.*;
import java.util.*;
import java.awt.Font;
import java.awt.geom.Rectangle2D;
import java.awt.Color;
/**
* Static image rendering for heatmap visualization of spatial HDFS
* activity patterns for scalable rendering on front-end (web-browser)
* Handles database data retrieval, transforming data to form for
* visualization elements, and initializing and calling visualization
* elements
*/
public class Heatmap {
/**
 * Internal representation of all data needed to render heatmap;
 * data-handling code populates this data structure
 */
protected static class HeatmapData {
public Table agg_tab; // aggregated host-by-host table fed to prefuse
public long [][] stats; // [dest host][src host] aggregate values
public long min; // smallest aggregate value (colour-scale lower bound)
public long max; // largest aggregate value (colour-scale upper bound)
public int num_hosts; // number of distinct hosts (matrix dimension)
public String [] hostnames; // host display names, index-aligned with stats
public HeatmapData() {
}
}
private static Log log = LogFactory.getLog(Heatmap.class);
// derived-column names for numeric start/finish times
static final String START_FIELD_NAME = "start_time_num";
static final String END_FIELD_NAME = "finish_time_num";
int BOXWIDTH = 250; // side of one heatmap cell in px; recomputed in run()
int SIZE_X = 1600, SIZE_Y=1600; // overall image size; recomputed in run()
final int [] BORDER = {200,150,150,150}; // left, top, right, bottom margins
final int LEGEND_X_OFFSET = 10;
final int LEGEND_Y_OFFSET = 0;
final int LEGEND_TEXT_OFFSET = 10;
final int LEGEND_FONT_SIZE = 24;
final int AXIS_NAME_FONT_SIZE = 24;
protected boolean offline_use = true; // false when built from a HttpServletRequest
protected HttpServletRequest request;
// for offline use only
// keys that need to be filled:
// period (last1/2/3/6/12/24hr,last7d,last30d), time_type (range/last), start, end
protected HashMap<String, String> param_map;
protected String cluster;
protected String timezone;
protected String query_state; // e.g. "read"/"write"; selects FSM states to query
protected String query_stat_type; // e.g. "transaction_count"/"avg_duration"
protected final String table = "filesystem_fsm";
protected boolean plot_legend = false; // controls whether to plot hostnames
protected boolean sort_nodes = true;
protected boolean plot_additional_info = true;
protected String add_info_extra = null; // optional extra text for the info header
protected Display dis;
protected Visualization viz;
protected Rectangle2D dataBound = new Rectangle2D.Double();
protected Rectangle2D xlabBound = new Rectangle2D.Double();
protected Rectangle2D ylabBound = new Rectangle2D.Double();
protected Rectangle2D labelBottomBound = new Rectangle2D.Double();
protected HashMap<String, String> prettyStateNames; // state key -> display name
/* Different group names allow control of what Renderers to use */
final String maingroup = "Data"; // heatmap cells
final String othergroup = "Misc"; // fallback group
final String labelgroup = "Label"; // axis-name labels
final String legendgroup = "Legend"; // hostname labels
final String legendshapegroup = "LegendShape";
final String addinfogroup = "AddInfo"; // additional-info text
final String addinfoshapegroup = "AddInfoShape";
/**
 * No-argument constructor for offline use: query-context fields start empty
 * and callers populate param_map before running.
 */
public Heatmap() {
  cluster = "";
  timezone = "";
  query_state = "";
  query_stat_type = "";
  param_map = new HashMap<String, String>();
}
/**
 * Constructor for Heatmap visualization object
 * (previous javadoc said "Swimlanes" -- copy/paste error)
 * @param timezone Timezone string from environment
 * @param cluster Cluster name from environment
 * @param event_type FSM state type to query (stored as query_state)
 * @param query_stat_type Query state type
 * @param valmap HashMap of key/value pairs simulating parameters from a
 *   HttpRequest; must contain "start" and "end" (seconds since epoch)
 */
public Heatmap
  (String timezone, String cluster, String event_type,
   String query_stat_type,
   HashMap<String, String> valmap)
{
  this.cluster = cluster;
  // direct assignment: the original if/else assigned the incoming value
  // (or null) verbatim in both branches, so it was a no-op conditional
  this.timezone = timezone;
  this.query_state = event_type;
  this.query_stat_type = query_stat_type;
  /* This should "simulate" an HttpServletRequest
   * Need to have "start" and "end" in seconds since Epoch
   */
  this.param_map = valmap;
}
/**
 * Constructor for Heatmap visualization object.
 * @param timezone Timezone string from environment
 * @param cluster Cluster name from environment
 * @param query_state FSM state type to query
 * @param query_stat_type Query state type
 * @param valmap HashMap of key/value pairs simulating parameters from a
 *   HttpRequest; must contain "start" and "end" (seconds since epoch)
 * @param shuffles unused; retained only for signature compatibility
 */
public Heatmap
  (String timezone, String cluster, String query_state,
   String query_stat_type,
   HashMap<String, String> valmap, String shuffles)
{
  this.cluster = cluster;
  // no-op if/else removed: both branches assigned the parameter verbatim
  this.timezone = timezone;
  this.query_state = query_state;
  this.query_stat_type = query_stat_type;
  /* This should "simulate" an HttpServletRequest
   * Need to have "start" and "end" in seconds since Epoch
   */
  this.param_map = valmap;
}
/**
 * Constructor for Heatmap visualization object with explicit image size.
 * @param timezone Timezone string from environment
 * @param cluster Cluster name from environment
 * @param query_state FSM state type to query
 * @param query_stat_type Query state type
 * @param valmap HashMap of key/value pairs simulating parameters from a
 *   HttpRequest; must contain "start" and "end" (seconds since epoch)
 * @param w image width in pixels
 * @param h image height in pixels
 */
public Heatmap
  (String timezone, String cluster, String query_state,
   String query_stat_type,
   HashMap<String, String> valmap,
   int w, int h)
{
  this.cluster = cluster;
  // no-op if/else removed: both branches assigned the parameter verbatim
  this.timezone = timezone;
  this.query_state = query_state;
  this.query_stat_type = query_stat_type;
  /* This should "simulate" an HttpServletRequest
   * Need to have "start" and "end" in seconds since Epoch
   */
  this.param_map = valmap;
  this.SIZE_X = w;
  this.SIZE_Y = h;
}
/**
 * Online-mode constructor: pulls cluster/timezone from the HTTP session and
 * the query options from the XSS-filtered request parameters, with defaults
 * of "read" / "transaction_count".
 * NOTE(review): session.getAttribute("cluster")/("time_zone") are
 * dereferenced without null checks -- confirm callers always set them.
 */
public Heatmap(HttpServletRequest request) {
  XssFilter xf = new XssFilter(request);
  this.offline_use = false;
  this.request = request;
  HttpSession session = request.getSession();
  this.cluster = session.getAttribute("cluster").toString();
  // locals renamed: the originals shadowed the identically-named fields
  String stateParam = xf.getParameter("query_state");
  if (stateParam != null) {
    this.query_state = stateParam;
  } else {
    this.query_state = "read";
  }
  String statTypeParam = xf.getParameter("query_stat_type");
  if (statTypeParam != null) {
    this.query_stat_type = statTypeParam;
  } else {
    this.query_stat_type = "transaction_count";
  }
  this.timezone = session.getAttribute("time_zone").toString();
}
/**
 * Set dimensions of image to be generated
 * Call before calling @see #run
 * @param width Image width in pixels
 * @param height Image height in pixels
 */
public void setDimensions(int width, int height) {
// NOTE: run() later recomputes SIZE_X/SIZE_Y from the host count and
// cell size; these values seed that computation
this.SIZE_X=width;
this.SIZE_Y=height;
}
/**
 * Specify whether to print labels of hosts along axes
 * Call before calling @see #run
 * @param legendopt Flag to control plot legends
 */
public void setLegend(boolean legendopt) {
  // direct assignment replaces the original's redundant if/else
  this.plot_legend = legendopt;
}
/**
 * Generates image in specified format, and writes image as binary
 * output to supplied output stream
 * @param output Image output stream
 * @param img_fmt Image format
 * @param scale Image scale
 * @return true if image is saved
 */
public boolean getImage(java.io.OutputStream output, String img_fmt, double scale) {
// wrap the prepared Visualization in an off-screen prefuse Display
dis = new Display(this.viz);
dis.setSize(SIZE_X,SIZE_Y);
dis.setHighQuality(true);
dis.setFont(new Font(Font.SANS_SERIF,Font.PLAIN,24));
return dis.saveImage(output, img_fmt, scale);
}
/**
 * Installs a RendererFactory mapping each group to its renderer: heatmap
 * cells to a BOXWIDTH-sized shape renderer, hostname-legend and info text
 * to label renderers, everything else to a default shape renderer.
 */
protected void setupRenderer() {
this.viz.setRendererFactory(new RendererFactory(){
AbstractShapeRenderer sr = new ShapeRenderer();
ShapeRenderer sr_big = new ShapeRenderer(BOXWIDTH);
LabelRenderer lr = new LabelRenderer("label");
LabelRenderer lr_legend = new LabelRenderer("label");
public Renderer getRenderer(VisualItem item) {
// alignments are (re)applied on every call; cheap and idempotent
lr_legend.setHorizontalAlignment(Constants.LEFT);
lr_legend.setVerticalAlignment(Constants.CENTER);
lr.setHorizontalAlignment(Constants.CENTER);
lr.setVerticalAlignment(Constants.CENTER);
if (item.isInGroup(maingroup)) {
return sr_big;
} else if (item.isInGroup(legendgroup)) {
return lr_legend;
} else if (item.isInGroup(addinfogroup)) {
return lr;
}
return sr;
}
});
}
// setup columns: add additional time fields
/**
 * Fetches the fully-populated heatmap data structure from the database.
 * @return HeatmapData produced by getData()
 */
protected HeatmapData setupDataTable() {
  return this.getData();
}
/**
 * Colours each heatmap cell from the aggregate matrix using an interpolated
 * dark-red-to-white palette over [hd.min, hd.max], then lays the cells out
 * in a num_hosts x num_hosts grid.
 */
protected void setupHeatmap(VisualTable vtab, HeatmapData hd)
{
long [][] stats = hd.stats;
int i, j, curr_idx;
long curr_val;
int num_hosts = hd.num_hosts;
ColorMap cm = new ColorMap(
ColorLib.getInterpolatedPalette(
ColorLib.color(ColorLib.getColor(32,0,0)),
ColorLib.color(Color.WHITE)
),
(double)hd.min,(double)hd.max
);
for (i = 0; i < num_hosts; i++) {
for (j = 0; j < num_hosts; j++) {
// cells are stored row-major in the visual table
curr_idx = j+(i*num_hosts);
curr_val = stats[i][j];
// NOTE(review): when hd.min <= 0 the first branch also captures
// curr_val == 0, making the black-cell branch unreachable -- confirm
// hd.min is always > 0 for this ordering to behave as intended
if (curr_val >= hd.min) {
vtab.setFillColor(curr_idx, cm.getColor((double)curr_val));
} else if (curr_val == 0) {
vtab.setFillColor(curr_idx, ColorLib.color(Color.BLACK));
}
}
}
// gridlayout puts tiles on row-wise (row1, followed by row2, etc.)
GridLayout gl = new GridLayout(maingroup, num_hosts, num_hosts);
gl.setLayoutBounds(this.dataBound);
ActionList gl_list = new ActionList();
gl_list.add(gl);
this.viz.putAction("gridlayout",gl_list);
this.viz.run("gridlayout");
}
/**
 * Adds one text label per hostname down the left border, vertically
 * centered on each heatmap row (spacing = BOXWIDTH).
 */
protected void addHostLabels(HeatmapData hd) {
Table legend_labels_table = new Table();
legend_labels_table.addColumn("label",String.class);
legend_labels_table.addRows(hd.hostnames.length);
for (int i = 0; i < hd.hostnames.length; i++) {
legend_labels_table.setString(i,"label",hd.hostnames[i]);
}
// anchor at the left margin, centered on the first row of cells
float start_x = LEGEND_X_OFFSET;
float start_y = LEGEND_Y_OFFSET + BORDER[1] + (BOXWIDTH/2);
float incr = this.BOXWIDTH;
VisualTable legend_labels_table_viz = this.viz.addTable(legendgroup, legend_labels_table);
for (int i = 0; i < hd.hostnames.length; i++) {
legend_labels_table_viz.setFloat(i, VisualItem.X, start_x + LEGEND_TEXT_OFFSET);
legend_labels_table_viz.setFloat(i, VisualItem.Y, start_y + (i * incr));
legend_labels_table_viz.setTextColor(i,ColorLib.color(java.awt.Color.BLACK));
legend_labels_table_viz.setFont(i,new Font(Font.SANS_SERIF,Font.PLAIN,LEGEND_FONT_SIZE));
}
}
/**
 * Adds three informational text labels around the heatmap: a host-count
 * header above the plot, "Src. Hosts" below it, and "Dest. Hosts" to the
 * right of it.
 */
protected void addAddlInfo(HeatmapData hd) {
Table legend_labels_table = new Table();
legend_labels_table.addColumn("label",String.class);
legend_labels_table.addRows(3);
String hostnumstring = "Number of hosts: " + hd.num_hosts;
if (sort_nodes) {
hostnumstring += " (nodes sorted)";
} else {
hostnumstring += " (nodes not sorted)";
}
if (add_info_extra != null) hostnumstring += add_info_extra;
legend_labels_table.setString(0,"label",hostnumstring);
legend_labels_table.setString(1,"label","Src. Hosts");
legend_labels_table.setString(2,"label","Dest. Hosts");
VisualTable legend_labels_table_viz = this.viz.addTable(addinfogroup, legend_labels_table);
// row 0: header centered in the top border
legend_labels_table_viz.setFloat(0, VisualItem.X, this.SIZE_X/2f);
legend_labels_table_viz.setFloat(0, VisualItem.Y, BORDER[1]/2f);
legend_labels_table_viz.setTextColor(0,ColorLib.color(java.awt.Color.BLACK));
legend_labels_table_viz.setFont(0,new Font(Font.SANS_SERIF,Font.PLAIN,LEGEND_FONT_SIZE));
// row 1: "Src. Hosts" centered in the bottom border
legend_labels_table_viz.setFloat(1, VisualItem.X, this.SIZE_X/2f);
legend_labels_table_viz.setFloat(1, VisualItem.Y, BORDER[1] + (BOXWIDTH*hd.num_hosts) + BORDER[3]/2f);
legend_labels_table_viz.setTextColor(1,ColorLib.color(java.awt.Color.BLACK));
legend_labels_table_viz.setFont(1,new Font(Font.SANS_SERIF,Font.PLAIN,LEGEND_FONT_SIZE));
// row 2: "Dest. Hosts" centered in the right border
legend_labels_table_viz.setFloat(2, VisualItem.X, BORDER[0] + (BOXWIDTH*hd.num_hosts) + BORDER[2]/2f);
legend_labels_table_viz.setFloat(2, VisualItem.Y, this.SIZE_Y/2f);
legend_labels_table_viz.setTextColor(2,ColorLib.color(java.awt.Color.BLACK));
legend_labels_table_viz.setFont(2,new Font(Font.SANS_SERIF,Font.PLAIN,LEGEND_FONT_SIZE));
}
/**
 * Builds the state-key to human-readable display-name lookup table.
 */
protected void initPrettyNames() {
  HashMap<String, String> names = new HashMap<String, String>();
  names.put("read", "Block Reads");
  names.put("write", "Block Writes");
  names.put("read_local", "Local Block Reads");
  names.put("write_local", "Local Block Writes");
  names.put("read_remote", "Remote Block Reads");
  names.put("write_remote", "Remote Block Writes");
  names.put("write_replicated", "Replicated Block Writes");
  this.prettyStateNames = names;
}
/**
 * Actual code that calls data, generates heatmap, and saves it
 *
 * Fetches the aggregate matrix, sizes cells to fit the smaller image
 * dimension, recomputes the final image size, colours/lays out the cells,
 * and wires up the optional host-label and info-text action lists.
 */
public void run() {
initPrettyNames();
// setup visualization
this.viz = new Visualization();
// add table to visualization
HeatmapData hd = this.setupDataTable();
// setup bounds
int width;
// fit square cells into the tighter of the two usable dimensions
if (SIZE_X-BORDER[0]-BORDER[2] < SIZE_Y-BORDER[1]-BORDER[3]) {
BOXWIDTH = (SIZE_X-BORDER[0]-BORDER[2]) / hd.num_hosts;
} else {
BOXWIDTH = (SIZE_Y-BORDER[1]-BORDER[3]) / hd.num_hosts;
}
width = hd.num_hosts * BOXWIDTH;
this.dataBound.setRect(
BORDER[0]+BOXWIDTH/2,
BORDER[1]+BOXWIDTH/2,
width-BOXWIDTH,width-BOXWIDTH
);
// shrink the final image to exactly fit the grid plus borders
this.SIZE_X = BORDER[0] + BORDER[2] + (hd.num_hosts * BOXWIDTH);
this.SIZE_Y = BORDER[1] + BORDER[3] + (hd.num_hosts * BOXWIDTH);
log.debug("width total: " + width + " width per state: " + BOXWIDTH + " xstart: "
+ (BORDER[0]+BOXWIDTH/2)
+ " ystart: " + (BORDER[1]+BOXWIDTH/2) + " (num hosts: "+hd.num_hosts+")");
log.debug("X size: " + this.SIZE_X + " Y size: " + this.SIZE_Y);
this.setupRenderer();
VisualTable data_tab_viz = viz.addTable(maingroup, hd.agg_tab);
setupHeatmap(data_tab_viz, hd);
ShapeAction legend_sa1 = null, legend_sa2 = null;
SpecifiedLayout legendlabels_sl1 = null, legendlabels_sl2 = null;
if (plot_legend) {
addHostLabels(hd);
legend_sa1 = new ShapeAction(legendshapegroup);
legendlabels_sl1 = new SpecifiedLayout(legendgroup, VisualItem.X, VisualItem.Y);
ActionList legenddraw = new ActionList();
legenddraw.add(legend_sa1);
this.viz.putAction(legendshapegroup, legenddraw);
ActionList legendlabelsdraw = new ActionList();
legendlabelsdraw.add(legendlabels_sl1);
this.viz.putAction(legendgroup,legendlabelsdraw);
}
if (plot_additional_info) {
addAddlInfo(hd);
legend_sa2 = new ShapeAction(addinfoshapegroup);
legendlabels_sl2 = new SpecifiedLayout(addinfogroup, VisualItem.X, VisualItem.Y);
ActionList legenddraw = new ActionList();
legenddraw.add(legend_sa2);
this.viz.putAction(addinfoshapegroup, legenddraw);
ActionList legendlabelsdraw = new ActionList();
legendlabelsdraw.add(legendlabels_sl2);
this.viz.putAction(addinfogroup,legendlabelsdraw);
}
}
/**
 * Returns true iff every entry of clustId equals the first entry, i.e. all
 * nodes belong to a single cluster. Vacuously true for arrays of length
 * zero or one (the first element is never read in that case).
 */
protected boolean checkDone(int [] clustId) {
  int idx = 1;
  while (idx < clustId.length) {
    if (clustId[idx] != clustId[0]) {
      return false;
    }
    idx++;
  }
  return true;
}
/**
 * Sort data for better visualization of patterns
 *
 * Computes per-row sums of the traffic matrix and returns a permutation of
 * row indices ordered by ascending row sum (insertion sort).
 *
 * @param stat square matrix of aggregate statistics (host x host)
 * @return permutation of indices 0..stat.length-1 sorted by row sum
 */
protected int [] hClust (long [][] stat)
{
  int statlen = stat.length;
  long [] rowSums = new long[statlen];
  int [] permute = new int[statlen];
  int i, j;
  // initialize permutation
  for (i = 0; i < statlen; i++) {
    permute[i] = i;
  }
  // compute per-row totals
  for (i = 0; i < statlen; i++) {
    rowSums[i] = 0;
    for (j = 0; j < statlen; j++) {
      rowSums[i] += stat[i][j];
    }
  }
  // insertion sort, keeping rowSums and permute in lockstep.
  // BUGFIX: the loop must insert every element through the LAST index
  // (i = 1 .. statlen-1); the original bound "i < statlen-1" never
  // inserted the final element, leaving the last row out of order.
  for (i = 1; i < statlen; i++) {
    long val = rowSums[i];
    int thispos = permute[i];
    j = i-1;
    while (j >= 0 && rowSums[j] > val) {
      rowSums[j+1] = rowSums[j];
      permute[j+1] = permute[j];
      j--;
    }
    rowSums[j+1] = val;
    permute[j+1] = thispos;
  }
  return permute;
}
/**
 * Reorder rows (and columns) according to a given ordering
 * Maintains same ordering along rows and columns
 *
 * @param stat square matrix to reorder
 * @param permute permutation of 0..stat.length-1 (same length as stat)
 * @return new matrix with both axes permuted; input is left untouched
 */
protected long [][] doPermute (long [][] stat, int [] permute) {
  int n = stat.length;
  assert(stat.length == permute.length);
  long [][] reordered = new long[n][n];
  for (int row = 0; row < n; row++) {
    int srcRow = permute[row];
    for (int col = 0; col < n; col++) {
      reordered[row][col] = stat[srcRow][permute[col]];
    }
  }
  return reordered;
}
/**
* Interfaces with database to get data and
* populate data structures for rendering
* @return heat map data JSON
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
"SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
justification = "Dynamic based upon tables in the database")
public HeatmapData getData() {
// preliminary setup
OfflineTimeHandler time_offline;
TimeHandler time_online;
long start, end, min, max;
if (offline_use) {
time_offline = new OfflineTimeHandler(param_map, this.timezone);
start = time_offline.getStartTime();
end = time_offline.getEndTime();
} else {
time_online = new TimeHandler(this.request, this.timezone);
start = time_online.getStartTime();
end = time_online.getEndTime();
}
DatabaseWriter dbw = new DatabaseWriter(this.cluster);
// setup query
String sqlTemplate = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from [%s] where finish_time between '[start]' and '[end]' and (%s) order by start_time";
String query;
if (this.query_state != null && this.query_state.equals("read")) {
query = String.format(sqlTemplate, table,"state_name like 'read_local' or state_name like 'read_remote'");
} else if (this.query_state != null && this.query_state.equals("write")) {
query = String.format(sqlTemplate, table, "state_name like 'write_local' or state_name like 'write_remote' or state_name like 'write_replicated'");
} else {
query = String.format(sqlTemplate, table, "state_name like '" + query_state + "'");
}
Macro mp = new Macro(start,end,query);
String q = mp.toString();
ArrayList<HashMap<String, Object>> events = new ArrayList<HashMap<String, Object>>();
ResultSet rs = null;
log.debug("Query: " + q);
// run query, extract results
try {
rs = dbw.query(q);
ResultSetMetaData rmeta = rs.getMetaData();
int col = rmeta.getColumnCount();
while (rs.next()) {
HashMap<String, Object> event = new HashMap<String, Object>();
for(int i=1;i<=col;i++) {
if(rmeta.getColumnType(i)==java.sql.Types.TIMESTAMP) {
event.put(rmeta.getColumnName(i),rs.getTimestamp(i).getTime());
} else {
event.put(rmeta.getColumnName(i),rs.getString(i));
}
}
events.add(event);
}
} catch (SQLException ex) {
// handle any errors
log.error("SQLException: " + ex.getMessage());
log.error("SQLState: " + ex.getSQLState());
log.error("VendorError: " + ex.getErrorCode());
} finally {
dbw.close();
}
log.info(events.size() + " results returned.");
HashSet<String> host_set = new HashSet<String>();
HashMap<String, Integer> host_indices = new HashMap<String, Integer>();
HashMap<Integer, String> host_rev_indices = new HashMap<Integer, String>();
// collect hosts, name unique hosts
for(int i = 0; i < events.size(); i++) {
HashMap<String, Object> event = events.get(i);
String curr_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
host_set.add(curr_host);
host_set.add(other_host);
}
int num_hosts = host_set.size();
Iterator<String> host_iter = host_set.iterator();
for (int i = 0; i < num_hosts && host_iter.hasNext(); i++) {
String curr_host = host_iter.next();
host_indices.put(curr_host, i);
host_rev_indices.put(i,curr_host);
}
System.out.println("Number of hosts: " + num_hosts);
long stats[][] = new long[num_hosts][num_hosts];
long count[][] = new long[num_hosts][num_hosts]; // used for averaging
int start_millis = 0, end_millis = 0;
// deliberate design choice to duplicate code PER possible operation
// otherwise we have to do the mode check N times, for N states returned
//
// compute aggregate statistics
log.info("Query statistic type: "+this.query_stat_type);
if (this.query_stat_type.equals("transaction_count")) {
for(int i=0;i<events.size();i++) {
HashMap<String, Object> event = events.get(i);
start=(Long)event.get("start_time");
end=(Long)event.get("finish_time");
start_millis = Integer.parseInt(((String)event.get("start_time_millis")));
end_millis = Integer.parseInt(((String)event.get("finish_time_millis")));
String this_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
int this_host_idx = host_indices.get(this_host).intValue();
int other_host_idx = host_indices.get(other_host).intValue();
// to, from
stats[other_host_idx][this_host_idx] += 1;
}
} else if (this.query_stat_type.equals("avg_duration")) {
for(int i=0;i<events.size();i++) {
HashMap<String, Object> event = events.get(i);
start=(Long)event.get("start_time");
end=(Long)event.get("finish_time");
start_millis = Integer.parseInt(((String)event.get("start_time_millis")));
end_millis = Integer.parseInt(((String)event.get("finish_time_millis")));
String this_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
int this_host_idx = host_indices.get(this_host).intValue();
int other_host_idx = host_indices.get(other_host).intValue();
long curr_val = end_millis - start_millis + ((end - start)*1000);
// to, from
stats[other_host_idx][this_host_idx] += curr_val;
count[other_host_idx][this_host_idx] += 1;
}
for (int i = 0; i < num_hosts; i++) {
for (int j = 0; j < num_hosts; j++) {
if (count[i][j] > 0) stats[i][j] = stats[i][j] / count[i][j];
}
}
} else if (this.query_stat_type.equals("avg_volume")) {
for(int i=0;i<events.size();i++) {
HashMap<String, Object> event = events.get(i);
start=(Long)event.get("start_time");
end=(Long)event.get("finish_time");
start_millis = Integer.parseInt(((String)event.get("start_time_millis")));
end_millis = Integer.parseInt(((String)event.get("finish_time_millis")));
String this_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
int this_host_idx = host_indices.get(this_host).intValue();
int other_host_idx = host_indices.get(other_host).intValue();
long curr_val = Long.parseLong((String)event.get("bytes"));
// to, from
stats[other_host_idx][this_host_idx] += curr_val;
count[other_host_idx][this_host_idx] += 1;
}
for (int i = 0; i < num_hosts; i++) {
for (int j = 0; j < num_hosts; j++) {
if (count[i][j] > 0) stats[i][j] = stats[i][j] / count[i][j];
}
}
} else if (this.query_stat_type.equals("total_duration")) {
for(int i=0;i<events.size();i++) {
HashMap<String, Object> event = events.get(i);
start=(Long)event.get("start_time");
end=(Long)event.get("finish_time");
start_millis = Integer.parseInt(((String)event.get("start_time_millis")));
end_millis = Integer.parseInt(((String)event.get("finish_time_millis")));
String this_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
int this_host_idx = host_indices.get(this_host).intValue();
int other_host_idx = host_indices.get(other_host).intValue();
double curr_val = end_millis - start_millis + ((end - start)*1000);
// to, from
stats[other_host_idx][this_host_idx] += curr_val;
}
} else if (this.query_stat_type.equals("total_volume")) {
for(int i=0;i<events.size();i++) {
HashMap<String, Object> event = events.get(i);
start=(Long)event.get("start_time");
end=(Long)event.get("finish_time");
start_millis = Integer.parseInt(((String)event.get("start_time_millis")));
end_millis = Integer.parseInt(((String)event.get("finish_time_millis")));
String this_host = (String) event.get("hostname");
String other_host = (String) event.get("other_host");
int this_host_idx = host_indices.get(this_host).intValue();
int other_host_idx = host_indices.get(other_host).intValue();
long curr_val = Long.parseLong((String)event.get("bytes"));
// to, from
stats[other_host_idx][this_host_idx] += curr_val;
}
}
int [] permute = null;
if (sort_nodes) {
permute = hClust(stats);
stats = doPermute(stats,permute);
}
Table agg_tab = new Table();
agg_tab.addColumn("stat", long.class);
min = Long.MAX_VALUE;
max = Long.MIN_VALUE;
agg_tab.addRows(num_hosts*num_hosts);
// row-wise placement (row1, followed by row2, etc.)
for (int i = 0; i < num_hosts; i++) {
for (int j = 0; j < num_hosts; j++) {
agg_tab.setLong((i*num_hosts)+j,"stat",stats[i][j]);
if (stats[i][j] > max) max = stats[i][j];
if (stats[i][j] > 0 && stats[i][j] < min) min = stats[i][j];
}
}
if (min == Long.MAX_VALUE) min = 0;
log.info(agg_tab);
// collate data
HeatmapData hd = new HeatmapData();
hd.stats = stats;
hd.min = min;
hd.max = max;
hd.num_hosts = num_hosts;
hd.agg_tab = agg_tab;
this.add_info_extra = new StringBuilder().append("\nState: ").append(this.prettyStateNames.get(this.query_state)).
append(" (").append(events.size()).append(" ").append(this.query_state).
append("'s [").append(this.query_stat_type).append("])\n").
append("Plotted value range: [").append(hd.min).append(",").append(hd.max).
append("] (Zeros in black)").toString();
hd.hostnames = new String [num_hosts];
for (int i = 0; i < num_hosts; i++) {
String curr_host = host_rev_indices.get(permute[i]);
hd.hostnames[i] = curr_host;
}
return hd;
}
}
| 8,267 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/ParseUtilities.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.util.StringTokenizer;
/**
* Parse Utilities for Parsing ChukwaRecords for FSMBuilder Mappers
*
*/
public class ParseUtilities {
public static FSMIntermedEntry splitChukwaRecordKey
(String origkey, FSMIntermedEntry rec, String delim)
throws Exception
{
StringTokenizer st = new StringTokenizer(origkey, delim);
if (st.countTokens() != 3) {
throw new Exception("Expected 3 tokens from ChukwaRecordKey but only found " + st.countTokens() + ".");
}
rec.time_orig_epoch = st.nextToken();
rec.job_id = st.nextToken();
rec.time_orig = st.nextToken();
return rec;
}
public static String extractHostnameFromTrackerName (String trackerName)
{
int firstPos = "tracker_".length();
int secondPos;
String hostname = "";
if (trackerName.startsWith("tracker_")) {
secondPos = trackerName.indexOf(":",firstPos);
hostname = trackerName.substring(firstPos, secondPos);
}
return hostname;
}
public static String removeRackFromHostname (String origHostname)
{
int pos = origHostname.lastIndexOf("/");
if (pos > -1) {
return origHostname.substring(pos);
} else {
return origHostname;
}
}
} | 8,268 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/StateType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
/**
 * Lightweight enumeration of state-entry lifecycle phases used by the
 * SALSA FSM builder: no-op, start-of-state, end-of-state, or an
 * instantaneous state. Kept as a plain public int holder so it can be
 * serialized compactly by FSMIntermedEntry.
 */
public class StateType {
  public static final int STATE_NOOP = 0;
  public static final int STATE_START = 1;
  public static final int STATE_END = 2;
  public static final int STATE_INSTANT = 3;

  /** Symbolic names, indexed by the STATE_* constant values above. */
  static final String [] NAMES = {"STATE_NOOP", "STATE_START", "STATE_END", "STATE_INSTANT"};

  /** Current value; expected to be one of the STATE_* constants. */
  public int val;

  /** Creates a StateType initialized to STATE_NOOP. */
  public StateType() {
    this.val = 0;
  }

  /** Creates a StateType holding {@code newval}, one of the STATE_* constants. */
  public StateType(int newval) {
    this.val = newval;
  }

  /** Returns the symbolic name of the current value. */
  public String toString() {
    assert (this.val >= 0 && this.val < NAMES.length);
    return String.valueOf(NAMES[this.val]);
  }
}
| 8,269 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/FSMType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
/**
 * Lightweight enumeration of the kinds of finite-state machines the SALSA
 * analysis builds: MapReduce or filesystem, each with a complete and an
 * incomplete variant. Stored as a plain public int so FSMIntermedEntry can
 * serialize it compactly.
 */
public class FSMType {
  public static final int MAPREDUCE_FSM = 0;
  public static final int FILESYSTEM_FSM = 1;
  public static final int MAPREDUCE_FSM_INCOMPLETE = 2;
  public static final int FILESYSTEM_FSM_INCOMPLETE = 3;

  /** Symbolic names, indexed by the constant values above. */
  static final String [] NAMES = { "MAPREDUCE_FSM", "FILESYSTEM_FSM", "MAPREDUCE_FSM_INCOMPLETE", "FILESYSTEM_FSM_INCOMPLETE" };

  /** Current value; expected to be one of the *_FSM constants. */
  public int val;

  /** Creates an FSMType initialized to MAPREDUCE_FSM. */
  public FSMType() {
    this.val = 0;
  }

  /** Creates an FSMType holding {@code newval}, one of the *_FSM constants. */
  public FSMType(int newval) {
    this.val = newval;
  }

  /** Returns the symbolic name of the current value. */
  public String toString() {
    assert (this.val >= 0 && this.val < NAMES.length);
    return String.valueOf(NAMES[this.val]);
  }
}
| 8,270 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/MapRedState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
/**
 * Lightweight enumeration of MapReduce task states tracked by the SALSA
 * FSM builder (map, reduce sub-phases, and local/remote shuffle).
 * Stored as a plain public int so FSMIntermedEntry can serialize it
 * compactly.
 */
public class MapRedState {
  public static final int NONE = 0;
  public static final int MAP = 1;
  public static final int REDUCE = 2;
  public static final int REDUCE_SHUFFLEWAIT = 3;
  public static final int REDUCE_SORT = 4;
  public static final int REDUCE_REDUCER = 5;
  public static final int SHUFFLE_LOCAL = 6;
  public static final int SHUFFLE_REMOTE = 7;

  /** Symbolic names, indexed by the constant values above. */
  static final String [] NAMES = { "NONE", "MAP", "REDUCE", "REDUCE_SHUFFLEWAIT",
    "REDUCE_SORT", "REDUCE_REDUCER", "SHUFFLE_LOCAL", "SHUFFLE_REMOTE"};

  /** Current value; expected to be one of the constants above. */
  public int val;

  /** Creates a MapRedState initialized to NONE. */
  public MapRedState() {
    this.val = 0;
  }

  /** Creates a MapRedState holding {@code newval}, one of the constants above. */
  public MapRedState(int newval) {
    this.val = newval;
  }

  /** Returns the symbolic name of the current value. */
  public String toString() {
    assert (this.val >= 0 && this.val < NAMES.length);
    return String.valueOf(NAMES[this.val]);
  }
}
| 8,271 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/HDFSState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
/**
 * Lightweight enumeration of HDFS client-trace states tracked by the SALSA
 * FSM builder.
 *
 * Note the irregular numbering: NONE is 6 (not 0) while READ_LOCAL through
 * WRITE_REPLICATED occupy 1..5, yet NAMES keeps "NONE" at index 0.
 * toString() therefore maps NONE back to index 0 explicitly — the previous
 * implementation indexed NAMES[6] for a NONE state and threw
 * ArrayIndexOutOfBoundsException (its assert also failed under -ea).
 * The constant values themselves are left unchanged because they are
 * written to the wire by FSMIntermedEntry.write().
 */
public class HDFSState {
  public static final int NONE = 6;
  public static final int READ_LOCAL = 1;
  public static final int READ_REMOTE = 2;
  public static final int WRITE_LOCAL = 3;
  public static final int WRITE_REMOTE = 4;
  public static final int WRITE_REPLICATED = 5;

  /** Symbolic names; index 0 is "NONE", indices 1..5 match the constants. */
  static final String [] NAMES = { "NONE", "READ_LOCAL", "READ_REMOTE", "WRITE_LOCAL", "WRITE_REMOTE", "WRITE_REPLICATED"};

  /** Current value; one of the constants above. */
  public int val;

  /** Default state; kept at READ_LOCAL (1) to preserve existing behavior. */
  public HDFSState() { this.val = 1; }

  /** Creates an HDFSState holding {@code newval}, one of the constants above. */
  public HDFSState(int newval) { this.val = newval; }

  /** Returns the symbolic name; NONE (6) is mapped to NAMES[0]. */
  public String toString() {
    int idx = (this.val == NONE) ? 0 : this.val;
    assert(idx < NAMES.length && idx >= 0);
    return String.valueOf(NAMES[idx]);
  }
}
| 8,272 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/TaskTrackerClientTraceMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.IOException;
import java.util.ArrayList;
import java.util.regex.*;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.extraction.demux.*;
import org.apache.hadoop.chukwa.extraction.engine.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
/**
* Pluggable mapper for FSMBuilder
*
* K2 = State Name + State ID
* (We use ChukwaRecordKey since it would already have implemented a bunch of
* useful things such as Comparators etc.)
* V2 = TreeMap
*/
public class TaskTrackerClientTraceMapper
  extends MapReduceBase
  implements Mapper<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, FSMIntermedEntry>
{
  private static Log log = LogFactory.getLog(FSMBuilder.class);
  protected static final String SEP = "/";
  protected final static String FSM_CRK_ReduceType = FSMType.NAMES[FSMType.MAPREDUCE_FSM];
  // Matches an address string that begins with a dotted-quad IP (e.g.
  // "10.0.0.1:50060") and captures just the IP in group 1.
  private final Pattern ipPattern =
    Pattern.compile("([0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+)[a-zA-Z\\-_:\\/].*");

  /**
   * Filters incoming ChukwaRecords down to detailed ClientTrace entries for
   * MAPRED operations and hands them to parseClientTraceDetailed(), which
   * emits the actual FSM state entries.
   */
  public void map
    (ChukwaRecordKey key, ChukwaRecord val,
    OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
    Reporter reporter)
    throws IOException
  {
    /* Extract field names for checking */
    String [] fieldNames = val.getFields();
    ArrayList<String> fieldNamesList = new ArrayList<String>(fieldNames.length);
    for (int i = 0; i < fieldNames.length; i++) {
      fieldNamesList.add(fieldNames[i]);
    }

    // Handle ClientTraceDetailed and DataNodeLog entries separately
    // because we need to combine both types of entries for a complete picture
    if (key.getReduceType().equals("ClientTraceDetailed")) {
      assert(fieldNamesList.contains("op"));
      if (val.getValue("op").startsWith("MAPRED")) {
        parseClientTraceDetailed(key, val, output, reporter, fieldNamesList);
      } // pick up only mapreduce operations
    }
  } // end of map()

  // Fallback shuffle duration in milliseconds, used when the record carries
  // no "duration" field; works with 0.20 ClientTrace with no durations
  protected final int DEFAULT_SHUFFLE_DURATION_MS = 10;

  /**
   * Turns one MAPRED_SHUFFLE ClientTrace record into a paired START and END
   * FSMIntermedEntry and emits both immediately (includes hack to create
   * start+end entries at once rather than waiting for a matching record).
   * The record's timestamp is treated as the END time; the start time is
   * back-computed by subtracting the duration.
   */
  protected void parseClientTraceDetailed
    (ChukwaRecordKey key, ChukwaRecord val,
    OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
    Reporter reporter, ArrayList<String> fieldNamesList)
    throws IOException
  {
    FSMIntermedEntry start_rec, end_rec;
    String current_op = null, src_add = null, dest_add = null;
    String reduce_id = null, map_id = null;

    /* initialize state records */
    start_rec = new FSMIntermedEntry();
    end_rec = new FSMIntermedEntry();
    start_rec.fsm_type = new FSMType(FSMType.MAPREDUCE_FSM);
    start_rec.state_type = new StateType(StateType.STATE_START);
    end_rec.fsm_type = new FSMType(FSMType.MAPREDUCE_FSM);
    end_rec.state_type = new StateType(StateType.STATE_END);

    /* extract addresses; unmatched addresses degrade to "" with a warning */
    Matcher src_regex = ipPattern.matcher(val.getValue("src"));
    if (src_regex.matches()) {
      src_add = src_regex.group(1);
    } else {
      log.warn("Failed to match src IP:"+val.getValue("src")+"");
      src_add = "";
    }
    Matcher dest_regex = ipPattern.matcher(val.getValue("dest"));
    if (dest_regex.matches()) {
      dest_add = dest_regex.group(1);
    } else {
      log.warn("Failed to match dest IP:"+val.getValue("dest")+"");
      dest_add = "";
    }
    if (fieldNamesList.contains("reduceID")) {
      reduce_id = val.getValue("reduceID");
    } else {
      // add a random number so we get unique keys or the CRK will break
      Random r = new Random();
      reduce_id = "noreduce" + r.nextInt();
    }
    if (fieldNamesList.contains("cliID")) {
      map_id = val.getValue("cliID").trim();
    } else {
      map_id = "nomap";
    }
    current_op = val.getValue("op");

    start_rec.host_exec = src_add;
    end_rec.host_exec = src_add;
    start_rec.host_other = dest_add;
    end_rec.host_other = dest_add;

    // timestamp of the log entry is the end time;
    // subtract duration to get start time
    long actual_time_ms = Long.parseLong(val.getValue("actual_time"));
    if (fieldNamesList.contains("duration")) {
      try {
        // NOTE(review): "duration" appears to be in microseconds (divided
        // by 1000 to yield ms) — TODO confirm against the ClientTrace format
        actual_time_ms -= (Long.parseLong(val.getValue("duration").trim()) / 1000);
      } catch (NumberFormatException nef) {
        // best-effort: keep the raw end time if duration is unparseable
        log.warn("Failed to parse duration: >>" + val.getValue("duration"));
      }
    } else {
      actual_time_ms -= DEFAULT_SHUFFLE_DURATION_MS;
    }

    // ChukwaRecordKey format is epoch/<id>/... — k[0] is the epoch
    String [] k = key.getKey().split("/");
    start_rec.time_orig_epoch = k[0];
    start_rec.time_orig = Long.toString(actual_time_ms); // not actually used
    start_rec.timestamp = Long.toString(actual_time_ms);
    start_rec.time_end = "";
    start_rec.time_start = start_rec.timestamp;
    end_rec.time_orig_epoch = k[0];
    end_rec.time_orig = val.getValue("actual_time");
    end_rec.timestamp = val.getValue("actual_time");
    end_rec.time_end = val.getValue("actual_time");
    end_rec.time_start = "";
    log.debug("Duration: " + (Long.parseLong(end_rec.time_end) - Long.parseLong(start_rec.time_start)));
    start_rec.job_id = reduce_id; // use job id = block id
    end_rec.job_id = reduce_id;

    // classify the shuffle as local or remote by comparing endpoints
    if (current_op.equals("MAPRED_SHUFFLE")) {
      if (src_add != null && src_add.equals(dest_add)) {
        start_rec.state_mapred = new MapRedState(MapRedState.SHUFFLE_LOCAL);
      } else {
        start_rec.state_mapred = new MapRedState(MapRedState.SHUFFLE_REMOTE);
      }
    } else {
      log.warn("Invalid state: " + current_op);
    }
    end_rec.state_mapred = start_rec.state_mapred;
    start_rec.state_name = start_rec.state_mapred.toString();
    end_rec.state_name = end_rec.state_mapred.toString();
    start_rec.identifier = new StringBuilder().append(reduce_id).append("@").append(map_id).toString();
    end_rec.identifier = new StringBuilder().append(reduce_id).append("@").append(map_id).toString();
    start_rec.generateUniqueID();
    end_rec.generateUniqueID();

    start_rec.add_info.put(Record.tagsField,val.getValue(Record.tagsField));
    start_rec.add_info.put("csource",val.getValue("csource"));
    end_rec.add_info.put(Record.tagsField,val.getValue(Record.tagsField));
    end_rec.add_info.put("csource",val.getValue("csource"));
    end_rec.add_info.put("STATE_STRING","SUCCESS"); // by default
    // add counter value
    end_rec.add_info.put("BYTES",val.getValue("bytes"));

    // both intermediate keys deliberately use start_rec.timestamp so the
    // start and end entries sort together in the shuffle
    String crk_mid_string_start = new StringBuilder().append(start_rec.getUniqueID()).append("_").append(start_rec.timestamp).toString();
    String crk_mid_string_end = new StringBuilder().append(end_rec.getUniqueID()).append("_").append(start_rec.timestamp).toString();
    output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType, crk_mid_string_start), start_rec);
    output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType, crk_mid_string_end), end_rec);
  }
} // end of mapper class
| 8,273 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/FSMIntermedEntryPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
public class FSMIntermedEntryPartitioner
implements Partitioner<ChukwaRecordKey, FSMIntermedEntry>
{
public int getPartition
(ChukwaRecordKey key, FSMIntermedEntry val, int numPartitions)
{
return (Math.abs(key.hashCode() % numPartitions));
}
public void configure(JobConf job) {
// do nothing
}
} | 8,274 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/FSMIntermedEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.util.Iterator;
import java.util.TreeMap;
import java.util.Set;
import java.util.Map.Entry;
import org.apache.hadoop.io.WritableComparable;
import org.apache.commons.lang3.builder.HashCodeBuilder;
/*
* FSM Intermediate State Entry
*
* Each state corresponds to two of these entries:
* One corresponding to the start of the state, one corresponding to the end of the state
*
* Intermediate data-structure passed from Maps to Reduces
*
*/
public class FSMIntermedEntry
implements Cloneable, WritableComparable
{
private final char DELIM = 1;
/* Begin fields */
public StateType state_type;
public MapRedState state_mapred;
public HDFSState state_hdfs;
public FSMType fsm_type;
public String state_name;
public String identifier;
public String unique_id; // state name + unique identifier
// (state-dependent)
// this id should also correspond
// to the k2 value between
// mappers and reducers
public String timestamp;
public String time_start;
public String time_end;
public String host_exec;
public String host_other; // for instance, source host for shuffle,
// src/dest host for dfs read/write
// These values filled in by splitting the original
// ChukwaRecordKey from Demux
public String time_orig_epoch;
public String time_orig;
public String job_id; // we get this for free from the CRK
TreeMap<String,String> add_info; // additional information
// e.g. locality information
/* End of fields */
public FSMIntermedEntry() {
this.state_mapred = new MapRedState(MapRedState.NONE);
this.state_hdfs = new HDFSState(HDFSState.NONE);
this.state_type = new StateType(StateType.STATE_NOOP);
this.add_info = new TreeMap<String, String>();
this.host_other = "";
this.job_id = "";
this.time_orig_epoch = "";
this.time_orig = "";
}
public String getUniqueID()
{
return this.unique_id;
}
public String getFriendlyID()
{
return this.identifier;
}
/**
* Set state_type and identifier before calling
*/
public void generateUniqueID()
{
if (this.fsm_type.val == FSMType.MAPREDUCE_FSM ||
this.fsm_type.val == FSMType.MAPREDUCE_FSM_INCOMPLETE)
{
this.state_name = this.state_mapred.toString();
} else if (this.fsm_type.val == FSMType.FILESYSTEM_FSM ||
this.fsm_type.val == FSMType.FILESYSTEM_FSM_INCOMPLETE)
{
this.state_name = this.state_hdfs.toString();
}
this.unique_id = new StringBuilder().append(this.state_name).append("@").append(this.identifier).toString();
}
public void write(DataOutput out) throws IOException {
Set<String> mapKeys;
out.writeInt(this.state_type.val);
out.writeInt(this.state_mapred.val);
out.writeInt(this.state_hdfs.val);
out.writeInt(this.fsm_type.val);
out.writeChar(DELIM);
out.writeInt(state_name.length());
if (state_name.length() > 0) out.writeUTF(state_name);
out.writeInt(unique_id.length());
if (unique_id.length() > 0) out.writeUTF(unique_id);
out.writeInt(timestamp.length());
if (timestamp.length() > 0) out.writeUTF(timestamp);
out.writeInt(time_start.length());
if (time_start.length() > 0) out.writeUTF(time_start);
out.writeInt(time_end.length());
if (time_end.length() > 0) out.writeUTF(time_end);
out.writeInt(host_exec.length());
if (host_exec.length() > 0) out.writeUTF(host_exec);
out.writeInt(host_other.length());
if (host_other.length() > 0) out.writeUTF(host_other);
out.writeInt(time_orig_epoch.length());
if (time_orig_epoch.length() > 0) out.writeUTF(time_orig_epoch);
out.writeInt(time_orig.length());
if (time_orig.length() > 0) out.writeUTF(time_orig);
out.writeInt(job_id.length());
if (job_id.length() > 0) out.writeUTF(job_id);
out.writeInt(identifier.length());
if (identifier.length() > 0) out.writeUTF(identifier);
mapKeys = this.add_info.keySet();
out.writeInt(mapKeys.size());
for(Entry<String, String> entry : this.add_info.entrySet()) {
String value = entry.getValue();
if(value.length() > 0) {
out.writeUTF(entry.getKey());
out.writeInt(value.length());
out.writeUTF(value);
} else {
out.writeUTF("NULL");
out.writeInt(0);
}
}
}
public void readFields(DataInput in) throws IOException {
int currlen, numkeys;
this.state_type = new StateType(in.readInt());
this.state_mapred = new MapRedState(in.readInt());
this.state_hdfs = new HDFSState(in.readInt());
this.fsm_type = new FSMType(in.readInt());
in.readChar();
currlen = in.readInt();
if (currlen > 0) this.state_name = in.readUTF();
else this.state_name = "";
currlen = in.readInt();
if (currlen > 0) this.unique_id = in.readUTF();
else this.unique_id = "";
currlen = in.readInt();
if (currlen > 0) this.timestamp = in.readUTF();
else this.timestamp = "";
currlen = in.readInt();
if (currlen > 0) this.time_start = in.readUTF();
else this.time_start = "";
currlen = in.readInt();
if (currlen > 0) this.time_end = in.readUTF();
else this.time_end = "";
currlen = in.readInt();
if (currlen > 0) this.host_exec = in.readUTF();
else this.host_exec = "";
currlen = in.readInt();
if (currlen > 0) this.host_other = in.readUTF();
else this.host_other = "";
currlen = in.readInt();
if (currlen > 0) this.time_orig_epoch = in.readUTF();
else this.time_orig_epoch = "";
currlen = in.readInt();
if (currlen > 0) this.time_orig = in.readUTF();
else this.time_orig = "";
currlen = in.readInt();
if (currlen > 0) this.job_id = in.readUTF();
else this.job_id = "";
currlen = in.readInt();
if (currlen > 0) this.identifier = in.readUTF();
else this.identifier = "";
numkeys = in.readInt();
this.add_info = new TreeMap<String, String>();
if (numkeys > 0) {
for (int i = 0; i < numkeys; i++) {
String currkey, currval;
currkey = in.readUTF();
currlen = in.readInt();
if (currlen > 0) {
currval = in.readUTF();
this.add_info.put(currkey, currval);
}
}
}
}
@Override
public int hashCode() {
return new HashCodeBuilder(13, 71).
append(this.unique_id).
toHashCode();
}
@Override
public boolean equals (Object o) {
if((o instanceof FSMIntermedEntry)) {
FSMIntermedEntry other = (FSMIntermedEntry) o;
return this.unique_id.equals(other.unique_id);
}
return false;
}
public int compareTo (Object o) {
final int BEFORE = -1;
final int EQUAL = 0;
//this optimization is usually worthwhile, and can
//always be added
if ( this == o ) return EQUAL;
if((o instanceof FSMIntermedEntry)) {
FSMIntermedEntry other = (FSMIntermedEntry) o;
return this.unique_id.compareTo(other.unique_id);
}
return BEFORE;
}
/*
* This method is to support convenient creating of new copies
* of states for Reduce to create sub-states ReduceShuffle, ReduceSort, and ReduceReducer
*/
public FSMIntermedEntry clone() throws CloneNotSupportedException {
FSMIntermedEntry newObj = (FSMIntermedEntry) super.clone();
Set<String> mapKeys;
newObj.state_type = new StateType(this.state_type.val);
newObj.state_mapred = new MapRedState(this.state_mapred.val);
newObj.state_hdfs = new HDFSState(this.state_hdfs.val);
newObj.fsm_type = new FSMType(this.fsm_type.val);
/* Deep copy all strings */
newObj.state_name = this.state_name;
newObj.unique_id = this.unique_id;
newObj.timestamp = this.timestamp;
newObj.time_start = this.time_start;
newObj.time_end = this.time_end;
newObj.time_orig_epoch = this.time_orig_epoch;
newObj.time_orig = this.time_orig;
newObj.job_id = this.job_id;
/* Deep copy of TreeMap */
newObj.add_info = new TreeMap<String,String>();
for(Entry<String, String> entry : this.add_info.entrySet()) {
String currKey = entry.getKey();
String value = entry.getValue();
newObj.add_info.put(currKey, value);
}
return newObj;
}
public String toString() {
  // Render as "<state_name>@<unique_id>" for logs and debugging.
  return this.state_name + "@" + this.unique_id;
}
}
| 8,275 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/FSMBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.IOException;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.extraction.demux.*;
import org.apache.hadoop.chukwa.extraction.engine.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.chukwa.extraction.demux.processor.ChukwaOutputCollector;
/**
* FSM Builder
*
* Input: start/end pairs i.e. JobHistory data
*
* Input handling is controlled by choosing a custom mapper that
* is able to parse the desired input format (e.g. JobHistory lines)
* One map class is provided for each type of input data provided
* Each map class "standardizes" the different input log types
* to the standardized internal FSMIntermedEntry representation
*
* Currently available mapper classes:
* DataNodeClientTraceMapper
* TaskTrackerClientTraceMapper
* JobHistoryTaskDataMapper
*
* Parameterizing choice of mapper class - read in as config parameter
*
* Output is standardized, regardless of input, and is generated by
* the common reducer
*
*/
public class FSMBuilder extends Configured implements Tool {
private static Log log = LogFactory.getLog(FSMBuilder.class);
public enum AddInfoTypes {HOST_OTHER, INPUT_BYTES, INPUT_RECORDS, INPUT_GROUPS,
OUTPUT_BYTES, OUTPUT_RECORDS, SHUFFLE_BYTES, RECORDS_SPILLED,
COMBINE_INPUT_RECORDS, COMBINE_OUTPUT_RECORDS}
protected static final String SEP = "/";
public static class FSMReducer
extends MapReduceBase
implements Reducer<ChukwaRecordKey, FSMIntermedEntry, ChukwaRecordKey, ChukwaRecord> {
/**
* These are used for the add_info TreeMap; keys not listed here are automatically
* prepended with "COUNTER_"
*/
final static String NON_COUNTER_KEYS [] = {"csource","ctags","STATE_STRING"};
protected final static String JCDF_ID1 = "JCDF_ID1";
protected final static String JCDF_ID2 = "JCDF_ID2";
protected final static String JCDF_EDGE_TIME = "JCDF_E_TIME";
protected final static String JCDF_EDGE_VOL = "JCDF_E_VOL";
protected final static String JCDF_SEP = "@";
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: a block-read edge from {@code TASK_ID@TIME_START} to
 * {@code map@JOB_ID}, weighted by duration and COUNTER_BYTES.
 */
protected void addStitchingFields_blockread
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("JOB_ID"));
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_BYTES"));
  String id1 = cr.getValue("TASK_ID") + JCDF_SEP + cr.getValue("TIME_START");
  String id2 = "map" + JCDF_SEP + cr.getValue("JOB_ID");
  // Edge weight is the state's duration in milliseconds.
  String et = Long.toString(Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START")));
  // String.valueOf preserves the historical behavior of rendering a
  // missing counter as the literal string "null" (StringBuilder.append).
  String ev = String.valueOf(cr.getValue("COUNTER_BYTES"));
  cr.add(JCDF_ID1, id1);
  cr.add(JCDF_ID2, id2);
  cr.add(JCDF_EDGE_TIME, et);
  cr.add(JCDF_EDGE_VOL, ev);
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: a map edge from {@code map@TASK_ID} to {@code shuf@TASK_ID},
 * weighted by duration and COUNTER_INPUT_BYTES.
 */
protected void addStitchingFields_map
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_INPUT_BYTES"));
  long duration = Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START"));
  cr.add(JCDF_ID1, "map" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_ID2, "shuf" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_EDGE_TIME, Long.toString(duration));
  cr.add(JCDF_EDGE_VOL, cr.getValue("COUNTER_INPUT_BYTES"));
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: a shuffle edge from {@code shuf@mapID} to {@code shufred@redID}.
 * The TASK_ID field is expected to be of the form "reduceID@mapID".
 */
protected void addStitchingFields_shuffle
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_BYTES"));
  String id_parts[] = (cr.getValue("TASK_ID")).split("@");
  if (id_parts.length != 2) {
    log.warn("Could not split [" + cr.getValue("TASK_ID") + "]; had length " + id_parts.length);
    if (id_parts.length < 2) {
      // Bug fix: previously fell through and threw
      // ArrayIndexOutOfBoundsException on id_parts[1]; skip stitching
      // for malformed IDs instead of crashing the reducer.
      return;
    }
  }
  String redid = id_parts[0];
  String mapid = id_parts[1];
  String id1 = "shuf" + JCDF_SEP + mapid;
  String id2 = "shufred" + JCDF_SEP + redid;
  // Edge weight is the state's duration in milliseconds.
  String et = Long.toString(
    Long.parseLong(cr.getValue("TIME_END")) -
    Long.parseLong(cr.getValue("TIME_START"))
  );
  String ev = cr.getValue("COUNTER_BYTES");
  cr.add(JCDF_ID1, id1);
  cr.add(JCDF_ID2, id2);
  cr.add(JCDF_EDGE_TIME, et);
  cr.add(JCDF_EDGE_VOL, ev);
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: an edge from {@code shufred@TASK_ID} to {@code redsort@TASK_ID}.
 */
protected void addStitchingFields_redshufwait
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_INPUT_BYTES"));
  long duration = Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START"));
  cr.add(JCDF_ID1, "shufred" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_ID2, "redsort" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_EDGE_TIME, Long.toString(duration));
  // String.valueOf keeps the historical "null" rendering for an absent
  // counter (original code appended it to a StringBuilder).
  cr.add(JCDF_EDGE_VOL, String.valueOf(cr.getValue("COUNTER_INPUT_BYTES")));
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: an edge from {@code redsort@TASK_ID} to {@code red@TASK_ID}.
 */
protected void addStitchingFields_redsort
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_INPUT_BYTES"));
  long duration = Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START"));
  cr.add(JCDF_ID1, "redsort" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_ID2, "red" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_EDGE_TIME, Long.toString(duration));
  // String.valueOf keeps the historical "null" rendering for an absent
  // counter (original code appended it to a StringBuilder).
  cr.add(JCDF_EDGE_VOL, String.valueOf(cr.getValue("COUNTER_INPUT_BYTES")));
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: an edge from {@code red@TASK_ID} to {@code redout@TASK_ID}.
 */
protected void addStitchingFields_redreducer
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_INPUT_BYTES"));
  long duration = Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START"));
  cr.add(JCDF_ID1, "red" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_ID2, "redout" + JCDF_SEP + cr.getValue("TASK_ID"));
  cr.add(JCDF_EDGE_TIME, Long.toString(duration));
  cr.add(JCDF_EDGE_VOL, cr.getValue("COUNTER_INPUT_BYTES"));
}
/**
 * Populates fields used by the Pig script for stitching together causal
 * flows: a block-write edge from {@code redout@JOB_ID} to
 * {@code TASK_ID@TIME_START}, weighted by duration and COUNTER_BYTES.
 */
protected void addStitchingFields_blockwrite
  (ChukwaRecord cr, ArrayList<String> fnl)
{
  assert(fnl.contains("JOB_ID"));
  assert(fnl.contains("TASK_ID"));
  assert(fnl.contains("TIME_END"));
  assert(fnl.contains("TIME_START"));
  assert(fnl.contains("COUNTER_BYTES"));
  long duration = Long.parseLong(cr.getValue("TIME_END")) - Long.parseLong(cr.getValue("TIME_START"));
  cr.add(JCDF_ID1, "redout" + JCDF_SEP + cr.getValue("JOB_ID"));
  cr.add(JCDF_ID2, cr.getValue("TASK_ID") + JCDF_SEP + cr.getValue("TIME_START"));
  cr.add(JCDF_EDGE_TIME, Long.toString(duration));
  cr.add(JCDF_EDGE_VOL, cr.getValue("COUNTER_BYTES"));
}
/**
 * Dispatches to the per-state stitching-field populator based on the
 * record's STATE_NAME. States without a populator get no stitching fields.
 */
public void addStitchingFields
  (ChukwaRecord cr)
{
  // Build a membership list of the record's field names for the
  // per-state populators to validate against.
  String[] fieldNames = cr.getFields();
  ArrayList<String> fieldNamesList = new ArrayList<String>(fieldNames.length);
  for (String fieldName : fieldNames) {
    fieldNamesList.add(fieldName);
  }
  // safety check
  assert(fieldNamesList.contains("STATE_NAME"));
  String stateName = cr.getValue("STATE_NAME");
  if (stateName.equals("MAP")) {
    addStitchingFields_map(cr, fieldNamesList);
  } else if (stateName.equals("REDUCE_SHUFFLEWAIT")) {
    addStitchingFields_redshufwait(cr, fieldNamesList);
  } else if (stateName.equals("REDUCE_SORT")) {
    addStitchingFields_redsort(cr, fieldNamesList);
  } else if (stateName.equals("REDUCE_REDUCER")) {
    addStitchingFields_redreducer(cr, fieldNamesList);
  } else if (stateName.equals("SHUFFLE_LOCAL") || stateName.equals("SHUFFLE_REMOTE")) {
    addStitchingFields_shuffle(cr, fieldNamesList);
  } else if (stateName.equals("READ_LOCAL") || stateName.equals("READ_REMOTE")) {
    addStitchingFields_blockread(cr, fieldNamesList);
  } else if (stateName.equals("WRITE_LOCAL") || stateName.equals("WRITE_REMOTE")) {
    addStitchingFields_blockwrite(cr, fieldNamesList);
  }
  // any other state: add nothing
}
/**
 * Pairs the START and END intermediate entries that share a key and emits
 * one completed-state ChukwaRecord combining both, with END-side add_info
 * entries copied in as counters (prefixed "COUNTER_" unless whitelisted
 * in NON_COUNTER_KEYS). Keys with exactly one entry, or with two entries
 * that are not a START/END pair, are logged and dropped.
 */
public void reduce
  (ChukwaRecordKey key, Iterator<FSMIntermedEntry> values,
   OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
   Reporter reporter)
  throws IOException
{
  FSMIntermedEntry start_rec = null, end_rec = null;
  FSMIntermedEntry tmpent;
  String keystr = key.getKey().trim();
  String newkey;
  ArrayList<FSMIntermedEntry> ents = new ArrayList<FSMIntermedEntry>();
  ArrayList<String> noncounters = new ArrayList<String>();
  ChukwaRecord cr = new ChukwaRecord();
  for (int i = 0; i < NON_COUNTER_KEYS.length; i++) noncounters.add(NON_COUNTER_KEYS[i]);
  ChukwaOutputCollector coc = new ChukwaOutputCollector("SALSA_COMPLETE", output, reporter);
  // Buffer all values; clone each so Hadoop's value-object reuse cannot
  // overwrite entries we still hold.
  int itemcount = 0;
  try {
    while (values.hasNext()) {
      itemcount++;
      tmpent = values.next();
      ents.add(tmpent.clone());
    }
  } catch (CloneNotSupportedException e) {
    // FSMIntermedEntry supports clone(); nothing sensible to do here.
  }
  log.debug("In reduce [Key " + keystr + "] (" + itemcount + " vals)");
  if (itemcount == 2) { // i.e. we have both start and end events
    if (ents.get(0).state_type.val == StateType.STATE_START &&
        ents.get(1).state_type.val == StateType.STATE_END)
    {
      start_rec = ents.get(0); end_rec = ents.get(1);
    } else if (ents.get(1).state_type.val == StateType.STATE_START &&
               ents.get(0).state_type.val == StateType.STATE_END)
    {
      start_rec = ents.get(1); end_rec = ents.get(0);
    } else {
      log.warn("In reduce [Key " + keystr + "] Invalid combination of state types: number of states: "+itemcount+".");
      // Bug fix: previously fell through with null start_rec/end_rec and
      // threw a NullPointerException below; drop the malformed pair.
      return;
    }
    cr.add("STATE_NAME",start_rec.state_name);
    cr.add("STATE_UNIQ_ID",start_rec.getUniqueID());
    cr.add("TIMESTAMP",start_rec.timestamp);
    cr.add("TIME_START",start_rec.time_start);
    cr.add("TIME_END",end_rec.time_end);
    // Last three digits of the timestamps (assumed epoch-milliseconds —
    // the map side enforces a minimum timestamp length).
    cr.add("TIME_START_MILLIS",start_rec.time_start.substring(start_rec.time_start.length()-3));
    cr.add("TIME_END_MILLIS",end_rec.time_end.substring(end_rec.time_end.length()-3));
    cr.add("HOST",start_rec.host_exec);
    cr.add("HOST_OTHER",start_rec.host_other);
    cr.add("JOB_ID",start_rec.job_id);
    cr.add("TASK_ID",start_rec.getFriendlyID());
    // Copy the END record's add_info entries; keys not whitelisted in
    // NON_COUNTER_KEYS get the "COUNTER_" prefix.
    Set<String> treemapkeys = end_rec.add_info.keySet();
    Iterator<String> keyIter = treemapkeys.iterator();
    for (int i = 0; i < treemapkeys.size(); i++) {
      assert(keyIter.hasNext());
      String currkey = keyIter.next();
      if (currkey != null &&
          !noncounters.contains(currkey)) {
        cr.add("COUNTER_" + currkey, end_rec.add_info.get(currkey));
      } else if (currkey != null && noncounters.contains(currkey)) {
        cr.add(currkey, end_rec.add_info.get(currkey));
      }
    }
    assert(!keyIter.hasNext());
    cr.setTime(Long.parseLong(start_rec.timestamp));
    // Output key: <orig epoch>/<unique id>/<orig time>.
    newkey = new StringBuilder().append(start_rec.time_orig_epoch).append(SEP).append(start_rec.getUniqueID()).
      append(SEP).append(start_rec.time_orig).toString();
    log.info("Key ["+newkey+"] Task ["+start_rec.getUniqueID()+"] Job ["+start_rec.job_id+"] Friendly ["+start_rec.getFriendlyID()+"]");
    addStitchingFields(cr);
    log.debug(cr);
    coc.collect(new ChukwaRecordKey(key.getReduceType(), newkey), cr);
  } else if (itemcount == 1) {
    // check that we have only the start; if we have only the end, dump it
    // otherwise change the reducetype to get record written to file for
    // incomplete entries
    log.warn("Key ["+keystr+"] Too few state entries: "+itemcount+" (intermediate processing not implemented yet).");
  } else { // any other value is invalid
    // malformed data; print debug info?
    log.warn("Key ["+keystr+"] Malformed data: unexpected number of state entries: "+itemcount+".");
  }
}
}
/**
 * Configures and runs the FSMBuilder job.
 * Usage: -D chukwa.salsa.fsm.mapclass=&lt;Mapper class&gt;
 *        -in &lt;# inputs&gt; [input dir 1] ... [input dir n] [output dir]
 * @return 0 on success, 1 on a usage/configuration error
 */
public int run (String args[]) throws Exception {
  int num_inputs;
  JobConf conf = new JobConf(getConf(), FSMBuilder.class);
  String [] args2 = args;
  if (args2.length < 4 || !"-in".equals(args2[0]))
  {
    System.err.println("Specifying mapper (full Java class): -D chukwa.salsa.fsm.mapclass=");
    System.err.println("Application-specific arguments: -in <# inputs> [input dir 1] ... [input dir n] [output dir]");
    return(1);
  }
  conf.setJobName("Salsa_FSMBuilder");
  /* Get name of Mapper class to use */
  String mapclassname = conf.get("chukwa.salsa.fsm.mapclass");
  log.info("Mapper class: " + mapclassname);
  Class mapperClass = null;
  try {
    mapperClass = Class.forName(mapclassname);
  } catch (ClassNotFoundException c) {
    System.err.println("Mapper " + mapclassname + " not found: " + c.toString());
  }
  if (mapperClass == null) {
    // Bug fix: previously proceeded with a null mapper class and failed
    // later with a NullPointerException; fail fast instead.
    return(1);
  }
  /* Get on with usual job setup */
  conf.setMapperClass(mapperClass);
  conf.setReducerClass(FSMReducer.class);
  conf.setOutputKeyClass(ChukwaRecordKey.class);
  conf.setOutputValueClass(ChukwaRecord.class);
  conf.setInputFormat(SequenceFileInputFormat.class);
  conf.setOutputFormat(ChukwaRecordOutputFormat.class);
  conf.setPartitionerClass(FSMIntermedEntryPartitioner.class);
  conf.setMapOutputValueClass(FSMIntermedEntry.class);
  conf.setMapOutputKeyClass(ChukwaRecordKey.class);
  conf.setNumReduceTasks(1); // fixed at 1 to ensure that all records are grouped together
  /* Setup inputs/outputs */
  try {
    num_inputs = Integer.parseInt(args2[1]);
  } catch (NumberFormatException e) {
    System.err.println("Specifying mapper: -D chukwa.salsa.fsm.mapper=");
    System.err.println("Application-specific arguments: -in <# inputs> -out <#outputs> [input dir] [output dir]");
    return(1);
  }
  if (num_inputs <= 0) {
    System.err.println("Must have at least 1 input.");
    return(1);
  }
  for (int i = 2; i < 2+num_inputs; i++) {
    Path in = new Path(args2[i]);
    FileInputFormat.addInputPath(conf, in);
  }
  // The single argument after the input dirs is the output dir.
  Path out = new Path(args2[2+num_inputs]);
  FileOutputFormat.setOutputPath(conf, out);
  JobClient.runJob(conf);
  return(0);
}
/** Entry point: runs the tool and exits with its status code. */
public static void main (String [] args) throws Exception {
  int res = ToolRunner.run(new Configuration(), new FSMBuilder(), args);
  // Bug fix: the exit status was previously discarded; propagate it to
  // the shell per the standard Tool/ToolRunner convention.
  System.exit(res);
}
}
| 8,276 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/DataNodeClientTraceMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.IOException;
import java.util.ArrayList;
import java.util.regex.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.extraction.demux.*;
import org.apache.hadoop.chukwa.extraction.engine.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
/**
* Pluggable mapper for FSMBuilder
*
* K2 = State Name + State ID
* (We use ChukwaRecordKey since it would already have implemented a bunch of
* useful things such as Comparators etc.)
* V2 = TreeMap
*/
public class DataNodeClientTraceMapper
extends MapReduceBase
implements Mapper<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, FSMIntermedEntry>
{
private static Log log = LogFactory.getLog(FSMBuilder.class);
protected static final String SEP = "/";
protected final static String FSM_CRK_ReduceType = FSMType.NAMES[FSMType.FILESYSTEM_FSM];
private final Pattern ipPattern =
Pattern.compile(".*[a-zA-Z\\-_:\\/]([0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+)[a-zA-Z0-9\\-_:\\/].*");
/**
 * Routes detailed HDFS ClientTrace records to the parser; everything
 * else (non-HDFS ops, plain "DataNode" log messages) is dropped.
 */
public void map
  (ChukwaRecordKey key, ChukwaRecord val,
   OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
   Reporter reporter)
  throws IOException
{
  // Collect the record's field names for membership checks downstream.
  String[] fieldNames = val.getFields();
  ArrayList<String> fieldNamesList = new ArrayList<String>(fieldNames.length);
  for (String fieldName : fieldNames) {
    fieldNamesList.add(fieldName);
  }
  // Handle ClientTraceDetailed and DataNodeLog entries separately
  // because we need to combine both types of entries for a complete picture
  if (key.getReduceType().equals("ClientTraceDetailed")) {
    assert(fieldNamesList.contains("op"));
    if (val.getValue("op").startsWith("HDFS")) {
      parseClientTraceDetailed(key, val, output, reporter, fieldNamesList);
    } // drop non-HDFS operations
  }
  // ignore "DataNode" type log messages; unsupported
}
// Fallback read duration when the ClientTrace record carries no
// "duration" field (pre-0.20 format).
protected final int DEFAULT_READ_DURATION_MS = 10;
// works with <= 0.20 ClientTrace with no durations
// includes hack to create start+end entries immediately
/**
 * Converts one detailed HDFS ClientTrace record into a START and an END
 * FSMIntermedEntry and emits both. The start time is back-computed from
 * the record's end time minus its duration (or a default duration).
 * Local vs. remote read/write is decided by comparing the src/dest/srvID
 * addresses extracted from the record.
 */
protected void parseClientTraceDetailed
  (ChukwaRecordKey key, ChukwaRecord val,
   OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
   Reporter reporter, ArrayList<String> fieldNamesList)
  throws IOException
{
  FSMIntermedEntry start_rec, end_rec;
  String current_op = null, src_add = null, dest_add = null;
  String datanodeserver_add = null, blkid = null, cli_id = null;
  /* initialize state records */
  start_rec = new FSMIntermedEntry();
  end_rec = new FSMIntermedEntry();
  start_rec.fsm_type = new FSMType(FSMType.FILESYSTEM_FSM);
  start_rec.state_type = new StateType(StateType.STATE_START);
  end_rec.fsm_type = new FSMType(FSMType.FILESYSTEM_FSM);
  end_rec.state_type = new StateType(StateType.STATE_END);
  /* extract addresses */
  // Each address field may embed the IPv4 address in a larger string
  // (e.g. with protocol/port); ipPattern captures just the address.
  // A failed match is logged and treated as the empty string.
  Matcher src_regex = ipPattern.matcher(val.getValue("src"));
  if (src_regex.matches()) {
    src_add = src_regex.group(1);
  } else {
    log.warn("Failed to match src IP:"+val.getValue("src")+"");
    src_add = "";
  }
  Matcher dest_regex = ipPattern.matcher(val.getValue("dest"));
  if (dest_regex.matches()) {
    dest_add = dest_regex.group(1);
  } else {
    log.warn("Failed to match dest IP:"+val.getValue("dest")+"");
    dest_add = "";
  }
  Matcher datanodeserver_regex = ipPattern.matcher(val.getValue("srvID"));
  if (datanodeserver_regex.matches()) {
    datanodeserver_add = datanodeserver_regex.group(1);
  } else {
    log.warn("Failed to match DataNode server address:"+val.getValue("srvID")+"");
    datanodeserver_add = "";
  }
  // The executing host for both records is the trace's src address.
  start_rec.host_exec = src_add;
  end_rec.host_exec = src_add;
  blkid = val.getValue("blockid").trim();
  // Strip the "DFSClient_" prefix from the client ID when present.
  if (fieldNamesList.contains("cliID")) {
    cli_id = val.getValue("cliID").trim();
    if (cli_id.startsWith("DFSClient_")) {
      cli_id = cli_id.substring(10);
    }
  } else {
    cli_id = "";
  }
  current_op = val.getValue("op");
  String [] k = key.getKey().split("/");
  // Derive the state's start time by subtracting the duration from the
  // record's (end) time. "duration" is divided by 1000 — presumably
  // microseconds to milliseconds; TODO confirm against the emitter.
  long actual_time_ms = Long.parseLong(val.getValue("actual_time"));
  if (fieldNamesList.contains("duration")) {
    try {
      actual_time_ms -= (Long.parseLong(val.getValue("duration").trim()) / 1000);
    } catch (NumberFormatException nef) {
      log.warn("Failed to parse duration: >>" + val.getValue("duration"));
    }
  } else {
    actual_time_ms -= DEFAULT_READ_DURATION_MS;
  }
  // k[0] is the original epoch component of the ChukwaRecordKey.
  start_rec.time_orig_epoch = k[0];
  start_rec.time_orig = Long.toString(actual_time_ms); // not actually used
  start_rec.timestamp = Long.toString(actual_time_ms);
  start_rec.time_end = "";
  start_rec.time_start = start_rec.timestamp;
  end_rec.time_orig_epoch = k[0];
  end_rec.time_orig = val.getValue("actual_time");
  end_rec.timestamp = val.getValue("actual_time");
  end_rec.time_end = val.getValue("actual_time");
  end_rec.time_start = "";
  log.debug("Duration: " + (Long.parseLong(end_rec.time_end) - Long.parseLong(start_rec.time_start)));
  end_rec.job_id = cli_id; // use job id = block id
  start_rec.job_id = cli_id;
  // Classify the operation: read locality is src vs. dest; write
  // locality is dest vs. the DataNode server address.
  if (current_op.equals("HDFS_READ")) {
    if (src_add != null && src_add.equals(dest_add)) {
      start_rec.state_hdfs = new HDFSState(HDFSState.READ_LOCAL);
    } else {
      start_rec.state_hdfs = new HDFSState(HDFSState.READ_REMOTE);
    }
    // should these ALWAYS be dest?
    start_rec.host_other = dest_add;
    end_rec.host_other = dest_add;
  } else if (current_op.equals("HDFS_WRITE")) {
    if (src_add != null && dest_add.equals(datanodeserver_add)) {
      start_rec.state_hdfs = new HDFSState(HDFSState.WRITE_LOCAL);
    } else if (!dest_add.equals(datanodeserver_add)) {
      start_rec.state_hdfs = new HDFSState(HDFSState.WRITE_REMOTE);
    } else {
      start_rec.state_hdfs = new HDFSState(HDFSState.WRITE_REPLICATED);
    }
    start_rec.host_other = dest_add;
    end_rec.host_other = dest_add;
  } else {
    log.warn("Invalid state: " + current_op);
  }
  end_rec.state_hdfs = start_rec.state_hdfs;
  start_rec.state_name = start_rec.state_hdfs.toString();
  end_rec.state_name = end_rec.state_hdfs.toString();
  start_rec.identifier = blkid;
  end_rec.identifier = blkid;
  // Unique ID format: <state name>@<block id>@<client id>.
  start_rec.unique_id = new StringBuilder().append(start_rec.state_name).append("@").append(start_rec.identifier).append("@").append(start_rec.job_id).toString();
  end_rec.unique_id = new StringBuilder().append(end_rec.state_name).append("@").append(end_rec.identifier).append("@").append(end_rec.job_id).toString();
  start_rec.add_info.put(Record.tagsField,val.getValue(Record.tagsField));
  start_rec.add_info.put("csource",val.getValue("csource"));
  end_rec.add_info.put(Record.tagsField,val.getValue(Record.tagsField));
  end_rec.add_info.put("csource",val.getValue("csource"));
  end_rec.add_info.put("STATE_STRING","SUCCESS"); // by default
  // add counter value
  end_rec.add_info.put("BYTES",val.getValue("bytes"));
  // NOTE: both collector keys use start_rec.timestamp — presumably so the
  // paired START/END records sort together; confirm if ever changing.
  String crk_mid_string_start = new StringBuilder().append(start_rec.getUniqueID()).append("_").append(start_rec.timestamp).toString();
  String crk_mid_string_end = new StringBuilder().append(end_rec.getUniqueID()).append("_").append(start_rec.timestamp).toString();
  output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType, crk_mid_string_start), start_rec);
  output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType, crk_mid_string_end), end_rec);
}
} // end of mapper class
| 8,277 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/JobHistoryTaskDataMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.extraction.engine.*;
import org.apache.hadoop.mapred.*;
/**
* Pluggable mapper for FSMBuilder
* Supports only 0.20+ JobHistory files
* because of explicitly coded counter names
*
* K2 = State Name + State ID
* (We use ChukwaRecordKey since it would already have implemented a bunch of
* useful things such as Comparators etc.)
* V2 = TreeMap
*/
public class JobHistoryTaskDataMapper
extends MapReduceBase
implements Mapper<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, FSMIntermedEntry>
{
private static Log log = LogFactory.getLog(FSMBuilder.class);
protected static final String SEP = "/";
protected final static String FSM_CRK_ReduceType = FSMType.NAMES[FSMType.MAPREDUCE_FSM];
/*
* Helper function for mapper to populate TreeMap of FSMIntermedEntr
* with input/output counters for Map records
*/
protected FSMIntermedEntry populateRecord_MapCounters
  (FSMIntermedEntry this_rec, ChukwaRecord val, ArrayList<String> fieldNamesList)
{
  // Long JobHistory (0.20+) counter names, paired position-for-position
  // with the short destination names stored in add_info.
  String mapCounterNames [] = {
    "Counter:FileSystemCounters:FILE_BYTES_WRITTEN",
    "Counter:org.apache.hadoop.mapred.Task$Counter:COMBINE_INPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:COMBINE_OUTPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:MAP_INPUT_BYTES",
    "Counter:org.apache.hadoop.mapred.Task$Counter:MAP_INPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:MAP_OUTPUT_BYTES",
    "Counter:org.apache.hadoop.mapred.Task$Counter:MAP_OUTPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:SPILLED_RECORDS"
  };
  String mapCounterDestNames[] = {
    "FILE_BYTES_WRITTEN",
    "COMBINE_INPUT_RECORDS",
    "COMBINE_OUTPUT_RECORDS",
    "INPUT_BYTES",
    "INPUT_RECORDS",
    "OUTPUT_BYTES",
    "OUTPUT_RECORDS",
    "SPILLED_RECORDS"
  };
  assert(mapCounterDestNames.length == mapCounterNames.length);
  // Copy over every counter actually present on this record.
  for (int idx = 0; idx < mapCounterNames.length; idx++) {
    String srcName = mapCounterNames[idx];
    if (fieldNamesList.contains(srcName)) {
      this_rec.add_info.put(mapCounterDestNames[idx], val.getValue(srcName));
    }
  }
  this_rec.add_info.put("FILE_BYTES_READ","0"); // to have same fields as reduce
  this_rec.add_info.put("INPUT_GROUPS","0"); // to have same fields as reduce
  return this_rec;
}
/*
* Helper function for mapper to populate TreeMap of FSMIntermedEntr
* with input/output counters for Reduce records
*/
protected FSMIntermedEntry populateRecord_ReduceCounters
  (FSMIntermedEntry this_rec, ChukwaRecord val, ArrayList<String> fieldNamesList)
{
  // Long JobHistory (0.20+) counter names, paired position-for-position
  // with the short destination names stored in add_info.
  String redCounterNames [] = {
    "Counter:FileSystemCounters:FILE_BYTES_READ",
    "Counter:FileSystemCounters:FILE_BYTES_WRITTEN",
    "Counter:org.apache.hadoop.mapred.Task$Counter:COMBINE_INPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:COMBINE_OUTPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:REDUCE_INPUT_GROUPS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:REDUCE_INPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:REDUCE_OUTPUT_RECORDS",
    "Counter:org.apache.hadoop.mapred.Task$Counter:REDUCE_SHUFFLE_BYTES",
    "Counter:org.apache.hadoop.mapred.Task$Counter:SPILLED_RECORDS"
  };
  String redCounterDestNames[] = {
    "FILE_BYTES_READ",
    "FILE_BYTES_WRITTEN",
    "COMBINE_INPUT_RECORDS",
    "COMBINE_OUTPUT_RECORDS",
    "INPUT_GROUPS",
    "INPUT_RECORDS",
    "OUTPUT_RECORDS",
    "INPUT_BYTES", // NOTE: shuffle bytes are mapped to "input_bytes"
    "SPILLED_RECORDS"
  };
  assert(redCounterDestNames.length == redCounterNames.length);
  // Copy over every counter actually present on this record.
  for (int idx = 0; idx < redCounterNames.length; idx++) {
    String srcName = redCounterNames[idx];
    if (fieldNamesList.contains(srcName)) {
      this_rec.add_info.put(redCounterDestNames[idx], val.getValue(srcName));
    }
  }
  this_rec.add_info.put("OUTPUT_BYTES","0"); // to have same fields as map
  return this_rec;
}
public void map
(ChukwaRecordKey key, ChukwaRecord val,
OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
Reporter reporter)
throws IOException
{
String task_type;
FSMIntermedEntry this_rec = new FSMIntermedEntry();
boolean add_record = true;
/* Extract field names for checking */
String [] fieldNames = val.getFields();
ArrayList<String> fieldNamesList = new ArrayList<String>(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) fieldNamesList.add(fieldNames[i]);
/* Check state (Map or Reduce), generate unique ID */
if (!fieldNamesList.contains("TASK_ATTEMPT_ID")) return; // Ignore "TASK" entries
if (!fieldNamesList.contains("TASK_TYPE")) { // Malformed, ignore
return;
} else {
task_type = val.getValue("TASK_TYPE");
if (!task_type.equals("MAP") && !task_type.equals("REDUCE")) {
return; // do nothing
}
}
/* Check if this is a start or end entry, set state type, extract start/end times */
if (fieldNamesList.contains("START_TIME")) {
this_rec.state_type.val = StateType.STATE_START;
this_rec.timestamp = val.getValue("START_TIME");
this_rec.time_start = val.getValue("START_TIME");
this_rec.time_end = "";
if (val.getValue("START_TIME").length() < 4+2) { // needs to at least have milliseconds
add_record = add_record & false;
}
} else if (fieldNamesList.contains("FINISH_TIME")) {
this_rec.state_type.val = StateType.STATE_END;
this_rec.timestamp = val.getValue("FINISH_TIME");
this_rec.time_start = "";
this_rec.time_end = val.getValue("FINISH_TIME");
if (val.getValue("FINISH_TIME").length() < 4+2) { // needs to at least have milliseconds
add_record = add_record & false;
}
} else {
this_rec.state_type.val = StateType.STATE_NOOP;
}
/* Fill in common intermediate state entry information */
// Extract original ChukwaRecordKey values for later key reconstruction by reducer
try {
this_rec = ParseUtilities.splitChukwaRecordKey(key.getKey().trim(),this_rec,SEP);
} catch (Exception e) {
log.warn("Error occurred splitting ChukwaRecordKey ["+key.getKey().trim()+"]: " + e.toString());
return;
}
// Populate state enum information
this_rec.fsm_type = new FSMType(FSMType.MAPREDUCE_FSM);
if (task_type.equals("MAP")) {
this_rec.state_mapred = new MapRedState(MapRedState.MAP);
} else if (task_type.equals("REDUCE")) {
this_rec.state_mapred = new MapRedState(MapRedState.REDUCE);
} else {
this_rec.state_mapred = new MapRedState(MapRedState.NONE); // error handling here?
}
// Fill state name, unique ID
this_rec.state_name = this_rec.state_mapred.toString();
this_rec.identifier = val.getValue("TASK_ATTEMPT_ID");
this_rec.generateUniqueID();
// Extract hostname from tracker name (if present), or directly fill from hostname (<= 0.18)
if (fieldNamesList.contains("HOSTNAME")) {
this_rec.host_exec = val.getValue("HOSTNAME");
this_rec.host_exec = ParseUtilities.removeRackFromHostname(this_rec.host_exec);
} else if (fieldNamesList.contains("TRACKER_NAME")) {
this_rec.host_exec = ParseUtilities.extractHostnameFromTrackerName(val.getValue("TRACKER_NAME"));
} else {
this_rec.host_exec = "";
}
if (this_rec.state_type.val == StateType.STATE_END) {
assert(fieldNamesList.contains("TASK_STATUS"));
String tmpstring = null;
tmpstring = val.getValue("TASK_STATUS");
if (tmpstring != null && (tmpstring.equals("KILLED") || tmpstring.equals("FAILED"))) {
add_record = add_record & false;
}
if (tmpstring != null && tmpstring.length() > 0) {
this_rec.add_info.put("STATE_STRING",tmpstring);
} else {
this_rec.add_info.put("STATE_STRING","");
}
switch(this_rec.state_mapred.val) {
case MapRedState.MAP:
this_rec = populateRecord_MapCounters(this_rec, val, fieldNamesList);
break;
case MapRedState.REDUCE:
this_rec = populateRecord_ReduceCounters(this_rec, val, fieldNamesList);
break;
default:
// do nothing
break;
}
}
// manually add clustername etc
assert(fieldNamesList.contains(Record.tagsField));
assert(fieldNamesList.contains("csource"));
this_rec.add_info.put(Record.tagsField,val.getValue(Record.tagsField));
this_rec.add_info.put("csource",val.getValue("csource"));
/* Special handling for Reduce Ends */
if (task_type.equals("REDUCE")) {
if (this_rec.state_type.val == StateType.STATE_END) {
add_record = add_record & expandReduceEnd(key,val,output,reporter,this_rec);
} else if (this_rec.state_type.val == StateType.STATE_START) {
add_record = add_record & expandReduceStart(key,val,output,reporter,this_rec);
}
} else if (task_type.equals("MAP")) {
add_record = add_record & true;
}
if (add_record) {
log.debug("Collecting record " + this_rec + "("+this_rec.state_type+") (ReduceType "+FSM_CRK_ReduceType+")");
output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType,this_rec.getUniqueID()),this_rec);
}
} // end of map()
/**
 * Emits a synthetic REDUCE_SHUFFLEWAIT start record alongside a reduce start,
 * so the shuffle-wait phase appears as its own state in the FSM. The new
 * record is a clone of this_rec whose start time is this_rec's timestamp.
 *
 * @param key original record key (unused here, kept for interface symmetry)
 * @param val original record value (unused here, kept for interface symmetry)
 * @param output collector the synthetic record is written to
 * @param reporter MapReduce reporter (unused)
 * @param this_rec template entry cloned to build the synthetic record
 * @return true if the record was collected, false if it had to be dropped
 * @throws IOException if the collector fails
 */
protected boolean expandReduceStart
  (ChukwaRecordKey key, ChukwaRecord val,
   OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
   Reporter reporter, FSMIntermedEntry this_rec)
  throws IOException
{
  FSMIntermedEntry redshuf_start_rec;
  try {
    redshuf_start_rec = this_rec.clone();
  } catch (CloneNotSupportedException e) {
    // Bug fix: this exception used to be silently swallowed, leaving
    // redshuf_start_rec null and causing a NullPointerException on the
    // very next statement. Report failure and drop the record instead.
    log.warn("Unable to clone FSMIntermedEntry; skipping REDUCE_SHUFFLEWAIT start record: " + e);
    return false;
  }
  redshuf_start_rec.state_type = new StateType(StateType.STATE_START);
  redshuf_start_rec.state_mapred = new MapRedState(MapRedState.REDUCE_SHUFFLEWAIT);
  // SHUFFLEWAIT starts exactly when the reduce itself starts.
  redshuf_start_rec.timestamp = this_rec.timestamp;
  redshuf_start_rec.time_start = this_rec.timestamp;
  redshuf_start_rec.time_end = "";
  redshuf_start_rec.generateUniqueID();
  log.debug("Collecting record " + redshuf_start_rec +
    "("+redshuf_start_rec.state_type+") (ReduceType "+FSM_CRK_ReduceType+")");
  output.collect(
    new ChukwaRecordKey(FSM_CRK_ReduceType,redshuf_start_rec.getUniqueID()),
    redshuf_start_rec
  );
  return true;
}
/**
 * Generates 5 extra FSMIntermedEntry's for a given reduce_end entry:
 * splits the reduce into ReduceShuffleWait, ReduceSort and ReduceReducer
 * sub-states (end/start/end/start records plus a ReduceReducer end), while
 * the caller retains the original combined Reduce record.
 *
 * @param key original record key (unused here, kept for interface symmetry)
 * @param val record whose SHUFFLE_FINISHED / SORT_FINISHED fields supply
 *        the sub-state boundaries
 * @param output collector the synthetic records are written to
 * @param reporter MapReduce reporter (unused)
 * @param this_rec template entry cloned to build the synthetic records
 * @return true if all records were collected, false if the entry was
 *         malformed or could not be cloned
 * @throws IOException if the collector fails
 */
protected boolean expandReduceEnd
  (ChukwaRecordKey key, ChukwaRecord val,
   OutputCollector<ChukwaRecordKey, FSMIntermedEntry> output,
   Reporter reporter, FSMIntermedEntry this_rec)
  throws IOException
{
  /* Extract field names for checking */
  String [] fieldNames = val.getFields();
  ArrayList<String> fieldNamesList = new ArrayList<String>(fieldNames.length);
  for (String fieldName : fieldNames) {
    fieldNamesList.add(fieldName);
  }
  // Validate required boundary fields up front. (Bug fix: previously five
  // clones were built and their unique IDs generated before this check ran.)
  if (!fieldNamesList.contains("SHUFFLE_FINISHED")
      || !fieldNamesList.contains("SORT_FINISHED")) {
    return false;
  }
  String shuffleFinished = val.getValue("SHUFFLE_FINISHED");
  String sortFinished = val.getValue("SORT_FINISHED");
  if (shuffleFinished == null || sortFinished == null) {
    return false;
  }
  FSMIntermedEntry redshuf_end_rec;
  FSMIntermedEntry redsort_start_rec, redsort_end_rec;
  FSMIntermedEntry redred_start_rec, redred_end_rec;
  try {
    redsort_start_rec = this_rec.clone();
    redred_start_rec = this_rec.clone();
    redshuf_end_rec = this_rec.clone();
    redsort_end_rec = this_rec.clone();
    redred_end_rec = this_rec.clone();
  } catch (CloneNotSupportedException e) {
    // Bug fix: a swallowed clone failure used to leave null records and
    // cause a NullPointerException below; skip the expansion instead.
    log.warn("Unable to clone FSMIntermedEntry; skipping reduce-end expansion: " + e);
    return false;
  }
  // Assign the sub-state identities before generating unique IDs,
  // mirroring the original ordering.
  redshuf_end_rec.state_type = new StateType(StateType.STATE_END);
  redshuf_end_rec.state_mapred = new MapRedState(MapRedState.REDUCE_SHUFFLEWAIT);
  redsort_start_rec.state_type = new StateType(StateType.STATE_START);
  redsort_end_rec.state_type = new StateType(StateType.STATE_END);
  redsort_start_rec.state_mapred = new MapRedState(MapRedState.REDUCE_SORT);
  redsort_end_rec.state_mapred = new MapRedState(MapRedState.REDUCE_SORT);
  redred_start_rec.state_type = new StateType(StateType.STATE_START);
  redred_end_rec.state_type = new StateType(StateType.STATE_END);
  redred_start_rec.state_mapred = new MapRedState(MapRedState.REDUCE_REDUCER);
  redred_end_rec.state_mapred = new MapRedState(MapRedState.REDUCE_REDUCER);
  redshuf_end_rec.generateUniqueID();
  redsort_start_rec.generateUniqueID();
  redsort_end_rec.generateUniqueID();
  redred_start_rec.generateUniqueID();
  redred_end_rec.generateUniqueID();
  // SHUFFLEWAIT ends / SORT starts when the shuffle finished;
  // SORT ends / REDUCER starts when the sort finished.
  redshuf_end_rec.timestamp = shuffleFinished;
  redshuf_end_rec.time_start = "";
  redshuf_end_rec.time_end = shuffleFinished;
  redsort_start_rec.timestamp = shuffleFinished;
  redsort_start_rec.time_start = shuffleFinished;
  redsort_start_rec.time_end = "";
  redsort_end_rec.timestamp = sortFinished;
  redsort_end_rec.time_start = "";
  redsort_end_rec.time_end = sortFinished;
  redred_start_rec.timestamp = sortFinished;
  redred_start_rec.time_start = sortFinished;
  redred_start_rec.time_end = "";
  /* redred_end times are exactly the same as the original red_end times */
  // Emit the five synthetic records in the original order.
  FSMIntermedEntry [] split_recs = {
    redshuf_end_rec, redsort_start_rec, redsort_end_rec,
    redred_start_rec, redred_end_rec
  };
  for (FSMIntermedEntry rec : split_recs) {
    log.debug("Collecting record " + rec +
      "("+rec.state_type+") (ReduceType "+FSM_CRK_ReduceType+")");
    output.collect(new ChukwaRecordKey(FSM_CRK_ReduceType,rec.getUniqueID()), rec);
  }
  return true;
}
} // end of mapper class
| 8,278 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/AdaptorNamingUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
 * Utility for deriving stable adaptor IDs. An ID is "adaptor_" followed by
 * the lowercase hex MD5 of the adaptor class name, data type and parameter
 * string, so the same (class, type, params) triple always yields the same ID.
 */
public class AdaptorNamingUtils {

  /**
   * Synthesizes a deterministic adaptor ID from the adaptor's identity.
   *
   * @param adaptorClassName fully-qualified adaptor class name
   * @param dataType the adaptor's data type
   * @param params the adaptor's parameter string
   * @return "adaptor_" followed by 32 lowercase hex digits
   * @throws NoSuchAlgorithmException if MD5 is unavailable in this JVM
   */
  public static String synthesizeAdaptorID(String adaptorClassName, String dataType,
      String params) throws NoSuchAlgorithmException {
    // MD5 is used only for a stable, well-distributed name — not for security.
    MessageDigest md = MessageDigest.getInstance("MD5");
    // Streaming updates are equivalent to hashing the concatenated strings.
    md.update(adaptorClassName.getBytes(Charset.forName("UTF-8")));
    md.update(dataType.getBytes(Charset.forName("UTF-8")));
    md.update(params.getBytes(Charset.forName("UTF-8")));
    // 8 chars of prefix + 32 hex digits.
    StringBuilder sb = new StringBuilder(40);
    sb.append("adaptor_");
    for (byte b : md.digest()) {
      // %02x zero-pads each byte, replacing the manual (b & 0xF0) pad check.
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }
}
| 8,279 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ConstRateAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.util.Random;
import java.util.regex.*;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.datacollection.*;
import org.apache.hadoop.chukwa.datacollection.adaptor.*;
import org.apache.hadoop.conf.Configuration;
/**
* Emits chunks at a roughly constant data rate. Chunks are in a very particular
* format: the output data is verifiable, but sufficiently non-deterministic
* that two different instances of this adaptor are very likely to have
* distinct outputs.
*
*
* Each chunk is full of random bytes; the randomness comes from
* an instance of java.util.Random seeded with the offset xored
* with the time-of-generation. The time of generation is stored, big-endian,
* in the first eight bytes of each chunk.
*/
public class ConstRateAdaptor extends AbstractAdaptor implements Runnable {

  // Each iteration sleeps MIN_SLEEP..MIN_SLEEP+SLEEP_VARIANCE ms; both are
  // overridable via configuration keys read in start().
  private int SLEEP_VARIANCE = 200;
  private int MIN_SLEEP = 300;
  // Running byte count of everything emitted so far; doubles as the
  // adaptor's resume offset.
  private long offset;
  private int bytesPerSec;
  // RNG that picks each sleep interval (and therefore each chunk size).
  // Seeded with 'seed' so a restart can replay the same size sequence.
  Random timeCoin;
  long seed;
  private volatile boolean stopping = false;

  /** Status string: "<type> <bytesPerSec> <seed>" — parseArgs-compatible. */
  public String getCurrentStatus() {
    return type.trim() + " " + bytesPerSec + " " + seed;
  }

  /**
   * Starts emitting chunks from the given resume offset.
   * Fast-forwards timeCoin past the draws that produced the chunks before
   * 'offset', so the chunk-size sequence continues where it left off.
   */
  public void start(long offset) throws AdaptorException {
    this.offset = offset;
    Configuration conf = control.getConfiguration();
    MIN_SLEEP = conf.getInt("constAdaptor.minSleep", MIN_SLEEP);
    SLEEP_VARIANCE = conf.getInt("constAdaptor.sleepVariance", SLEEP_VARIANCE);
    timeCoin = new Random(seed);
    // Consume one RNG draw per already-emitted chunk; each chunk's size is
    // the same (sleep * rate / 1000) + 8 formula used in run().
    long o = 0;
    while (o < offset)
      o += (int) ((timeCoin.nextInt(SLEEP_VARIANCE) + MIN_SLEEP) *
          (long) bytesPerSec / 1000L) + 8;
    new Thread(this).start(); // this is a Thread.start
  }

  /**
   * Parses "bytesPerSec [seed]". With no seed given, the current time is
   * used. Returns the argument on success, null on malformed input.
   */
  public String parseArgs(String bytesPerSecParam) {
    try {
      Matcher m = Pattern.compile("([0-9]+)(?:\\s+([0-9]+))?\\s*").matcher(bytesPerSecParam);
      if (!m.matches())
        return null;
      bytesPerSec = Integer.parseInt(m.group(1));
      String rate = m.group(2);
      if (rate != null)
        seed = Long.parseLong(m.group(2));
      else
        seed = System.currentTimeMillis();
    } catch (NumberFormatException e) {
      // ("bad argument to const rate adaptor: [" + bytesPerSecParam + "]");
      return null;
    }
    return bytesPerSecParam;
  }

  /** Emission loop: size each chunk so the average rate is ~bytesPerSec. */
  public void run() {
    try {
      while (!stopping) {
        int MSToSleep = timeCoin.nextInt(SLEEP_VARIANCE) + MIN_SLEEP;
        // +8 bytes of header (the seed) on top of the rate-derived payload.
        int arraySize = (int) (MSToSleep * (long) bytesPerSec / 1000L) + 8;
        ChunkImpl evt = nextChunk(arraySize );
        dest.add(evt);
        Thread.sleep(MSToSleep);
      } // end while
    } catch (InterruptedException ie) {
    } // abort silently
  }

  /**
   * Builds the next chunk: random bytes from Random(offset ^ seed), with the
   * seed then written big-endian over the first 8 bytes so checkChunk() can
   * recover it. Note the ChunkImpl seqID is the offset AFTER this chunk.
   */
  public ChunkImpl nextChunk(int arraySize) {
    byte[] data = new byte[arraySize];
    // Seeded with the PRE-increment offset; checkChunk reconstructs this
    // as (seqID - length).
    Random dataPattern = new Random(offset ^ seed);
    long s = this.seed;
    offset += data.length;
    dataPattern.nextBytes(data);
    // Overwrite the first 8 bytes with the seed, most-significant byte first.
    for(int i=0; i < 8; ++i) {
      data[7-i] = (byte) (s & 0xFF);
      s >>= 8;
    }
    ChunkImpl evt = new ChunkImpl(type, "random ("+ this.seed+")", offset, data,
        this);
    return evt;
  }

  public String toString() {
    return "const rate " + type;
  }

  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy) {
    stopping = true;
    return offset;
  }

  /**
   * Verifies a chunk produced by nextChunk(): reads the seed from the first
   * 8 bytes, xors it with the chunk's start offset (seqID - length) to get
   * the Random seed, regenerates the payload and compares bytes 8..end
   * (the first 8 are the seed header, not random data).
   */
  public static boolean checkChunk(Chunk chunk) {
    byte[] data = chunk.getData();
    byte[] correctData = new byte[data.length];
    long seed = 0;
    for(int i=0; i < 8; ++i)
      seed = (seed << 8) | (0xFF & data[i] );
    seed ^= (chunk.getSeqID() - data.length);
    Random dataPattern = new Random(seed);
    dataPattern.nextBytes(correctData);
    for(int i=8; i < data.length ; ++i)
      if(data [i] != correctData[i])
        return false;
    return true;
  }

  // Test hook: set type/seed without going through the adaptor framework.
  void test_init(String type) {
    this.type = type;
    seed = System.currentTimeMillis();
  }
}
| 8,280 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ExceptionUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.PrintWriter;
import java.io.StringWriter;
/** Helper for rendering a Throwable's stack trace as a String. */
public class ExceptionUtil {

  /**
   * Returns the full stack trace of {@code t}, exactly as
   * {@link Throwable#printStackTrace()} would print it.
   *
   * @param t the throwable to render
   * @return the stack trace text
   */
  public static String getStackTrace(Throwable t) {
    StringWriter buffer = new StringWriter();
    // try-with-resources closes (and thereby flushes) the writer.
    try (PrintWriter writer = new PrintWriter(buffer)) {
      t.printStackTrace(writer);
    }
    return buffer.toString();
  }
}
| 8,281 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/XssFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jsoup.Jsoup;
import org.jsoup.safety.Whitelist;
import org.owasp.esapi.ESAPI;
/**
 * Wraps an HttpServletRequest and sanitizes parameter values against XSS:
 * values are canonicalized (to defeat double-encoding), stripped of null
 * characters, and cleaned of all HTML via jsoup's empty whitelist.
 */
public class XssFilter {
  private HttpServletRequest request = null;
  private static Log LOG = LogFactory.getLog(XssFilter.class);

  public XssFilter() {
  }

  /** Wraps the given request; parameters are sanitized on access. */
  public XssFilter(HttpServletRequest request) {
    // Return the cleansed request
    this.request = request;
  }

  /**
   * Returns the sanitized value of the named parameter, or null if the
   * parameter is absent or any sanitization step throws (failure is logged,
   * not propagated).
   */
  public String getParameter(String key) {
    String value=null;
    try {
      value=filter(this.request.getParameter(key));
    } catch (Exception e) {
      LOG.info("XssFilter.getParameter: Cannot get parameter for: "+key);
    }
    return value;
  }

  /**
   * Returns all values of the named parameter, each sanitized in place.
   * If the parameter is absent, the for-each over the null array throws an
   * NPE which is caught here, so null is returned (and the miss logged).
   */
  public String[] getParameterValues(String key) {
    String[] values=null;
    try {
      values = this.request.getParameterValues(key);
      int i = 0;
      // Sanitize each value, writing the cleaned text back into the array.
      for(String value : values) {
        values[i] = filter(value);
        i++;
      }
    } catch (Exception e) {
      LOG.info("XssFilter.getParameterValues: cannot get parameter for: "+key);
    }
    return values;
  }

  /**
   * Strips any potential XSS threats out of the value
   * @param value is a string
   * @return filtered string, or null if value was null
   */
  public String filter( String value ) {
    if( value == null )
      return null;
    // Use the ESAPI library to avoid encoded attacks.
    // Canonicalize FIRST so later steps see the decoded form.
    value = ESAPI.encoder().canonicalize( value );
    // Avoid null characters
    value = value.replaceAll("\0", "");
    // Clean out HTML: Whitelist.none() removes every tag, keeping only text.
    value = Jsoup.clean( value, Whitelist.none() );
    return value;
  }
}
| 8,282 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ClassUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.JarURLConnection;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import org.apache.log4j.Logger;
/**
 * Classpath-scanning helpers: enumerate the classes in a package (from both
 * jar files and directories on the context classpath) and filter them by
 * implemented interface.
 */
public class ClassUtils {
  static Logger log = Logger.getLogger(ClassUtils.class);

  /**
   * Attempts to list all the classes in the specified package as determined
   * by the context class loader
   *
   * @param pckgname
   * the package name to search
   * @return a list of classes that exist within that package
   * @throws ClassNotFoundException
   * if something went wrong
   */
  public static List<Class> getClassesForPackage(String pckgname)
      throws ClassNotFoundException {
    // This will hold a list of directories matching the pckgname.
    // There may be more than one if a package is split over multiple jars/paths
    List<Class> classes = new ArrayList<Class>();
    ArrayList<File> directories = new ArrayList<File>();
    try {
      ClassLoader cld = Thread.currentThread().getContextClassLoader();
      if (cld == null) {
        throw new ClassNotFoundException("Can't get class loader.");
      }
      // Ask for all resources for the path (package name as a path).
      Enumeration<URL> resources = cld.getResources(pckgname.replace('.', '/'));
      while (resources.hasMoreElements()) {
        URL res = resources.nextElement();
        if (res.getProtocol().equalsIgnoreCase("jar")) {
          // Jar on the classpath: walk its entries directly.
          // NOTE(review): the JarFile is deliberately not closed — jar URL
          // connections cache it; closing would break other users.
          JarURLConnection conn = (JarURLConnection) res.openConnection();
          JarFile jar = conn.getJarFile();
          for (JarEntry e : Collections.list(jar.entries())) {
            // Keep only top-level .class entries (skip inner classes "$").
            if (e.getName().startsWith(pckgname.replace('.', '/'))
                && e.getName().endsWith(".class") && !e.getName().contains("$")) {
              // Strip ".class" (6 chars) and convert path to class name.
              String className = e.getName().replace("/", ".").substring(0,
                  e.getName().length() - 6);
              classes.add(Class.forName(className));
            }
          }
        } else
          // Filesystem directory: remember it, scan after the loop.
          directories.add(new File(URLDecoder.decode(res.getPath(), "UTF-8")));
      }
    } catch (NullPointerException x) {
      throw new ClassNotFoundException(pckgname + " does not appear to be "
          + "a valid package (Null pointer exception)");
    } catch (UnsupportedEncodingException encex) {
      throw new ClassNotFoundException(pckgname + " does not appear to be "
          + "a valid package (Unsupported encoding)");
    } catch (IOException ioex) {
      throw new ClassNotFoundException("IOException was thrown when trying "
          + "to get all resources for " + pckgname);
    }
    // For every directory identified capture all the .class files
    for (File directory : directories) {
      if (directory.exists()) {
        // Get the list of the files contained in the package
        String[] files = directory.list();
        if (files != null ) {
          for (String file : files) {
            // we are only interested in .class files
            if (file.endsWith(".class")) {
              // removes the .class extension
              classes.add(Class.forName(pckgname + '.'
                  + file.substring(0, file.length() - 6)));
            }
          }
        }
      } else {
        throw new ClassNotFoundException(pckgname + " (" + directory.getPath()
            + ") does not appear to be a valid package");
      }
    }
    return classes;
  }

  /**
   * Returns the classes in thePackage that directly implement theInterface.
   * Lookup failures are logged and yield an empty/partial list rather than
   * propagating.
   */
  public static List<Class> getClassessOfInterface(String thePackage,
      Class theInterface) {
    List<Class> classList = new ArrayList<Class>();
    try {
      for (Class discovered : getClassesForPackage(thePackage)) {
        // Only DIRECT interfaces are checked, not inherited ones.
        if (Arrays.asList(discovered.getInterfaces()).contains(theInterface)) {
          classList.add(discovered);
        }
      }
    } catch (ClassNotFoundException ex) {
      log.error(ExceptionUtil.getStackTrace(ex));
    }
    return classList;
  }
}
| 8,283 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/DriverManagerUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
/**
 * Helpers for loading a JDBC driver and opening connections from a Chukwa
 * style JDBC URL in which connection options are appended as a query string,
 * e.g. {@code jdbc:mysql://host/db?user=foo&password=bar}.
 */
public class DriverManagerUtil {

  /**
   * Loads the JDBC driver class named by the {@code JDBC_DRIVER} environment
   * variable, defaulting to {@code com.mysql.jdbc.Driver}.
   *
   * @return the loaded driver class
   * @throws ClassNotFoundException if the driver is not on the classpath
   */
  @SuppressWarnings("unchecked")
  public static Class loadDriver() throws ClassNotFoundException {
    String jdbcDriver = System.getenv("JDBC_DRIVER");
    if(jdbcDriver == null) {
      jdbcDriver = "com.mysql.jdbc.Driver";
    }
    return Class.forName(jdbcDriver);
  }

  /**
   * Opens a connection, splitting the URL's query string into driver
   * properties first.
   *
   * @param url JDBC URL, optionally with {@code ?key=value&...} options
   * @return an open connection
   * @throws SQLException if the connection cannot be established
   */
  public static Connection getConnection(String url) throws SQLException {
    ConnectionInfo info = new ConnectionInfo(url);
    return DriverManager.getConnection(info.getUri(), info.getProperties());
  }

  /** Parses a JDBC URL into a base URI plus a Properties of its options. */
  public static class ConnectionInfo {
    private Properties properties = new Properties();
    private String uri = null;

    /**
     * @param url JDBC URL; everything after the first '?' is parsed as
     *        &amp;-separated key=value pairs. A key without '=' maps to "".
     */
    public ConnectionInfo(String url) {
      int pos = url.indexOf('?');
      if(pos == -1) {
        uri = url;
      } else {
        uri = url.substring(0, pos);
        String[] paras = url.substring(pos + 1).split("&");
        for(String s : paras) {
          if(s==null || s.length()==0) {
            continue;
          }
          // Bug fix: split on the FIRST '=' only (limit 2), so values that
          // themselves contain '=' (e.g. base64 passwords) are kept intact
          // instead of being truncated at their first '='.
          String[] kv = s.split("=", 2);
          if(kv.length > 1) {
            properties.put(kv[0], kv[1]);
          }
          else {
            properties.put(kv[0], "");
          }
        }
      }
    }

    /** @return the parsed connection options */
    public Properties getProperties() {
      return properties;
    }

    /** @return the URL up to (not including) the first '?' */
    public String getUri() {
      return uri;
    }
  }
}
| 8,284 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/RegexUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
/*>>>
import checkers.nullness.quals.*;
import checkers.regex.quals.*;
*/
/**
* Utility methods for regular expressions, most notably for testing whether
* a string is a regular expression.
*/
public class RegexUtil {
/**
* A checked version of {@link PatternSyntaxException}.
* <p>
* This exception is useful when an illegal regex is detected but the
* contextual information to report a helpful error message is not available
* at the current depth in the call stack. By using a checked
* PatternSyntaxException the error must be handled up the call stack where
* a better error message can be reported.
* <p>
*
* Typical usage is:
* <pre>
* void myMethod(...) throws CheckedPatternSyntaxException {
* ...
* if (! isRegex(myString)) {
* throw new CheckedPatternSyntaxException(...);
* }
* ... Pattern.compile(myString) ...
* </pre>
*
* Simply calling <tt>Pattern.compile</tt> would have a similar effect,
* in that <tt>PatternSyntaxException</tt> would be thrown at run time if
* <tt>myString</tt> is not a regular expression. There are two problems
* with such an approach. First, a client of <tt>myMethod</tt> might
* forget to handle the exception, since <tt>PatternSyntaxException</tt>
* is not checked. Also, the Regex Checker would issue a warning about
* the call to <tt>Pattern.compile</tt> that might throw an exception.
* The above usage pattern avoids both problems.
*
* @see PatternSyntaxException
*/
public static class CheckedPatternSyntaxException extends Exception {
private static final long serialVersionUID = 6266881831979001480L;
private final PatternSyntaxException pse;
/**
* Constructs a new CheckedPatternSyntaxException equivalent to the
* given {@link PatternSyntaxException}.
* <p>
* Consider calling this constructor with the result of
* {@link RegexUtil#regexError}.
* @param pse is PatternSyntaxException object
*/
public CheckedPatternSyntaxException(PatternSyntaxException pse) {
this.pse = pse;
}
/**
* Constructs a new CheckedPatternSyntaxException.
*
* @param desc A description of the error
* @param regex The erroneous pattern
* @param index The approximate index in the pattern of the error,
* or {@code -1} if the index is not known
*/
public CheckedPatternSyntaxException(String desc, String regex, int index) {
this(new PatternSyntaxException(desc, regex, index));
}
/**
* Retrieves the description of the error.
*
* @return The description of the error
*/
public String getDescription() {
return pse.getDescription();
}
/**
* Retrieves the error index.
*
* @return The approximate index in the pattern of the error, or {@code -1}
* if the index is not known
*/
public int getIndex() {
return pse.getIndex();
}
/**
* Returns a multi-line string containing the description of the syntax
* error and its index, the erroneous regular-expression pattern, and a
* visual indication of the error index within the pattern.
*
* @return The full detail message
*/
public String getMessage() {
return pse.getMessage();
}
/**
* Retrieves the erroneous regular-expression pattern.
*
* @return The erroneous pattern
*/
public String getPattern() {
return pse.getPattern();
}
}
private RegexUtil() {
throw new AssertionError("Class RegexUtil shouldn't be instantiated");
}
/**
* Returns true if the argument is a syntactically valid regular
* expression.
* @param s is regular expression
* @return true if there is a match
*/
public static boolean isRegex(String s) {
return isRegex(s, 0);
}
/**
* Returns true if the argument is a syntactically valid regular
* expression with at least the given number of groups.
* @param s is regular expression
* @param groups is number of groups to match
* @return true if there is a match
*/
/*>>>
@SuppressWarnings("regex") // RegexUtil
*/
/*@Pure*/
public static boolean isRegex(String s, int groups) {
Pattern p;
try {
p = Pattern.compile(s);
} catch (PatternSyntaxException e) {
return false;
}
return getGroupCount(p) >= groups;
}
/**
* Returns true if the argument is a syntactically valid regular
* expression.
* @param c is a character
* @return true if there is a match
*/
public static boolean isRegex(char c) {
return isRegex(Character.toString(c));
}
/**
* Returns null if the argument is a syntactically valid regular
* expression. Otherwise returns a string describing why the argument is
* not a regex.
* @param s is regular expression
* @return null if s is a regular expression
*/
public static String regexError(String s) {
return regexError(s, 0);
}
/**
* Returns null if the argument is a syntactically valid regular
* expression with at least the given number of groups. Otherwise returns
* a string describing why the argument is not a regex.
* @param s is regular expression
* @param groups is number of groups to match
* @return null if s is a regular expression
*/
public static String regexError(String s, int groups) {
try {
Pattern p = Pattern.compile(s);
int actualGroups = getGroupCount(p);
if (actualGroups < groups) {
return regexErrorMessage(s, groups, actualGroups);
}
} catch (PatternSyntaxException e) {
return e.getMessage();
}
return null;
}
/**
* Returns null if the argument is a syntactically valid regular
* expression. Otherwise returns a PatternSyntaxException describing
* why the argument is not a regex.
* @param s is regular expression
* @return null if s is a regular expression
*/
public static PatternSyntaxException regexException(String s) {
return regexException(s, 0);
}
/**
* Returns null if the argument is a syntactically valid regular
* expression with at least the given number of groups. Otherwise returns a
* PatternSyntaxException describing why the argument is not a regex.
* @param s is regular expression
* @param groups is number of groups to match
* @return null if s is a regular expression
*/
public static PatternSyntaxException regexException(String s, int groups) {
try {
Pattern p = Pattern.compile(s);
int actualGroups = getGroupCount(p);
if (actualGroups < groups) {
return new PatternSyntaxException(regexErrorMessage(s, groups, actualGroups), s, -1);
}
} catch (PatternSyntaxException pse) {
return pse;
}
return null;
}
/**
* Returns the argument as a {@code @Regex String} if it is a regex,
* otherwise throws an error. The purpose of this method is to suppress Regex
* Checker warnings. Once the the Regex Checker supports flow-sensitivity, it
* should be very rarely needed.
* @param s is a regular expression
* @return null if s is a regular expression
*/
public static String asRegex(String s) {
return asRegex(s, 0);
}
/**
* Returns the argument as a {@code @Regex(groups) String} if it is a regex
* with at least the given number of groups, otherwise throws an error. The
* purpose of this method is to suppress Regex Checker warnings. Once the the
* Regex Checker supports flow-sensitivity, it should be very rarely needed.
* @param s is a regular expression
* @param groups is number of group to match
* @return null if s is a regular expression
*/
public static String asRegex(String s, int groups) {
try {
Pattern p = Pattern.compile(s);
int actualGroups = getGroupCount(p);
if (actualGroups < groups) {
throw new Error(regexErrorMessage(s, groups, actualGroups));
}
return s;
} catch (PatternSyntaxException e) {
throw new Error(e);
}
}
/**
 * Builds the diagnostic text used when {@code s} has {@code actualGroups}
 * capturing groups but {@code expectedGroups} were required.
 */
private static String regexErrorMessage(String s, int expectedGroups, int actualGroups) {
  return String.format("regex \"%s\" has %d groups, but %d groups are needed.",
      s, actualGroups, expectedGroups);
}
/**
 * Returns the count of capturing groups in the argument.
 */
private static int getGroupCount(Pattern p) {
  // groupCount() is a property of the pattern itself; matching "" never runs.
  return p.matcher("").groupCount();
}
}
| 8,285 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/DumpArchive.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.util.*;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.conf.Configuration;
/**
* Tool for exploring the contents of the Chukwa data archive, or a collection
* of Chukwa sequence files.
*
* Limitation: DumpArchive infers the filesystem to dump from based on the first
* path argument, and will behave strangely if you try to dump files
* from different filesystems in the same invocation.
*
*/
/**
 * Tool for exploring the contents of the Chukwa data archive, or a collection
 * of Chukwa sequence files.
 *
 * Limitation: DumpArchive infers the filesystem to dump from based on the first
 * path argument, and will behave strangely if you try to dump files
 * from different filesystems in the same invocation.
 */
public class DumpArchive {

  // When true, print only per-stream chunk counts instead of full chunk bodies.
  static boolean summarize = false;
  // Chunks seen per "source:datatype:stream" key, preserving first-seen order.
  static HashMap<String, Integer> counts = new LinkedHashMap<String, Integer>();

  /**
   * @param args is command line parameters
   * @throws URISyntaxException if problem parsing HDFS URL
   * @throws IOException if problem access HDFS
   */
  public static void main(String[] args) throws IOException, URISyntaxException {
    int firstArg = 0;
    if (args.length == 0) {
      System.out.println("Usage: DumpArchive [--summarize] <sequence files>");
      // BUGFIX: previously fell through and dereferenced args[0], throwing
      // ArrayIndexOutOfBoundsException instead of exiting cleanly.
      return;
    }
    if (args[0].equals("--summarize")) {
      firstArg = 1;
      summarize = true;
    }
    if (args.length <= firstArg) {
      // BUGFIX: "--summarize" with no file arguments used to crash below.
      System.out.println("Usage: DumpArchive [--summarize] <sequence files>");
      return;
    }
    ChukwaConfiguration conf = new ChukwaConfiguration();
    FileSystem fs;
    if (args[firstArg].contains("://")) {
      // an explicit filesystem URI on the command line wins
      fs = FileSystem.get(new URI(args[firstArg]), conf);
    } else {
      String fsName = conf.get("writer.hdfs.filesystem");
      if (fsName != null)
        fs = FileSystem.get(conf);
      else
        fs = FileSystem.getLocal(conf);
    }
    ArrayList<Path> filesToSearch = new ArrayList<Path>();
    for (int i = firstArg; i < args.length; ++i) {
      Path[] globbedPaths = FileUtil.stat2Paths(fs.globStatus(new Path(args[i])));
      // globStatus can yield nothing for a non-matching pattern; skip quietly
      // (same guard DumpChunks uses).
      if (globbedPaths != null)
        for (Path p : globbedPaths)
          filesToSearch.add(p);
    }
    int tot = filesToSearch.size();
    int i = 1;
    System.err.println("total of " + tot + " files to search");
    for (Path p : filesToSearch) {
      System.err.println("scanning " + p.toUri() + "(" + (i++) + "/" + tot + ")");
      dumpFile(p, conf, fs);
    }
    if (summarize) {
      for (Map.Entry<String, Integer> count : counts.entrySet()) {
        System.out.println(count.getKey() + ") ===> " + count.getValue());
      }
    }
  }

  /**
   * Reads every (key, chunk) pair of one sequence file, updating {@link #counts}
   * and, unless summarizing, printing each chunk's metadata and UTF-8 data.
   */
  private static void dumpFile(Path p, Configuration conf,
      FileSystem fs) throws IOException {
    SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);
    ChukwaArchiveKey key = new ChukwaArchiveKey();
    ChunkImpl chunk = ChunkImpl.getBlankChunk();
    try {
      while (r.next(key, chunk)) {
        String entryKey = chunk.getSource() + ":" + chunk.getDataType() + ":" +
            chunk.getStreamName();
        Integer oldC = counts.get(entryKey);
        if (oldC != null)
          counts.put(entryKey, oldC + 1);
        else
          counts.put(entryKey, Integer.valueOf(1));
        if (!summarize) {
          System.out.println("\nTimePartition: " + key.getTimePartition());
          System.out.println("DataType: " + key.getDataType());
          System.out.println("StreamName: " + key.getStreamName());
          System.out.println("SeqId: " + key.getSeqId());
          System.out.println("\t\t =============== ");
          System.out.println("Cluster : " + chunk.getTags());
          System.out.println("DataType : " + chunk.getDataType());
          System.out.println("Source : " + chunk.getSource());
          System.out.println("Application : " + chunk.getStreamName());
          System.out.println("SeqID : " + chunk.getSeqID());
          System.out.println("Data : " + new String(chunk.getData(), Charset.forName("UTF-8")));
        }
      }
    } catch (Exception e) {
      // best-effort tool: keep scanning remaining files even if one is corrupt
      e.printStackTrace();
    } finally {
      r.close(); // BUGFIX: the reader was previously never closed (resource leak)
    }
  }
}
| 8,286 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/DumpChunks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.regex.*;
import java.util.*;
import java.io.*;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.util.RegexUtil.CheckedPatternSyntaxException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.conf.Configuration;
public class DumpChunks {

  /**
   * Tries to find chunks matching a given pattern.
   * Takes as input a set of &-delimited patterns, followed
   * by a list of file names.
   *
   * E.g: Dump datatype=Iostat&source=/my/log/.* *.done
   * @param args is command line parameters
   * @throws IOException if problem access HDFS
   * @throws URISyntaxException if error parsing HDFS URL
   */
  public static void main(String[] args) throws IOException, URISyntaxException {
    if(args.length < 2) {
      System.out.println("usage: Dump [-s] pattern1,pattern2,pattern3... file1 file2 file3...");
      return;
    }
    ChukwaConfiguration conf = new ChukwaConfiguration();
    dump(args, conf, System.out);
  }

  // Picks the filesystem: an explicit "scheme://" URI wins; otherwise HDFS if
  // writer.hdfs.filesystem is configured, else the local filesystem.
  static FileSystem getFS(Configuration conf, String uri) throws IOException, URISyntaxException {
    FileSystem fs;
    if(uri.contains("://")) {
      fs = FileSystem.get(new URI(uri), conf);
    } else {
      String fsName = conf.get("writer.hdfs.filesystem");
      if(fsName == null)
        fs = FileSystem.getLocal(conf);
      else
        fs = FileSystem.get(conf);
    }
    System.err.println("filesystem is " + fs.getUri());
    return fs;
  }

  /**
   * Core driver: parses the optional -s/--nosort flag and the filter pattern,
   * globs the file arguments, then feeds every matching chunk to the chosen
   * DumpChunks variant (summarizing, unsorted, or offset-sorted) and prints
   * the result.
   */
  static void dump(String[] args, Configuration conf, PrintStream out) throws IOException, URISyntaxException {
    int filterArg = 0;
    boolean summarize = false;
    boolean nosort = false;
    if(args[0].equals("-s")) {
      filterArg++;
      summarize = true;
    } else if(args[0].equals("--nosort")) {
      filterArg++;
      nosort = true;
    }

    Filter patterns = null;
    if(args[filterArg].toLowerCase().equals("all"))
      patterns = Filter.ALL;
    else {
      try {
        patterns = new Filter(args[filterArg]);
      } catch (CheckedPatternSyntaxException pse) {
        System.err.println("Error parsing \"tags\" regular expression: " + pse.getMessage());
        return;
      }
    }

    System.err.println("Patterns:" + patterns);
    ArrayList<Path> filesToSearch = new ArrayList<Path>();

    FileSystem fs = getFS(conf, args[filterArg + 1]);
    for(int i=filterArg + 1; i < args.length; ++i){
      Path[] globbedPaths = FileUtil.stat2Paths(fs.globStatus(new Path(args[i])));
      if(globbedPaths != null)
        for(Path p: globbedPaths)
          filesToSearch.add(p);
    }

    System.err.println("expands to " + filesToSearch.size() + " actual files");

    DumpChunks dc;
    if(summarize)
      dc = new DumpAndSummarize();
    else if(nosort)
      dc = new DumpNoSort(out);
    else
      dc= new DumpChunks();

    try {
      for(Path p: filesToSearch) {
        SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);

        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl chunk = ChunkImpl.getBlankChunk();
        while (r.next(key, chunk)) {
          if(patterns.matches(chunk)) {
            dc.updateMatchCatalog(key.getStreamName(), chunk);
            // a fresh chunk each time: matched ones are retained in the catalog
            chunk = ChunkImpl.getBlankChunk();
          }
        }
      }

      dc.displayResults(out);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  public DumpChunks() {
    matchCatalog = new HashMap<String, SortedMap<Long, ChunkImpl> >();
  }

  // stream name -> (start offset in stream -> chunk); the sorted map lets
  // displayResults walk each stream's bytes in offset order.
  Map<String, SortedMap<Long, ChunkImpl>> matchCatalog;

  /**
   * Prints each stream's bytes in offset order. Tracks nextToPrint, the first
   * stream offset not yet written: chunks entirely before it are skipped as
   * duplicates, chunks starting past it indicate a gap (reported on stderr),
   * and partially-overlapping chunks have only their unseen suffix printed.
   */
  protected void displayResults(PrintStream out) throws IOException{
    for(Map.Entry<String,SortedMap<Long, ChunkImpl>> streamE: matchCatalog.entrySet()) {
      String header = streamE.getKey();
      SortedMap<Long, ChunkImpl> stream = streamE.getValue();
      long nextToPrint = 0;
      if(stream.firstKey() > 0)
        System.err.println("---- map starts at "+ stream.firstKey());
      for(Map.Entry<Long, ChunkImpl> e: stream.entrySet()) {
        if(e.getKey() >= nextToPrint) {
          if(e.getKey() > nextToPrint)
            System.err.println("---- printing bytes starting at " + e.getKey());

          out.write(e.getValue().getData());
          // getSeqID() is the offset just past this chunk's data
          nextToPrint = e.getValue().getSeqID();
        } else if(e.getValue().getSeqID() < nextToPrint) {
          continue; //data already printed
        } else {
          //tricky case: chunk overlaps with already-printed data, but not completely
          ChunkImpl c = e.getValue();
          long chunkStartPos = e.getKey();
          int numToPrint = (int) (c.getSeqID() - nextToPrint);
          int printStartOffset = (int) ( nextToPrint - chunkStartPos);
          out.write(c.getData(), printStartOffset, numToPrint);
          nextToPrint = c.getSeqID();
        }
      }
      out.println("\n--------"+header + "--------");
    }
  }

  /**
   * Records a matched chunk under its stream, keyed by its start offset
   * (seqID minus length). When two chunks share a start offset, the longer
   * one wins.
   */
  protected void updateMatchCatalog(String streamName, ChunkImpl chunk) throws IOException {

    SortedMap<Long, ChunkImpl> chunksInStream = matchCatalog.get(streamName);
    if(chunksInStream == null ) {
      chunksInStream = new TreeMap<Long, ChunkImpl>();
      matchCatalog.put(streamName, chunksInStream);
    }

    long startPos = chunk.getSeqID() - chunk.getLength();

    ChunkImpl prevMatch = chunksInStream.get(startPos);
    if(prevMatch == null)
      chunksInStream.put(startPos, chunk);
    else { //pick longest
      if(chunk.getLength() > prevMatch.getLength())
        chunksInStream.put (startPos, chunk);
    }
  }

  // Variant used for -s: counts chunks and bytes per stream, prints no data.
  static class DumpAndSummarize extends DumpChunks {
    Map<String, Integer> matchCounts = new LinkedHashMap<String, Integer>();
    Map<String, Long> byteCounts = new LinkedHashMap<String, Long>();

    protected void displayResults(PrintStream out) throws IOException{
      for(Map.Entry<String, Integer> s: matchCounts.entrySet()) {
        out.print(s.getKey());
        out.print(" ");
        out.print(s.getValue());
        out.print(" chunks ");
        out.print(byteCounts.get(s.getKey()));
        out.println(" bytes");
      }
    }

    protected void updateMatchCatalog(String streamName, ChunkImpl chunk) {
      Integer i = matchCounts.get(streamName);
      if(i != null) {
        matchCounts.put(streamName, i+1);
        Long b = byteCounts.get(streamName);
        byteCounts.put(streamName, b + chunk.getLength());
      } else {
        matchCounts.put(streamName, Integer.valueOf(1));
        byteCounts.put(streamName, Long.valueOf(chunk.getLength()));
      }
    }
  }

  // Variant used for --nosort: writes chunk data immediately in arrival
  // order, with no duplicate/overlap handling.
  static class DumpNoSort extends DumpChunks {
    PrintStream out;
    public DumpNoSort(PrintStream out) {
      this.out = out;
    }
    //Do some display
    protected void updateMatchCatalog(String streamName, ChunkImpl chunk) throws IOException {
      out.write(chunk.getData());
    }

    protected void displayResults(PrintStream out) throws IOException{
      //did this in updateMatchCatalog
    }
  }
}
| 8,287 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/RestUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import javax.ws.rs.core.MediaType;
import org.apache.log4j.Logger;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.WebResource;
/* This should contain set of helper methods to convert the
* response returned from any web servers to required formats
* This can be modified to accept different headers based on the
* file formats.
*/
public class RestUtil {
private static WebResource webResource;
private static Client webClient;
private static Logger log = Logger.getLogger(RestUtil.class);
public static String getResponseAsString(String URI) {
if (URI == null) {
throw new IllegalStateException("URI cannot be blank");
}
String response = null;
webClient = Client.create();
try {
webResource = webClient.resource(URI);
response = webResource.accept(MediaType.APPLICATION_JSON_TYPE).get(
String.class);
} catch (ClientHandlerException e) {
Throwable t = e.getCause();
if (t instanceof java.net.ConnectException) {
log.warn("Connect exception trying to connect to [" + URI
+ "]. Make sure the service is running");
} else {
log.error(ExceptionUtil.getStackTrace(e));
}
} finally {
if (webClient != null) {
webClient.destroy();
}
}
return response;
}
} | 8,288 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ClusterConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.*;
import java.nio.charset.Charset;
import java.util.*;
public class ClusterConfig {
  // cluster name -> JDBC URL, loaded from $CHUKWA_CONF_DIR/jdbc.conf
  private HashMap<String, String> clusterMap = new HashMap<String, String>();
  private String path = System.getenv("CHUKWA_CONF_DIR") + File.separator;

  /**
   * Reads an entire file as UTF-8 and returns its contents with each line
   * followed by the platform line separator. On I/O error the stack trace is
   * printed and whatever was read so far (possibly "") is returned.
   * @param aFile file to read
   * @return the file contents, line by line
   */
  static public String getContents(File aFile) {
    StringBuffer contents = new StringBuffer();
    try {
      BufferedReader input = new BufferedReader(new InputStreamReader(
          new FileInputStream(aFile.getAbsolutePath()), Charset.forName("UTF-8")));
      try {
        String line = null;
        /*
         * readLine is a bit quirky: it returns the content of a line MINUS
         * the newline, null only at end of stream, and an empty String when
         * two newlines appear in a row.
         */
        while ((line = input.readLine()) != null) {
          contents.append(line);
          contents.append(System.getProperty("line.separator"));
        }
      } finally {
        input.close();
      }
    } catch (IOException ex) {
      ex.printStackTrace();
    }
    return contents.toString();
  }

  /**
   * Loads cluster definitions from jdbc.conf, one "name=url" entry per line.
   * Lines containing no '=' (including blank lines) are skipped.
   */
  public ClusterConfig() {
    File cc = new File(path + "jdbc.conf");
    String buffer = getContents(cc);
    String[] lines = buffer.split("\n");
    for (String line : lines) {
      String[] data = line.split("=", 2);
      if (data.length < 2) {
        // BUGFIX: blank or '='-less lines previously threw
        // ArrayIndexOutOfBoundsException on data[1].
        continue;
      }
      clusterMap.put(data[0], data[1]);
    }
  }

  /** @return the JDBC URL configured for the given cluster, or null. */
  public String getURL(String cluster) {
    return clusterMap.get(cluster);
  }

  /** @return an iterator over all configured cluster names. */
  public Iterator<String> getClusters() {
    return clusterMap.keySet().iterator();
  }
}
| 8,289 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/MaxRateSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.util.Random;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.*;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorManager;
import org.apache.hadoop.chukwa.datacollection.adaptor.*;
/**
 * Load-testing adaptor: once started, pushes randomly-filled 60KB chunks to
 * its ChunkReceiver as fast as the receiver will accept them, until shut down.
 */
public class MaxRateSender extends Thread implements Adaptor {

  public static final int BUFFER_SIZE = 60 * 1024;
  public static final String ADAPTOR_NAME = "MaxRateSender";

  // set once by shutdown(); checked by the sender loop
  private volatile boolean stopping = false;
  // running byte offset of the synthetic stream
  private long offset;
  private String type;
  ChunkReceiver dest;

  public String getCurrentStatus() {
    return type;
  }

  public void start(String adaptorID, String type, long offset,
      ChunkReceiver dest) throws AdaptorException {
    this.setName("MaxRateSender adaptor");
    this.offset = offset;
    this.type = type;
    this.dest = dest;
    super.start(); // this is a Thread.start
  }

  @Override
  public String parseArgs(String d, String s, AdaptorManager c) {
    return s;
  }

  public void run() {
    Random random = new Random();
    try {
      while (!stopping) {
        byte[] payload = new byte[BUFFER_SIZE];
        random.nextBytes(payload);
        offset += payload.length;
        ChunkImpl nextChunk =
            new ChunkImpl(type, "random data source", offset, payload, this);
        dest.add(nextChunk);
      }
    } catch (InterruptedException ie) {
      // interrupted while handing off a chunk: just let the thread exit
    }
  }

  public String toString() {
    return ADAPTOR_NAME;
  }

  @Override
  public long shutdown(AdaptorShutdownPolicy shutdownPolicy) {
    stopping = true;
    return offset;
  }

  @Override
  public String getType() {
    return type;
  }
}
| 8,290 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/ConstRateValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.*;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
public class ConstRateValidator extends Configured implements Tool{

  /**
   * A (stream, split-file, start, length) tuple describing one chunk's byte
   * range within its stream. Ordering is by stream, then start, then length,
   * then split, so a sorted pass over ByteRanges visits each stream's bytes
   * in offset order -- which is what ValidatorSM requires.
   */
  public static class ByteRange implements WritableComparable<ByteRange> {
    String stream;
    String split ="";
    public long start;
    public long len;

    public ByteRange() {
      start=len=0;
    }

    public ByteRange(ChunkImpl val) {
      len = val.getLength();
      // start is derived by backing off len bytes from the chunk's seqID
      // (seqID thus appears to mark the end offset of the chunk's data)
      start = val.getSeqID() - len;
      this.stream = val.getSource()+":"+val.getStreamName() ;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      stream = in.readUTF();
      split = in.readUTF();
      start = in.readLong();
      len = in.readLong();
    }

    @Override
    public void write(DataOutput out) throws IOException {
      out.writeUTF(stream);
      out.writeUTF(split);
      out.writeLong(start);
      out.writeLong(len);
    }

    public static ByteRange read(DataInput in) throws IOException {
      ByteRange b = new ByteRange();
      b.readFields(in);
      return b;
    }

    @Override
    public int compareTo(ByteRange o) {
      int c = stream.compareTo(o.stream);
      if(c != 0)
        return c;
      if(start > o.start)
        return 1;
      else if (start < o.start)
        return -1;
      else {
        if(len > o.len)
          return 1;
        else if(len < o.len)
          return -1;
        else
          return split.compareTo(o.split);
      }
    }

    // equals/hashCode cover the same four fields as compareTo, keeping
    // equality consistent with the ordering above.
    public boolean equals(Object o) {
      if(o instanceof ByteRange) {
        ByteRange rhs = (ByteRange) o;
        return stream.equals(rhs.stream) &&
          split.equals(rhs.split)&& rhs.start == start && rhs.len == len;
      } else
        return false;
    }

    public int hashCode() {
      // mixes the stream hash with the high and low words of len and start
      return (int) (
          stream.hashCode() ^ (len>>32) ^ (len & 0xFFFFFFFF) ^ (start >> 32)
          ^ (start & 0xFFFFFFFF));
    }
  }

  /////// State machine; expects chunks in order ////////
  /**
   * Per-stream validator. Fed ByteRanges in offset order, it classifies each
   * range against nextExpectedStart as OK, duplicate (fully or partially
   * overlapping already-seen bytes), or evidence of missing bytes, and
   * accumulates byte counters for each category.
   */
  public static class ValidatorSM {
    // ok/missingBytes/dupBytes count bytes in each category
    public long ok=0, missingBytes=0,dupBytes=0;
    // length of the current run of consecutive duplicate chunks
    long consecDupchunks=0;
    // first stream offset not yet accounted for
    long nextExpectedStart = 0;
    public long chunks;
    public long dupChunks;
    public Set<String> filesContaining = new LinkedHashSet<String>();

    public String closeSM() {
      if(consecDupchunks > 0)
        // NOTE(review): this message reports the dup-run length where an
        // offset seems intended ("ending at " + consecDupchunks) -- confirm.
        return consecDupchunks + " consecutive duplicate chunks ending at " + consecDupchunks;
      else
        return null;
    }

    /**
     * Advances the state machine by one range; returns a human-readable
     * anomaly description, or null if the range was unremarkable.
     */
    public String advanceSM(ByteRange b) {
      if(!b.split.equals(""))
        filesContaining.add(b.split);
      chunks++;

      if(b.start == nextExpectedStart) {
        // expected case: range begins exactly where the stream left off
        String msg = null;
        if(consecDupchunks > 0)
          msg = consecDupchunks + " consecutive duplicative chunks ending at " + b.start;
        consecDupchunks = 0;
        nextExpectedStart += b.len;
        ok += b.len;
        return msg;
      } else{
        // Text msg = new Text(b.stream + " " + consecOKchunks +
        // "consecutive OK chunks ending at " + nextExpectedStart);
        String msg;
        if(b.start < nextExpectedStart) { //duplicate bytes
          consecDupchunks ++;
          dupChunks++;
          long duplicatedBytes;
          if(b.start + b.len <= nextExpectedStart) {
            // fully contained in already-seen bytes
            duplicatedBytes = b.len;
            msg =" dupchunk of length " + b.len + " at " + b.start;
          } else {
            // partial overlap: only the prefix duplicates, the tail is new
            duplicatedBytes = b.start + b.len - nextExpectedStart;
            ok += b.len - duplicatedBytes;
            msg = " overlap of " + duplicatedBytes+ " starting at " + b.start +
                " (total chunk len ="+b.len+")";
          }
          dupBytes += duplicatedBytes;
          nextExpectedStart = Math.max(b.start + b.len, nextExpectedStart);
        } else { //b.start > nextExpectedStart ==> missing bytes
          consecDupchunks = 0;
          long missing = (b.start - nextExpectedStart);
          msg = "==Missing "+ missing+ " bytes starting from " + nextExpectedStart;
          nextExpectedStart = b.start + b.len;
          if(b.start < 0 || b.len < 0)
            System.out.println("either len or start was negative; something is seriously wrong");

          missingBytes += missing;
        }
        return msg;
      } //end not-OK
    } //end advance
  } //end class

  /////// Map Class /////////
  /**
   * Emits one ByteRange per chunk, tagged with the input split's filename so
   * the reducer can report which files contained each stream's data. Chunks
   * failing ConstRateAdaptor's content check bump the "badchunks" counter.
   */
  public static class MapClass extends Mapper <ChukwaArchiveKey, ChunkImpl, ByteRange, NullWritable> {

    @Override
    protected void map(ChukwaArchiveKey key, ChunkImpl val,
        Mapper<ChukwaArchiveKey, ChunkImpl,ByteRange, NullWritable>.Context context)
        throws IOException, InterruptedException
    {
      boolean valid = ConstRateAdaptor.checkChunk(val);
      String fname = "unknown";

      ByteRange ret = new ByteRange(val);

      InputSplit inSplit = context.getInputSplit();
      if(inSplit instanceof FileSplit) {
        FileSplit fs = (FileSplit) inSplit;
        fname = fs.getPath().getName();
      }
      ret.split = fname;

      if(!valid) {
        context.getCounter("app", "badchunks").increment(1);
      }
      context.write(ret, NullWritable.get());
    }
  }

  /**
   * Receives each stream's ByteRanges in sorted order (thanks to ByteRange's
   * compareTo) and rolls a fresh ValidatorSM per stream, emitting anomaly
   * messages as they occur and a summary when the stream (or task) ends.
   */
  public static class ReduceClass extends Reducer<ByteRange, NullWritable, Text,Text> {
    ValidatorSM sm;
    String curStream = "";

    public ReduceClass() {
      sm = new ValidatorSM();
    }

    // @Override
    // protected void setup(Reducer<ByteRange, NullWritable, Text,Text>.Context context) { }

    @Override
    protected void reduce(ByteRange b, Iterable<NullWritable> vals,
        Reducer<ByteRange, NullWritable, Text,Text>.Context context) {
      try {
        if(!curStream.equals(b.stream)) {
          if(!curStream.equals("")) {
            // stream boundary: flush the summary for the previous stream
            printEndOfStream(context);
          }
          System.out.println("rolling over to new stream " + b.stream);
          curStream = b.stream;
          sm = new ValidatorSM();
        }
        String msg = sm.advanceSM(b);
        if(msg != null)
          context.write(new Text(b.stream), new Text(msg));

      } catch(InterruptedException e) {
      } catch(IOException e) {
        e.printStackTrace();
      }
    }

    @Override
    protected void cleanup(Reducer<ByteRange, NullWritable, Text,Text>.Context context)
        throws IOException, InterruptedException{
      // flush the summary for the final stream of this reduce task
      printEndOfStream(context);
    }

    /**
     * Writes the per-stream summary (dup-run tail, contributing files, chunk
     * totals) and folds the byte counters into job counters.
     */
    public void printEndOfStream(Reducer<ByteRange, NullWritable, Text,Text>.Context context)
        throws IOException, InterruptedException {
      Text cs = new Text(curStream);

      String t = sm.closeSM();
      if(t != null)
        context.write(cs, new Text(t));
      if(!sm.filesContaining.isEmpty()) {
        StringBuilder sb = new StringBuilder();
        sb.append("Data contained in");
        for(String s: sm.filesContaining)
          sb.append(" ").append(s);
        context.write(cs, new Text(sb.toString()));
      }
      context.write(cs, new Text("total of " + sm.chunks + " chunks ("
          + sm.dupChunks + " dups). " +" High byte =" + (sm.nextExpectedStart-1)));

      context.getCounter("app", "missing bytes").increment(sm.missingBytes);
      context.getCounter("app", "duplicate bytes").increment(sm.dupBytes);
      context.getCounter("app", "OK Bytes").increment(sm.ok);
    }
  } //end reduce class

  public static void main(String[] args) throws Exception {
    // System.out.println("specify -D textOutput=true for text output");
    // ToolRunner's exit code is discarded; the JVM always exits 0 here
    int res = ToolRunner.run(new Configuration(),
        new ConstRateValidator(), args);
    return;
  }

  /**
   * Configures and submits the validation job: sequence-file input through
   * MapClass/ReduceClass to text output. args[0] is the input path, args[1]
   * the output path. Submits without waiting for completion.
   */
  @Override
  public int run(String[] real_args) throws Exception {
    GenericOptionsParser gop = new GenericOptionsParser(getConf(), real_args);
    Configuration conf = gop.getConfiguration();
    String[] args = gop.getRemainingArgs();

    Job validate = new Job(conf);

    validate.setJobName("Chukwa Test pattern validator");
    validate.setJarByClass(this.getClass());

    validate.setInputFormatClass(SequenceFileInputFormat.class);

    validate.setMapperClass(MapClass.class);
    validate.setMapOutputKeyClass(ByteRange.class);
    validate.setMapOutputValueClass(NullWritable.class);

    validate.setReducerClass(ReduceClass.class);
    validate.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.setInputPaths(validate, new Path(args[0]));
    FileOutputFormat.setOutputPath(validate, new Path(args[1]));

    validate.submit();
    return 0;
  }
}
| 8,291 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/RecordConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
public class RecordConstants {
  static final char[] CTRL_A = { '\u0001' };
  static final char[] CTRL_B = { '\u0002' };
  static final char[] CTRL_C = { '\u0003' };
  static final char[] CTRL_D = { '\u0004' };

  // public static final String FIELD_SEPARATOR = new String(CTRL_A);
  public static final String DEFAULT_FIELD_SEPARATOR = "-#-";
  public static final String DEFAULT_RECORD_SEPARATOR = "\n";
  // Marker inserted in front of record separators to escape them; CTRL-D was
  // chosen as unlikely to appear in real data. (Could be made more obscure,
  // e.g. CTRL-B + CTRL-C + CTRL-D.)
  public static final String RECORD_SEPARATOR_ESCAPE_SEQ = new String(CTRL_D);

  /**
   * Insert the default chukwa escape sequence in <code>record</code> before all
   * occurances of <code>recordSeparator</code> <i>except</i> the final one if
   * the final record separator occurs at the end of the <code>record</code>.
   * If <code>record</code> does not end with <code>recordSeparator</code>, an
   * empty string is returned.
   *
   * @param recordSeparator The record separator that we are escaping. This is
   *        chunk source application specific
   * @param record The string representing the entire record, including the
   *        final record delimiter
   * @return The string with appropriate <code>recordSeparator</code>s escaped
   */
  public static String escapeAllButLastRecordSeparator(String recordSeparator,
      String record) {
    if (!record.endsWith(recordSeparator)) {
      return "";
    }
    // escape only within the body, then re-attach the trailing separator
    int bodyEnd = record.length() - recordSeparator.length();
    String body = record.substring(0, bodyEnd);
    return body.replaceAll(recordSeparator,
        RECORD_SEPARATOR_ESCAPE_SEQ + recordSeparator) + recordSeparator;
  }

  /**
   * Insert the default chukwa escape sequence in <code>record</code> before all
   * occurances of <code>recordSeparator</code>. This assumes the final record
   * separator is NOT included in <code>record</code>, since it too would be
   * escaped.
   *
   * @param recordSeparator The record separator that we are escaping. This is
   *        chunk source application specific
   * @param record The string representing the entire record
   * @return The string with all <code>recordSeparator</code>s escaped
   */
  public static String escapeAllRecordSeparators(String recordSeparator,
      String record) {
    return record.replaceAll(recordSeparator,
        RECORD_SEPARATOR_ESCAPE_SEQ + recordSeparator);
  }

  /**
   * Reverses the escaping above: strips the escape sequence from every
   * escaped <code>recordSeparator</code>.
   */
  public static String recoverRecordSeparators(String recordSeparator,
      String record) {
    return record.replaceAll(RECORD_SEPARATOR_ESCAPE_SEQ + recordSeparator,
        recordSeparator);
  }
}
| 8,292 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/DumpRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
public class DumpRecord {
/**
* @param args is command line parameters
* @throws URISyntaxException if problem parsing URL
* @throws IOException if problem reading files on HDFS
*/
public static void main(String[] args) throws IOException, URISyntaxException {
System.out.println("Input file:" + args[0]);
ChukwaConfiguration conf = new ChukwaConfiguration();
String fsName = conf.get("writer.hdfs.filesystem");
FileSystem fs = FileSystem.get(new URI(fsName), conf);
SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(args[0]), conf);
ChukwaRecordKey key = new ChukwaRecordKey();
ChukwaRecord record = new ChukwaRecord();
try {
while (r.next(key, record)) {
System.out.println("\t ===== KEY ===== ");
System.out.println("DataType: " + key.getReduceType());
System.out.println("\nKey: " + key.getKey());
System.out.println("\t ===== Value =====");
String[] fields = record.getFields();
System.out.println("Timestamp : " + record.getTime());
for (String field : fields) {
System.out.println("[" + field + "] :" + record.getValue(field));
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
| 8,293 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/TempFileUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.*;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.Random;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
public class TempFileUtil {
public static File makeBinary(int length) throws IOException {
File tmpOutput = new File(System.getProperty("test.build.data", "/tmp"),
"chukwaTest");
FileOutputStream fos = null;
try {
fos = new FileOutputStream(tmpOutput);
Random r = new Random();
byte[] randomData = new byte[length];
r.nextBytes(randomData);
randomData[length - 1] = '\n';// need data to end with \n since default
// tailer uses that
fos.write(randomData);
fos.flush();
} finally {
if(fos != null) {
fos.close();
}
}
return tmpOutput;
}
static class RandSeqFileWriter {
java.util.Random r = new java.util.Random();
long lastSeqID = 0;
public ChunkImpl getARandomChunk() {
int ms = r.nextInt(1000);
String line = "2008-05-29 10:42:22," + ms
+ " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
+ r.nextInt() + "\n";
ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
line.length() + lastSeqID, line.getBytes(Charset.forName("UTF-8")), null);
lastSeqID += line.length();
c.addTag("cluster=\"foocluster\"");
return c;
}
}
public static void writeASinkFile(Configuration conf, FileSystem fileSys, Path dest,
int chunks) throws IOException {
FSDataOutputStream out = fileSys.create(dest);
Calendar calendar = Calendar.getInstance();
SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
ChukwaArchiveKey.class, ChunkImpl.class,
SequenceFile.CompressionType.NONE, null);
RandSeqFileWriter rw = new RandSeqFileWriter();
for (int i = 0; i < chunks; ++i) {
ChunkImpl chunk = rw.getARandomChunk();
ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
calendar.set(Calendar.YEAR, 2008);
calendar.set(Calendar.MONTH, Calendar.MAY);
calendar.set(Calendar.DAY_OF_MONTH, 29);
calendar.set(Calendar.HOUR, 10);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
archiveKey.setTimePartition(calendar.getTimeInMillis());
archiveKey.setDataType(chunk.getDataType());
archiveKey.setStreamName(chunk.getStreamName());
archiveKey.setSeqId(chunk.getSeqID());
seqFileWriter.append(archiveKey, chunk);
}
seqFileWriter.close();
out.close();
}
public static File makeTestFile(String name, int size,File baseDir) throws IOException {
File tmpOutput = new File(baseDir, name);
FileOutputStream fos = new FileOutputStream(tmpOutput);
PrintWriter pw = new PrintWriter(new OutputStreamWriter(fos, Charset.forName("UTF-8")));
for (int i = 0; i < size; ++i) {
pw.print(i + " ");
pw.println("abcdefghijklmnopqrstuvwxyz");
}
pw.flush();
pw.close();
return tmpOutput;
}
public static File makeTestFile(String name, int size) throws IOException {
return makeTestFile(name, size, new File(System.getProperty("test.build.data", "/tmp")));
}
public static File makeTestFile(File baseDir) throws IOException {
return makeTestFile("atemp",10, baseDir);
}
public static File makeTestFile() throws IOException {
return makeTestFile("atemp",80, new File(System.getProperty("test.build.data", "/tmp")));
}
}
| 8,294 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/CreateRecordFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MapProcessor;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.TsProcessor;
import org.apache.hadoop.chukwa.extraction.demux.Demux;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.File;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
/**
* Helper class used to create sequence files of Chukwa records
*/
public class CreateRecordFile {
public static void makeTestSequenceFile(File inputFile,
Path outputFile,
String clusterName,
String dataType,
String streamName,
MapProcessor processor) throws IOException {
//initialize the output collector and the default processor
MockOutputCollector collector = new MockOutputCollector();
if (processor == null) processor = new TsProcessor();
//initialize the sequence file writer
Configuration conf = new Configuration();
FileSystem fs = outputFile.getFileSystem(conf);
FSDataOutputStream out = fs.create(outputFile);
SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
ChukwaRecordKey.class, ChukwaRecord.class,
SequenceFile.CompressionType.NONE, null);
long lastSeqID = 0;
String line;
FileInputStream fis = new FileInputStream(inputFile);
BufferedReader reader = new BufferedReader(new InputStreamReader(fis, Charset.forName("UTF-8")));
// for each line, create a chunk and an arckive key, pass it to the
// processor, then write it to the sequence file.
while ((line = reader.readLine()) != null) {
ChunkImpl chunk = new ChunkImpl(dataType, streamName,
line.length() + lastSeqID, line.getBytes(Charset.forName("UTF-8")), null);
lastSeqID += line.length();
chunk.addTag("cluster=\"" + clusterName + "\"");
ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
archiveKey.setTimePartition(System.currentTimeMillis());
archiveKey.setDataType(chunk.getDataType());
archiveKey.setStreamName(chunk.getStreamName());
archiveKey.setSeqId(chunk.getSeqID());
processor.process(archiveKey, chunk, collector, Reporter.NULL);
seqFileWriter.append(collector.getChukwaRecordKey(),
collector.getChukwaRecord());
}
out.flush();
out.close();
seqFileWriter.close();
reader.close();
}
private static class MockOutputCollector
implements OutputCollector<ChukwaRecordKey, ChukwaRecord> {
ChukwaRecordKey chukwaRecordKey;
ChukwaRecord chukwaRecord;
public void collect(ChukwaRecordKey chukwaRecordKey,
ChukwaRecord chukwaRecord) throws IOException {
this.chukwaRecordKey = chukwaRecordKey;
this.chukwaRecord = chukwaRecord;
}
public ChukwaRecordKey getChukwaRecordKey() { return chukwaRecordKey; }
public ChukwaRecord getChukwaRecord() { return chukwaRecord; }
}
public static void main(String[] args) throws IOException,
ClassNotFoundException,
IllegalAccessException,
InstantiationException {
if(args.length == 0 || (args.length==1 && args[0].contains("-h"))) {
usage();
return;
}
File inputFile = new File(args[0]);
Path outputFile = new Path(args[1]);
String clusterName = "testClusterName";
String dataType = "testDataType";
String streamName = "testStreamName";
MapProcessor processor = new TsProcessor();
Path confFile = null;
if (args.length > 2) clusterName = args[2];
if (args.length > 3) dataType = args[3];
if (args.length > 4) streamName = args[4];
if (args.length > 5) {
Class<?> clazz = null;
try {
clazz = Class.forName(args[5]);
}
catch (ClassNotFoundException e) {
try {
clazz = Class.forName(
"org.apache.hadoop.chukwa.extraction.demux.processor.mapper." + args[5]);
}
catch (Exception e2) {
throw e;
}
}
processor = (MapProcessor)clazz.newInstance();
}
if (args.length > 6) {
confFile = new Path(args[6]);
Demux.jobConf = new JobConf(confFile);
}
System.out.println("Creating sequence file using the following input:");
System.out.println("inputFile : " + inputFile);
System.out.println("outputFile : " + outputFile);
System.out.println("clusterName: " + clusterName);
System.out.println("dataType : " + dataType);
System.out.println("streamName : " + streamName);
System.out.println("processor : " + processor.getClass().getName());
System.out.println("confFile : " + confFile);
makeTestSequenceFile(inputFile, outputFile, clusterName, dataType, streamName, processor);
System.out.println("Done");
}
public static void usage() {
System.out.println("Usage: java " + CreateRecordFile.class.toString().split(" ")[1] + " <inputFile> <outputFile> [<clusterName> <dataType> <streamName> <processorClass> [confFile]]");
System.out.println("Description: Takes a plain text input file and generates a Hadoop sequence file contaning ChukwaRecordKey,ChukwaRecord entries");
System.out.println("Parameters: inputFile - Text input file to read");
System.out.println(" outputFile - Sequence file to create");
System.out.println(" clusterName - Cluster name to use in the records");
System.out.println(" dataType - Data type to use in the records");
System.out.println(" streamName - Stream name to use in the records");
System.out.println(" processorClass - Processor class to use. Defaults to TsProcessor");
System.out.println(" confFile - File to use to create the JobConf");
}
}
| 8,295 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/CopySequenceFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.IOException;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Logger;
/**
* This class is used by LocalToRemoteHDFSMover to
* convert .chukwa files to .done before moving them.
* By creating a new sequence file and copying all valid chunks to it,
* it makes sure that no corrupt sequence files get into HDFS.
*/
public class CopySequenceFile {
static Logger log = Logger.getLogger(CopySequenceFile.class);
private static SequenceFile.Writer seqFileWriter = null;
private static SequenceFile.Reader seqFileReader = null;
private static FSDataOutputStream newOutputStr = null;
public static void createValidSequenceFile(Configuration conf,
String originalFileDir,
String originalFileName,
FileSystem localFs) {
try {
if (!originalFileDir.endsWith("/")) {
originalFileDir += "/";
}
String originalCompleteDir= originalFileDir + originalFileName;
Path originalPath= new Path(originalCompleteDir);
int extensionIndex= originalFileName.indexOf(".chukwa",0);
String recoverFileName=originalFileName.substring(0, extensionIndex)+".recover";
String recoverDir= originalFileDir + recoverFileName;
Path recoverPath= new Path(recoverDir);
String recoverDoneFileName=originalFileName.substring(0, extensionIndex)+".recoverDone";
String recoverDoneDir= originalFileDir + recoverDoneFileName;
Path recoverDonePath= new Path(recoverDoneDir);
String doneFileName=originalFileName.substring(0, extensionIndex)+".done";
String doneDir= originalFileDir + doneFileName;
Path donePath= new Path(doneDir);
ChukwaArchiveKey key = new ChukwaArchiveKey();
ChunkImpl evt = ChunkImpl.getBlankChunk();
newOutputStr = localFs.create(recoverPath);
seqFileWriter = SequenceFile.createWriter(conf, newOutputStr,
ChukwaArchiveKey.class, ChunkImpl.class,
SequenceFile.CompressionType.NONE, null);
seqFileReader = new SequenceFile.Reader(localFs, originalPath, conf);
System.out.println("key class name is " + seqFileReader.getKeyClassName());
System.out.println("value class name is " + seqFileReader.getValueClassName());
try {
while (seqFileReader.next(key, evt)) {
seqFileWriter.append(key, evt);
}
} catch (ChecksumException e) { //The exception occurs when we read a bad chunk while copying
log.info("Encountered Bad Chunk while copying .chukwa file, continuing",e);
}
seqFileReader.close();
seqFileWriter.close();
newOutputStr.close();
try {
localFs.rename(recoverPath, recoverDonePath); //Rename the destination file from .recover to .recoverDone
localFs.delete(originalPath,false); //Delete Original .chukwa file
localFs.rename(recoverDonePath, donePath); //rename .recoverDone to .done
} catch (Exception e) {
log.warn("Error occured while renaming .recoverDone to .recover or deleting .chukwa",e);
e.printStackTrace();
}
} catch(IOException e) {
log.warn("Error during .chukwa file recovery",e);
e.printStackTrace();
}
}
}
| 8,296 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/NagiosHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import org.apache.log4j.Logger;
public class NagiosHelper {
static Logger log = Logger.getLogger(NagiosHelper.class);
public static final int NAGIOS_OK = 0;
public static final int NAGIOS_WARN = 1;
public static final int NAGIOS_CRITICAL = 2;
public static final int NAGIOS_UNKNOWN = 3;
public static void sendNsca(String msg, int state) {
if(state==NAGIOS_OK) {
log.info(msg);
} else if (state==NAGIOS_WARN) {
log.warn(msg);
} else if (state==NAGIOS_CRITICAL) {
log.error(msg);
} else if (state==NAGIOS_UNKNOWN) {
log.warn(msg);
}
}
}
| 8,297 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/HierarchyDataType.java | /*
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.log4j.Logger;
/**
* To support hierarchyDataType according to CHUKWA-648, which is quite similar
* to the idea of Hive's Partition. For example, the user can define the
* dataType as "datatypeLevel1/dataTypeLevel2/dataTypeLevel3" instead of a flat
* structure like: "datatypeLevel1_datatTypeLeve2_dataTypeLevel3" <BR>
* <BR>
* The hierarchyDataType makes the filtering work much more easy when doing the
* analysis job. For example, if the user focuses on all data under
* "datatypeLevel1/dataTypeLevel2" category, he only needs to go through all
* level2 related sub-directories.
*/
public class HierarchyDataType {
static Logger log = Logger.getLogger(HierarchyDataType.class);
/**
* List all matched files under the directory and its sub-dirs
* @param fs The file system
* @param path The parent folder
* @param filter The pattern matcher to filter the required files
* @param recursive is a flag to search recursively
* @return list of FileStatus
*/
public static List<FileStatus> globStatus(FileSystem fs, Path path,
PathFilter filter, boolean recursive) {
List<FileStatus> results = new ArrayList<FileStatus>();
try {
FileStatus[] candidates = fs.globStatus(path);
for (FileStatus candidate : candidates) {
log.debug("candidate is:" + candidate);
Path p = candidate.getPath();
if (candidate.isDir() && recursive) {
StringBuilder subpath = new StringBuilder(p.toString());
subpath.append("/*");
log.debug("subfolder is:" + p);
results.addAll(globStatus(fs, new Path(subpath.toString()), filter,
recursive));
} else {
log.debug("Eventfile is:" + p);
FileStatus[] qualifiedfiles = fs.globStatus(p, filter);
if (qualifiedfiles != null && qualifiedfiles.length > 0) {
log.debug("qualified Eventfile is:" + p);
Collections.addAll(results, qualifiedfiles);
}
}
}
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
log.debug("results.length: " + results.size());
return results;
}
/**
* List all files under certain path and its sub-directories
* @param fs The file system
* @param path The parent folder
* @param recursive is flag to search recursive
* @return The list of all sub-dirs
*/
public static List<FileStatus> globStatus(FileSystem fs, Path path,
boolean recursive) {
List<FileStatus> results = new ArrayList<FileStatus>();
try {
FileStatus[] candidates = fs.listStatus(path);
if (candidates.length > 0) {
for (FileStatus candidate : candidates) {
log.debug("candidate is:" + candidate);
Path p = candidate.getPath();
if (candidate.isDir() && recursive) {
results.addAll(globStatus(fs, p, recursive));
}
}
} else {
log.debug("path is:" + path);
results.add(fs.globStatus(path)[0]);
}
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return results;
}
/**
* Get the hierarchyDataType format from the directory.
*
* @param path The data path
* @param cluster The cluster's folder
* @return The hierarchyDataType
*/
public static String getDataType(Path path, Path cluster) {
log.debug("datasource path: " + path + " cluster path: " + cluster);
String Cluster = cluster.toString();
if (!Cluster.endsWith("/")) {
Cluster = Cluster + "/";
}
String dataType = path.toString().replaceFirst(Cluster, "");
log.debug("The datatype is: " + dataType);
return dataType;
}
/**
* Get the directory without first and last slash mark.
*
* @param datasource is a string
* @return same string with ending slash trimmed
*/
public static String trimSlash(String datasource) {
String results = datasource;
if (datasource.startsWith("/")) {
results = datasource.replaceFirst("/", "");
}
if (results.endsWith("/")) {
results = results.substring(0, results.length()-1);
}
return results;
}
/**
* Transform the hierarchyDatatType directory into its filename (without any
* slash mark)
*
* @param datasource is a string
* @return path to data source
*/
public static String getHierarchyDataTypeFileName(String datasource){
return datasource.replace("/", CHUKWA_CONSTANT.HIERARCHY_CONNECTOR);
}
/**
* Transform the hierarchyDataType filename into its directory name (with
* slash mark)
*
* @param datasource is a string
* @return path to data source
*/
public static String getHierarchyDataTypeDirectory(String datasource) {
return datasource.replace(CHUKWA_CONSTANT.HIERARCHY_CONNECTOR, "/");
}
}
| 8,298 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/util/HBaseUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Calendar;
import java.util.TimeZone;
import org.apache.hadoop.chukwa.extraction.hbase.AbstractProcessor;
import org.apache.log4j.Logger;
import org.mortbay.log.Log;
public class HBaseUtil {
private static Logger LOG = Logger.getLogger(HBaseUtil.class);
static MessageDigest md5 = null;
static {
try {
md5 = MessageDigest.getInstance("md5");
} catch (NoSuchAlgorithmException e) {
LOG.warn(ExceptionUtil.getStackTrace(e));
}
}
public HBaseUtil() throws NoSuchAlgorithmException {
}
public byte[] buildKey(long time, String metricGroup, String metric,
String source) {
String fullKey = new StringBuilder(metricGroup).append(".")
.append(metric).toString();
return buildKey(time, fullKey, source);
}
public static byte[] buildKey(long time, String primaryKey) {
Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
c.setTimeInMillis(time);
byte[] day = Integer.toString(c.get(Calendar.DAY_OF_YEAR)).getBytes(Charset.forName("UTF-8"));
byte[] pk = getHash(primaryKey);
byte[] key = new byte[14];
System.arraycopy(day, 0, key, 0, day.length);
System.arraycopy(pk, 0, key, 2, 6);
return key;
}
public static byte[] buildKey(long time, String primaryKey, String source) {
Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
c.setTimeInMillis(time);
byte[] day = Integer.toString(c.get(Calendar.DAY_OF_YEAR)).getBytes(Charset.forName("UTF-8"));
byte[] pk = getHash(primaryKey);
byte[] src = getHash(source);
byte[] key = new byte[14];
System.arraycopy(day, 0, key, 0, day.length);
System.arraycopy(pk, 0, key, 2, 6);
System.arraycopy(src, 0, key, 8, 6);
return key;
}
private static byte[] getHash(String key) {
byte[] hash = new byte[6];
System.arraycopy(md5.digest(key.getBytes(Charset.forName("UTF-8"))), 0, hash, 0, 6);
return hash;
}
}
| 8,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.