index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection/writer/test | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection/writer/test/demux/TextParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.test.demux;
import org.apache.hadoop.chukwa.datacollection.writer.hbase.Annotation.Table;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.AbstractProcessor;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.log4j.Logger;
@Table(name="Test",columnFamily="TestColumnFamily")
public class TextParser extends AbstractProcessor {
static Logger log = Logger.getLogger(TextParser.class);
public static final String reduceType = "TestColumnFamily";
public final String recordType = this.getClass().getName();
/** Default constructor; there is no state to initialize. */
public TextParser() {
}
/**
 * Returns the data type handled by this processor.
 *
 * @return the fully-qualified name of this parser class
 */
public String getDataType() {
  return this.recordType;
}
@Override
protected void parse(String recordEntry,
    OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
    throws Throwable {
  // Expected entry layout: "<timestamp> <fieldName> <fieldValue>",
  // whitespace-separated.
  String[] fields = recordEntry.split("\\s");
  long ts = Long.parseLong(fields[0]);
  ChukwaRecord rec = new ChukwaRecord();
  rec.add("timestamp", fields[0]);
  rec.add(fields[1], fields[2]);
  // NOTE(review): the row key repeats fields[0] at both ends
  // ("<ts>/<name>/<ts>"); confirm this is the intended key layout.
  key.setKey(fields[0] + "/" + fields[1] + "/" + fields[0]);
  this.buildGenericRecord(rec, null, ts, reduceType);
  output.collect(key, rec);
}
} | 8,100 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection/writer | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection/writer/solr/TestSolrWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.writer.solr;
import java.util.ArrayList;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.log4j.Logger;
import org.apache.solr.SolrJettyTestBase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.core.CoreContainer;
import junit.framework.Assert;
public class TestSolrWriter extends SolrJettyTestBase {
private static Logger log = Logger.getLogger(TestSolrWriter.class);
private static EmbeddedSolrServer server;
CoreContainer container;
// Boots an embedded Solr core named "collection1" backed by the directory
// given by the CHUKWA_DATA_DIR system property (default target/test/var),
// so the tests need no external Solr server. Any startup failure is
// logged and converted into a test failure.
public void setUp() {
try {
String dataDir = System.getProperty("CHUKWA_DATA_DIR", "target/test/var");
container = new CoreContainer(dataDir);
container.load();
server = new EmbeddedSolrServer(container, "collection1" );
super.setUp();
} catch (Exception e) {
log.error(ExceptionUtil.getStackTrace(e));
Assert.fail(e.getMessage());
}
}
/** Shuts down the embedded Solr server (if one was started) after each test. */
public void tearDown() throws Exception {
  if (server == null) {
    super.tearDown();
    return;
  }
  server.shutdown();
  super.tearDown();
}
/**
 * Adds one chunk through SolrWriter, then polls the embedded Solr server
 * until the document becomes searchable (bounded by a retry budget) and
 * asserts that exactly one matching document exists.
 */
public void testCommit() {
  ArrayList<Chunk> chunks = new ArrayList<Chunk>();
  chunks.add(new ChunkImpl("Hadoop", "namenode",
      System.currentTimeMillis(), "This is a test.".getBytes(), null));
  try {
    // The index must start empty, otherwise the final count is meaningless.
    QueryResponse rsp = server.query(new SolrQuery("*:*"));
    Assert.assertEquals(0, rsp.getResults().getNumFound());
    SolrWriter sw = new SolrWriter();
    sw.add(chunks);
    // TODO: not a great way to test this - timing is easily out
    // of whack due to parallel tests and various computer specs/load
    Thread.sleep(1000); // wait 1 sec
    // now check that it comes out...
    rsp = server.query(new SolrQuery("data:test"));
    int cnt = 0;
    while (rsp.getResults().getNumFound() == 0) {
      // wait and try again for slower/busier machines
      // and/or parallel test effects.
      if (cnt++ == 10) {
        break;
      }
      Thread.sleep(2000); // wait 2 seconds...
      rsp = server.query(new SolrQuery("data:test"));
    }
    Assert.assertEquals(1, rsp.getResults().getNumFound());
  } catch (Exception e) {
    // This catch block used to be empty, so any failure (query error,
    // writer error, interruption) made the test pass silently. Fail loudly.
    log.error(ExceptionUtil.getStackTrace(e));
    Assert.fail(e.getMessage());
  }
}
}
| 8,101 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/datacollection/connector/TestFailedCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.connector;
import java.io.File;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.connector.http.HttpConnector;
import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
import org.apache.hadoop.chukwa.util.TempFileUtil;
import junit.framework.TestCase;
public class TestFailedCollector extends TestCase {
/**
 * Starts an agent plus an HttpConnector pointed at a collector URL that is
 * not listening (http://localhost:1234/chukwa), adds and then removes a
 * file adaptor, and verifies the agent survives the unreachable collector.
 */
public void testFailedCollector() {
  try {
    ChukwaAgent agent = ChukwaAgent.getAgent();
    HttpConnector connector = new HttpConnector(agent,
        "http://localhost:1234/chukwa");
    connector.start();
    ChukwaConfiguration cc = new ChukwaConfiguration();
    int portno = cc.getInt("chukwaAgent.control.port", 9093);
    ChukwaAgentController cli = new ChukwaAgentController("localhost", portno);
    File tmpOutput = TempFileUtil.makeBinary(2000);
    cli.addFile("unknown", tmpOutput.getAbsolutePath());
    System.out.println("have " + agent.adaptorCount() + " running adaptors");
    cli.removeFile("unknown", tmpOutput.getAbsolutePath());
    tmpOutput.delete();
    System.out.println("done");
    agent.shutdown();
    connector.shutdown();
    Thread.sleep(2000);
  } catch (Exception e) {
    e.printStackTrace();
    // Exceptions used to be only printed, so this test could never fail
    // (the old "failed" flag was never set). Surface them as a failure.
    fail(e.toString());
  }
}
}
| 8,102 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/tools | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/tools/backfilling/TestBackfillingLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.tools.backfilling;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.chukwa.datacollection.writer.parquet.ChukwaAvroSchema;
import org.apache.hadoop.chukwa.validationframework.util.MD5;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.avro.AvroReadSupport;
public class TestBackfillingLoader extends TestCase{
private String cluster = "chukwa";
/**
 * Backfills a 107-line log file through the escaped-newline tailing adaptor
 * and verifies: the input is renamed to *.sav, a .done data-sink file is
 * produced whose content matches the input (MD5), and the final sequence
 * id equals the input file size in bytes.
 */
public void testBackfillingLoaderWithCharFileTailingAdaptorUTF8NewLineEscaped() {
  String tmpDir = System.getProperty("test.build.data", "/tmp");
  long ts = System.currentTimeMillis();
  String dataDir = tmpDir + "/TestBackfillingLoader_" + ts;
  Configuration conf = new Configuration();
  conf.set("writer.hdfs.filesystem", "file:///");
  conf.set("chukwaCollector.outputDir", dataDir + "/log/");
  conf.set("chukwaCollector.rotateInterval", "" + (Integer.MAX_VALUE -1));
  // Use the class-level "cluster" field, like the other three tests do;
  // a same-valued local previously shadowed it here.
  String machine = "machine_" + ts;
  String adaptorName = "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped";
  String recordType = "MyRecordType_" + ts;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    File in1Dir = new File(dataDir + "/input");
    in1Dir.mkdirs();
    int lineCount = 107;
    File inputFile = makeTestFile(dataDir + "/input/in1.txt",lineCount);
    long size = inputFile.length();
    String logFile = inputFile.getAbsolutePath();
    System.out.println("Output:" + logFile);
    System.out.println("File:" + inputFile.length());
    BackfillingLoader loader = new BackfillingLoader(conf,cluster,machine,adaptorName,recordType,logFile);
    loader.process();
    // process() consumes the input file and renames it to <name>.sav
    File finalOutputFile = new File(dataDir + "/input/in1.txt.sav");
    Assert.assertTrue(inputFile.exists() == false);
    Assert.assertTrue(finalOutputFile.exists() == true);
    // locate the closed (.done) data-sink file in the output directory
    String doneFile = null;
    File directory = new File(dataDir + "/log/");
    String[] files = directory.list();
    for(String file: files) {
      if ( file.endsWith(".done") ){
        doneFile = dataDir + "/log/" + file;
        break;
      }
    }
    long seqId = validateDataSink(fs,conf,doneFile,finalOutputFile,
        cluster, recordType, machine, logFile);
    Assert.assertTrue(seqId == size);
  } catch (Throwable e) {
    e.printStackTrace();
    Assert.fail();
  }
  try {
    FileUtils.deleteDirectory(new File(dataDir));
  } catch (IOException e) {
    e.printStackTrace();
  }
}
/**
 * Backfills a 118-line log file through the FileAdaptor and verifies the
 * same invariants as the tailing-adaptor test: input renamed to *.sav, a
 * .done data-sink file whose MD5 matches the input, and a final sequence
 * id equal to the input file size in bytes.
 */
public void testBackfillingLoaderWithFileAdaptor() {
String tmpDir = System.getProperty("test.build.data", "/tmp");
long ts = System.currentTimeMillis();
String dataDir = tmpDir + "/TestBackfillingLoader_" + ts;
Configuration conf = new Configuration();
conf.set("writer.hdfs.filesystem", "file:///");
conf.set("chukwaCollector.outputDir", dataDir + "/log/");
conf.set("chukwaCollector.rotateInterval", "" + (Integer.MAX_VALUE -1));
String machine = "machine_" + ts;
String adaptorName = "org.apache.hadoop.chukwa.datacollection.adaptor.FileAdaptor";
String recordType = "MyRecordType_" + ts;
try {
FileSystem fs = FileSystem.getLocal(conf);
File in1Dir = new File(dataDir + "/input");
in1Dir.mkdirs();
int lineCount = 118;
File inputFile = makeTestFile(dataDir + "/input/in2.txt",lineCount);
long size = inputFile.length();
String logFile = inputFile.getAbsolutePath();
System.out.println("Output:" + logFile);
System.out.println("File:" + inputFile.length());
BackfillingLoader loader = new BackfillingLoader(conf,cluster,machine,adaptorName,recordType,logFile);
loader.process();
// process() consumes the input file and renames it to <name>.sav
File finalOutputFile = new File(dataDir + "/input/in2.txt.sav");
Assert.assertTrue("Input file exists", inputFile.exists() == false);
Assert.assertTrue("Final input file exists", finalOutputFile.exists() == true);
// locate the closed (.done) data-sink file in the output directory
String doneFile = null;
File directory = new File(dataDir + "/log/");
String[] files = directory.list();
for(String file: files) {
if ( file.endsWith(".done") ){
doneFile = dataDir + "/log/" + file;
break;
}
}
long seqId = validateDataSink(fs,conf,doneFile,finalOutputFile,
cluster, recordType, machine, logFile);
Assert.assertTrue(seqId == size);
} catch (Throwable e) {
e.printStackTrace();
Assert.fail();
}
try {
FileUtils.deleteDirectory(new File(dataDir));
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Same flow as the small-file tailing-adaptor test, but with a large input
 * (1024*1024 generated lines; the inline comment says ~34MB) to exercise
 * large-file backfilling.
 */
public void testBackfillingLoaderWithCharFileTailingAdaptorUTF8NewLineEscapedBigFile() {
String tmpDir = System.getProperty("test.build.data", "/tmp");
long ts = System.currentTimeMillis();
String dataDir = tmpDir + "/TestBackfillingLoader_" + ts;
Configuration conf = new Configuration();
conf.set("writer.hdfs.filesystem", "file:///");
conf.set("chukwaCollector.outputDir", dataDir + "/log/");
conf.set("chukwaCollector.rotateInterval", "" + (Integer.MAX_VALUE -1));
String machine = "machine_" + ts;
String adaptorName = "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped";
String recordType = "MyRecordType_" + ts;
try {
FileSystem fs = FileSystem.getLocal(conf);
File in1Dir = new File(dataDir + "/input");
in1Dir.mkdirs();
int lineCount = 1024*1024;//34MB
File inputFile = makeTestFile(dataDir + "/input/in1.txt",lineCount);
long size = inputFile.length();
String logFile = inputFile.getAbsolutePath();
System.out.println("Output:" + logFile);
System.out.println("File:" + inputFile.length());
BackfillingLoader loader = new BackfillingLoader(conf,cluster,machine,adaptorName,recordType,logFile);
loader.process();
// process() consumes the input file and renames it to <name>.sav
File finalOutputFile = new File(dataDir + "/input/in1.txt.sav");
Assert.assertTrue(inputFile.exists() == false);
Assert.assertTrue(finalOutputFile.exists() == true);
// locate the closed (.done) data-sink file in the output directory
String doneFile = null;
File directory = new File(dataDir + "/log/");
String[] files = directory.list();
for(String file: files) {
if ( file.endsWith(".done") ){
doneFile = dataDir + "/log/" + file;
break;
}
}
long seqId = validateDataSink(fs,conf,doneFile,finalOutputFile,
cluster, recordType, machine, logFile);
// the last sequence id must equal the number of bytes backfilled
Assert.assertTrue(seqId == size);
} catch (Throwable e) {
e.printStackTrace();
Assert.fail();
}
try {
FileUtils.deleteDirectory(new File(dataDir));
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Large-file variant that overrides chukwaCollector.writerClass to route
 * output through the local-filesystem LocalWriter (with a lowered
 * minPercentFreeDisk so the test also passes on nearly-full disks).
 */
public void testBackfillingLoaderWithCharFileTailingAdaptorUTF8NewLineEscapedBigFileLocalWriter() {
String tmpDir = System.getProperty("test.build.data", "/tmp");
long ts = System.currentTimeMillis();
String dataDir = tmpDir + "/TestBackfillingLoader_" + ts;
Configuration conf = new Configuration();
conf.set("writer.hdfs.filesystem", "file:///");
conf.set("chukwaCollector.outputDir", dataDir + "/log/");
conf.set("chukwaCollector.rotateInterval", "" + (Integer.MAX_VALUE -1));
conf.set("chukwaCollector.localOutputDir", dataDir + "/log/");
conf.set("chukwaCollector.writerClass", "org.apache.hadoop.chukwa.datacollection.writer.localfs.LocalWriter");
conf.set("chukwaCollector.minPercentFreeDisk", "2");//so unit tests pass on machines with full-ish disks
String machine = "machine_" + ts;
String adaptorName = "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped";
String recordType = "MyRecordType_" + ts;
try {
FileSystem fs = FileSystem.getLocal(conf);
File in1Dir = new File(dataDir + "/input");
in1Dir.mkdirs();
int lineCount = 1024*1024*2;//64MB
File inputFile = makeTestFile(dataDir + "/input/in1.txt",lineCount);
long size = inputFile.length();
String logFile = inputFile.getAbsolutePath();
System.out.println("Output:" + logFile);
System.out.println("File:" + inputFile.length());
BackfillingLoader loader = new BackfillingLoader(conf,cluster,machine,adaptorName,recordType,logFile);
loader.process();
// process() consumes the input file and renames it to <name>.sav
File finalOutputFile = new File(dataDir + "/input/in1.txt.sav");
Assert.assertTrue(inputFile.exists() == false);
Assert.assertTrue(finalOutputFile.exists() == true);
// locate the closed (.done) data-sink file in the output directory
String doneFile = null;
File directory = new File(dataDir + "/log/");
String[] files = directory.list();
for(String file: files) {
if ( file.endsWith(".done") ){
doneFile = dataDir + "/log/" + file;
break;
}
}
long seqId = validateDataSink(fs,conf,doneFile,finalOutputFile,
cluster, recordType, machine, logFile);
Assert.assertTrue(seqId == size);
} catch (Throwable e) {
e.printStackTrace();
Assert.fail();
}
try {
FileUtils.deleteDirectory(new File(dataDir));
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Reads every record back out of a parquet data-sink file and checks each
 * one against the expected cluster tag, data type and source. All record
 * payloads are appended to "<dataSinkFile>.dump", whose MD5 must match the
 * MD5 of the original (renamed) log file.
 *
 * NOTE(review): the "application" parameter is currently unused.
 *
 * @return the seqId of the last record read, or -1 if no record was read
 */
protected long validateDataSink(FileSystem fs,Configuration conf, String dataSinkFile, File logFile,
String cluster,String dataType, String source, String application) throws Throwable {
AvroParquetReader<GenericRecord> reader = null;
long lastSeqId = -1;
FileOutputStream out = null;
try {
Schema chukwaAvroSchema = ChukwaAvroSchema.getSchema();
AvroReadSupport.setRequestedProjection(conf, chukwaAvroSchema);
reader = new AvroParquetReader<GenericRecord>(conf, new Path(dataSinkFile));
String dataSinkDumpName = dataSinkFile + ".dump";
// append mode: dump accumulates across calls against the same sink file
out = new FileOutputStream(new File(dataSinkDumpName), true);
GenericRecord record = null;
while ( true ) {
record = reader.read();
// reader.read() returns null once the parquet file is exhausted
if(record == null)
break;
Assert.assertTrue(record.get("tags").toString().contains(cluster));
Assert.assertTrue(dataType.equals(record.get("dataType")));
Assert.assertTrue(source.equals(record.get("source")));
byte[] data = ((ByteBuffer)record.get("data")).array();
out.write(data);
lastSeqId = ((Long)record.get("seqId")).longValue();
}
out.close();
// null the handles so the finally block does not close them twice
out = null;
reader.close();
reader = null;
String dataSinkMD5 = MD5.checksum(new File(dataSinkDumpName));
String logFileMD5 = MD5.checksum(logFile);
Assert.assertTrue(dataSinkMD5.equals(logFileMD5));
}
finally {
if (out != null) {
out.close();
}
if (reader != null) {
reader.close();
}
}
return lastSeqId;
}
/**
 * Writes {@code size} lines of the form "<i> abcdefghijklmnopqrstuvwxyz"
 * to a new file at {@code name}.
 *
 * @param name absolute path of the file to create
 * @param size number of lines to write
 * @return the created file
 * @throws IOException if the file cannot be created or written
 */
private File makeTestFile(final String name, int size) throws IOException {
  File tmpOutput = new File(name);
  // try-with-resources guarantees both streams are closed even if a write
  // fails part-way through (the previous version leaked them on exception).
  try (FileOutputStream fos = new FileOutputStream(tmpOutput);
       PrintWriter pw = new PrintWriter(fos)) {
    for (int i = 0; i < size; ++i) {
      pw.print(i + " ");
      pw.println("abcdefghijklmnopqrstuvwxyz");
    }
    pw.flush();
  }
  return tmpOutput;
}
}
| 8,103 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/analysis/salsa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/TestFSMBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.analysis.salsa.fsm;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FilenameFilter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.regex.*;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.database.TableCreator;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent.AlreadyRunningException;
import org.apache.hadoop.chukwa.datacollection.connector.http.HttpConnector;
import org.apache.hadoop.chukwa.datacollection.collector.CaptureWriter;
import org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector;
import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
import org.apache.hadoop.chukwa.datacollection.sender.ChukwaHttpSender;
import org.apache.hadoop.chukwa.datacollection.sender.RetryListOfCollectors;
import org.apache.hadoop.chukwa.datacollection.test.ConsoleOutConnector;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter;
import org.apache.hadoop.chukwa.dataloader.MetricDataLoader;
import org.apache.hadoop.conf.Configuration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.chukwa.extraction.demux.ChukwaRecordOutputFormat;
import org.apache.hadoop.chukwa.extraction.demux.ChukwaRecordPartitioner;
import org.apache.hadoop.chukwa.extraction.demux.Demux;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.analysis.salsa.fsm.*;
import junit.framework.TestCase;
public class TestFSMBuilder extends TestCase {
private static Log log = LogFactory.getLog(TestFSMBuilder.class);
int LINES = 10000;
int THREADS = 2;
private MiniDFSCluster dfs = null;
int NUM_HADOOP_SLAVES = 4;
private FileSystem fileSys = null;
private MiniMRCluster mr = null;
private Server jettyCollector = null;
private ChukwaAgent agent = null;
private HttpConnector conn = null;
private ChukwaHttpSender sender = null;
private int agentPort = 9093;
private int collectorPort = 9990;
private static final String dataSink = "/demux/input";
private static final String fsmSink = "/analysis/salsafsm";
private static Path DEMUX_INPUT_PATH = null;
private static Path DEMUX_OUTPUT_PATH = null;
private static Path FSM_OUTPUT_PATH = null;
private ChukwaConfiguration conf = new ChukwaConfiguration();
private static SimpleDateFormat day = new java.text.SimpleDateFormat("yyyyMMdd_HH_mm");
private static String cluster = "demo";
long[] timeWindow = {7, 30, 91, 365, 3650};
long current = 1244617200000L; // 2009-06-10
private String testBuildDir = System.getProperty("test.build.data", "/tmp");
private File dfsDataDir = new File(testBuildDir+"/dfs");
/**
 * Boots the full mini-pipeline used by these tests: a MiniDFSCluster (data
 * sink storage), a MiniMRCluster (runs Demux/FSMBuilder), a Jetty-hosted
 * ServletCollector, and a Chukwa agent wired to that collector through an
 * HttpConnector plus an HTTP sender.
 */
public void setUp() {
// Startup HDFS cluster - stored collector-ed JobHistory chunks
// Startup MR cluster - run Demux, FSMBuilder
// Startup collector
// Startup agent
System.out.println("In setUp()");
try {
System.setProperty("hadoop.log.dir", System.getProperty(
"test.build.data", "/tmp"));
} catch (Exception e) {
e.printStackTrace();
fail("Could not set up: " + e.toString());
}
// Startup HDFS cluster - stored collector-ed JobHistory chunks
try {
// start from a clean DFS data directory so stale state doesn't interfere
if(dfsDataDir.exists()) {
FileUtils.deleteDirectory(dfsDataDir);
}
dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
fileSys = dfs.getFileSystem();
DEMUX_INPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+dataSink);
DEMUX_OUTPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+"/demux/output");
} catch(Exception e) {
e.printStackTrace();
fail("Fail to startup HDFS cluster.");
}
// Startup MR Cluster
try {
mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
.toString(), 1);
} catch(Exception e) {
fail("Fail to startup Map/reduce cluster.");
}
// Startup collector
try {
// Configure Collector
conf.set("chukwaCollector.chunkSuppressBufferSize", "10");
conf.set("writer.hdfs.filesystem",fileSys.getUri().toString());
conf.set("chukwaCollector.outputDir",dataSink);
conf.set("chukwaCollector.rotateInterval", "10000");
// Set up jetty connector
SelectChannelConnector jettyConnector = new SelectChannelConnector();
jettyConnector.setLowResourcesConnections(THREADS-1);
jettyConnector.setLowResourceMaxIdleTime(1500);
jettyConnector.setPort(collectorPort);
// Set up jetty server proper, using connector
jettyCollector = new Server(collectorPort);
Context root = new Context(jettyCollector, "/", Context.SESSIONS);
root.addServlet(new ServletHolder(new ServletCollector(conf)), "/*");
jettyCollector.start();
jettyCollector.setStopAtShutdown(true);
// fixed pause to let the collector finish starting before the agent connects
Thread.sleep(10000);
} catch(Exception e) {
fail("Fail to startup collector.");
}
// Startup agent
try {
// Configure Agent
conf.set("chukwaAgent.tags", "cluster=\"demo\"");
DataFactory.getInstance().addDefaultTag(conf.get("chukwaAgent.tags", "cluster=\"unknown\""));
conf.set("chukwaAgent.checkpoint.dir", System.getenv("CHUKWA_DATA_DIR")+File.separator+"tmp");
conf.set("chukwaAgent.checkpoint.interval", "10000");
// NOTE(review): portno is computed but never used below — confirm whether
// it was meant to be passed to the agent/controller.
int portno = conf.getInt("chukwaAgent.control.port", agentPort);
agent = ChukwaAgent.getAgent();
agent.start();
conn = new HttpConnector(agent, "http://localhost:"+collectorPort+"/chukwa");
conn.start();
sender = new ChukwaHttpSender(conf);
ArrayList<String> collectorList = new ArrayList<String>();
collectorList.add("http://localhost:"+collectorPort+"/chukwa");
sender.setCollectors(new RetryListOfCollectors(collectorList, conf));
} catch (AlreadyRunningException e) {
fail("Chukwa Agent is already running");
}
System.out.println("Done setUp().");
}
/**
 * Reads the whole of {@code aFile} into a String, terminating each line
 * with the platform line separator. On I/O error the stack trace is
 * printed and whatever was read so far is returned (best-effort, matching
 * the original behavior).
 *
 * NOTE(review): FileReader uses the platform default charset — confirm the
 * test data is ASCII/compatible.
 *
 * @param aFile file to read
 * @return the file contents, possibly partial if an IOException occurred
 */
public String readFile(File aFile) {
  // StringBuilder instead of StringBuffer: this accumulator is method-local,
  // so no synchronization is needed.
  StringBuilder contents = new StringBuilder();
  try {
    BufferedReader input = new BufferedReader(new FileReader(aFile));
    try {
      String line;
      while ((line = input.readLine()) != null) {
        contents.append(line);
        contents.append(System.getProperty("line.separator"));
      }
    } finally {
      input.close();
    }
  } catch (IOException ex) {
    ex.printStackTrace();
  }
  return contents.toString();
}
/**
 * Tears down everything started in setUp(): deletes the Demux output,
 * shuts down the agent, connector, Jetty collector, MR cluster and DFS
 * cluster, and removes the local DFS data directory.
 */
public void tearDown() {
FileSystem fs;
System.out.println("In tearDown()");
try {
fs = dfs.getFileSystem();
fs.delete(DEMUX_OUTPUT_PATH, true);
agent.shutdown();
conn.shutdown();
jettyCollector.stop();
mr.shutdown();
dfs.shutdown();
if(dfsDataDir.exists()) {
FileUtils.deleteDirectory(dfsDataDir);
}
// brief pause to let background threads wind down before the next test
Thread.sleep(2000);
} catch(Exception e) {
e.printStackTrace();
fail(e.toString());
}
System.out.println("Done tearDown()");
}
/**
 * Performs tasks common to all tests.
 * Sets up the agent to stream the two sample log types (JobHistory.log and
 * ClientTrace.log from CHUKWA_DATA_DIR/log), waits for the collector to
 * write data-sink files, then runs Demux over the sink to produce the
 * ChukwaRecord output that FSMBuilder consumes.
 */
protected void initialTasks () {
  System.out.println("In initialTasks()");
  try {
    // Test Chukwa Agent Controller and Agent Communication
    ChukwaAgentController cli = new ChukwaAgentController("localhost", agentPort);
    String[] source = new File(System.getenv("CHUKWA_DATA_DIR") + File.separator + "log").list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.endsWith(".log");
      }
    });
    System.out.println(System.getenv("CHUKWA_DATA_DIR") + File.separator + "log");
    for (String fname : source) {
      // only the two sample logs the FSM tests are written against
      if (!(fname.equals("JobHistory.log") || fname.equals("ClientTrace.log"))) {
        continue;
      }
      StringBuilder fullPath = new StringBuilder();
      // Fix: the directory listing above reads the CHUKWA_DATA_DIR
      // environment variable, but the adaptor path used to be built from the
      // (typically unset) CHUKWA_DATA_DIR *system property*. Use the same
      // source for both so the adaptor tails the file that was listed.
      fullPath.append(System.getenv("CHUKWA_DATA_DIR"));
      fullPath.append(File.separator);
      fullPath.append("log");
      fullPath.append(File.separator);
      fullPath.append(fname);
      // record type is the file basename, e.g. "JobHistory"
      String recordType = fname.substring(0, fname.indexOf("."));
      String adaptorId = cli.add(
          "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped",
          recordType, "0 " + fullPath.toString(), 0);
      assertNotNull(adaptorId);
      Thread.sleep(2000);
    }
    cli.removeAll();
    // allow the collector time to flush and rotate sink files
    Thread.sleep(30000);
  } catch (Exception e) {
    e.printStackTrace();
    fail(e.toString());
  }
  // Test Data Sink files written by Collector
  Path demuxDir = new Path(dataSink + "/*");
  FileSystem fs;
  try {
    fs = dfs.getFileSystem();
    FileStatus[] events = fs.globStatus(demuxDir);
    log.info("Number of data sink files written:" + events.length);
    assertTrue(events.length != 0);
  } catch (IOException e) {
    e.printStackTrace();
    fail("File System Error.");
  }
  // Test Demux
  log.info("Testing demux");
  try {
    System.setProperty("hadoop.log.dir", System.getProperty(
        "test.build.data", "/tmp"));
    JobConf job = new JobConf(new ChukwaConfiguration(), Demux.class);
    job.addResource(System.getenv("CHUKWA_CONF_DIR") + File.separator + "chukwa-demux-conf.xml");
    job.setJobName("Chukwa-Demux_" + day.format(new Date()));
    job.setInputFormat(SequenceFileInputFormat.class);
    job.setMapperClass(Demux.MapClass.class);
    job.setPartitionerClass(ChukwaRecordPartitioner.class);
    job.setReducerClass(Demux.ReduceClass.class);
    job.setOutputKeyClass(ChukwaRecordKey.class);
    job.setOutputValueClass(ChukwaRecord.class);
    job.setOutputFormat(ChukwaRecordOutputFormat.class);
    job.setJobPriority(JobPriority.VERY_HIGH);
    job.setNumMapTasks(2);
    job.setNumReduceTasks(1);
    // consume only closed (.done) sink files
    Path input = new Path(fileSys.getUri().toString() + File.separator + dataSink + File.separator + "*.done");
    FileInputFormat.setInputPaths(job, input);
    FileOutputFormat.setOutputPath(job, DEMUX_OUTPUT_PATH);
    String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.endsWith(".jar");
      }
    });
    job.setJar(System.getenv("CHUKWA_HOME") + File.separator + jars[0]);
    JobClient.runJob(job);
  } catch (Exception e) {
    fail(ExceptionUtil.getStackTrace(e));
  }
  System.out.println("Done initialTasks()");
}
/**
 * End-to-end check of FSMBuilder over job-history data: runs Demux via
 * initialTasks(), feeds the demuxed TaskData*.evt output to FSMBuilder,
 * then reads the generated MAPREDUCE_FSM sequence files and verifies the
 * emitted states.
 *
 * Expects the demuxed sample job to contain 10 maps and 8 reduces, i.e.
 * 42 state records: 10 MAP + 8 each of REDUCE, REDUCE_SHUFFLEWAIT,
 * REDUCE_SORT and REDUCE_REDUCER.
 */
public void testFSMBuilder_JobHistory020 () {
  initialTasks();
  // Test FSMBuilder (job history only)
  log.info("Testing FSMBuilder (Job History only)");
  System.out.println("In JobHistory020");
  // Run FSMBuilder on Demux output
  try {
    JobConf job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
    job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
    job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
    job.setMapperClass(JobHistoryTaskDataMapper.class);
    job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
    job.setReducerClass(FSMBuilder.FSMReducer.class);
    job.setMapOutputValueClass(FSMIntermedEntry.class);
    job.setMapOutputKeyClass(ChukwaRecordKey.class);
    job.setInputFormat(SequenceFileInputFormat.class);
    job.setOutputKeyClass(ChukwaRecordKey.class);
    job.setOutputValueClass(ChukwaRecord.class);
    job.setOutputFormat(ChukwaRecordOutputFormat.class);
    job.setNumReduceTasks(1);
    Path inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/TaskData*.evt");
    this.FSM_OUTPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+fsmSink);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, FSM_OUTPUT_PATH);
    // Ship the first Chukwa jar found under CHUKWA_HOME so the
    // mapper/reducer classes are available to the (mini)cluster.
    String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.endsWith(".jar");
      }
    });
    job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
    JobClient.runJob(job);
  } catch (Exception e) {
    fail("Error running FSMBuilder: "+e.toString());
  }
  System.out.println("Done running FSMBuilder; Checking results");
  // Check FSMBuilder output by reading the sequence file(s) generated.
  // Hard-coded to check the contents of test/samples/JobLog.log
  try {
    // Group 1 captures the task sequence number of an attempt id,
    // e.g. the "000003" in attempt_200903062215_0577_m_000003_0.
    Pattern task_id_pat = Pattern.compile("attempt_[0-9]*_[0-9]*_[mr]_([0-9]*)_[0-9]*");
    ChukwaRecordKey key = new ChukwaRecordKey();
    ChukwaRecord record = new ChukwaRecord();
    // Track which map/reduce task numbers have been observed per state.
    // Java zero-initializes boolean arrays to false, so no explicit
    // initialization loops are needed.
    boolean mapSeen[] = new boolean[10];
    boolean reduceSeen[] = new boolean[8];
    boolean reduceShuffleSeen[] = new boolean[8];
    boolean reduceSortSeen[] = new boolean[8];
    boolean reduceReducerSeen[] = new boolean[8];
    Path fsm_outputs = new Path(FSM_OUTPUT_PATH.toString()+File.separator+
      "/*/MAPREDUCE_FSM/MAPREDUCE_FSM*.evt");
    FileStatus [] files;
    files = fileSys.globStatus(fsm_outputs);
    int count = 0;
    for (int i = 0; i < files.length; i++) {
      SequenceFile.Reader r = new SequenceFile.Reader(fileSys, files[i].getPath(), conf);
      System.out.println("Processing files " + files[i].getPath().toString());
      try {
        while (r.next(key, record)) {
          String state_name = record.getValue("STATE_NAME");
          String task_id = record.getValue("TASK_ID");
          Matcher m = task_id_pat.matcher(task_id);
          if (!m.matches()) {
            continue;
          }
          String tasknum_string = m.group(1);
          if (tasknum_string == null) {
            continue;
          }
          int tasknum = Integer.parseInt(tasknum_string);
          if (state_name.equals("MAP")) {
            assertTrue("Map sequence number should be < 10",tasknum < 10);
            mapSeen[tasknum] = true;
          } else if (state_name.equals("REDUCE")) {
            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
            reduceSeen[tasknum] = true;
          } else if (state_name.equals("REDUCE_SHUFFLEWAIT")) {
            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
            reduceShuffleSeen[tasknum] = true;
          } else if (state_name.equals("REDUCE_SORT")) {
            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
            reduceSortSeen[tasknum] = true;
          } else if (state_name.equals("REDUCE_REDUCER")) {
            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
            reduceReducerSeen[tasknum] = true;
          }
          count++;
        }
      } finally {
        r.close(); // was previously leaked: one open reader per output file
      }
    }
    System.out.println("Processed " + count + " records.");
    assertTrue("Total number of states is 42 - 10 maps + (8 reduces * 4)",count == 42);
    // We must have seen all 10 maps and all 8 reduces;
    // check for that here
    boolean passed = true;
    for (int i = 0; i < 10; i++) passed &= mapSeen[i];
    for (int i = 0; i < 8; i++) {
      passed &= reduceSeen[i];
      passed &= reduceShuffleSeen[i];
      passed &= reduceSortSeen[i];
      passed &= reduceReducerSeen[i];
    }
    assertTrue("Seen all Maps and Reduces in generated states.",passed);
  } catch (Exception e) {
    fail("Error checking FSMBuilder output: "+e.toString());
  }
}
/**
 * End-to-end check of FSMBuilder over ClientTrace data: runs Demux via
 * initialTasks(), then runs FSMBuilder twice over the demuxed
 * ClientTraceDetailed*.evt output — once with the TaskTracker mapper
 * (shuffle traffic) and once with the DataNode mapper (HDFS traffic) —
 * and verifies the counts of read/write/shuffle states.
 *
 * Expects the sample data to yield 10 HDFS reads, 8 HDFS writes and
 * 80 shuffles.
 */
public void testFSMBuilder_ClientTrace020 () {
  initialTasks();
  log.info("Testing FSMBuilder (ClientTrace only)");
  System.out.println("In ClientTrace020");
  // Run FSMBuilder on Demux output
  try {
    // Process TaskTracker shuffle clienttrace entries first
    JobConf job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
    job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
    job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
    job.setMapperClass(TaskTrackerClientTraceMapper.class);
    job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
    job.setReducerClass(FSMBuilder.FSMReducer.class);
    job.setMapOutputValueClass(FSMIntermedEntry.class);
    job.setMapOutputKeyClass(ChukwaRecordKey.class);
    job.setInputFormat(SequenceFileInputFormat.class);
    job.setOutputKeyClass(ChukwaRecordKey.class);
    job.setOutputValueClass(ChukwaRecord.class);
    job.setOutputFormat(ChukwaRecordOutputFormat.class);
    job.setNumReduceTasks(1);
    Path inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/ClientTraceDetailed*.evt");
    Path fsmOutputPath1 = new Path(fileSys.getUri().toString()+File.separator+fsmSink+"1");
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, fsmOutputPath1);
    // Ship the first Chukwa jar found under CHUKWA_HOME.
    String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.endsWith(".jar");
      }
    });
    job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
    JobClient.runJob(job);
    System.out.println("Processed TaskTracker ClientTrace.");
    // Process DataNode clienttrace entries
    job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
    job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
    job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
    job.setMapperClass(DataNodeClientTraceMapper.class);
    job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
    job.setReducerClass(FSMBuilder.FSMReducer.class);
    job.setMapOutputValueClass(FSMIntermedEntry.class);
    job.setMapOutputKeyClass(ChukwaRecordKey.class);
    job.setInputFormat(SequenceFileInputFormat.class);
    job.setOutputKeyClass(ChukwaRecordKey.class);
    job.setOutputValueClass(ChukwaRecord.class);
    job.setOutputFormat(ChukwaRecordOutputFormat.class);
    job.setNumReduceTasks(1);
    inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/ClientTraceDetailed*.evt");
    Path fsmOutputPath2 = new Path(fileSys.getUri().toString()+File.separator+fsmSink+"2");
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, fsmOutputPath2);
    jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
      public boolean accept(File dir, String name) {
        return name.endsWith(".jar");
      }
    });
    job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
    JobClient.runJob(job);
    System.out.println("Processed DataNode ClientTrace.");
  } catch (Exception e) {
    fail("Error running FSMBuilder: "+e.toString());
  }
  System.out.println("Done running FSMBuilder; Checking results");
  try {
    // Glob matches the output of both FSMBuilder runs (suffixes "1" and "2").
    Path fsm_outputs = new Path(fileSys.getUri().toString()+File.separator+
      fsmSink + "*/*/*/*.evt");
    FileStatus [] files;
    files = fileSys.globStatus(fsm_outputs);
    int count = 0;
    int numHDFSRead = 0, numHDFSWrite = 0, numShuffles = 0;
    ChukwaRecordKey key = new ChukwaRecordKey();
    ChukwaRecord record = new ChukwaRecord();
    for (int i = 0; i < files.length; i++) {
      SequenceFile.Reader r = new SequenceFile.Reader(fileSys, files[i].getPath(), conf);
      System.out.println("Processing files " + files[i].getPath().toString());
      try {
        while (r.next(key, record)) {
          String state_name = record.getValue("STATE_NAME");
          if (state_name.equals("READ_LOCAL") || state_name.equals("READ_REMOTE"))
          {
            numHDFSRead++;
          } else if (state_name.equals("WRITE_LOCAL") || state_name.equals("WRITE_REMOTE")
            || state_name.equals("WRITE_REPLICATED"))
          {
            numHDFSWrite++;
          } else if (state_name.equals("SHUFFLE_LOCAL") || state_name.equals("SHUFFLE_REMOTE"))
          {
            numShuffles++;
          }
          count++;
        }
      } finally {
        r.close(); // was previously leaked: one open reader per output file
      }
    }
    System.out.println("Processed " + count + " records.");
    System.out.println("HDFSRD: " + numHDFSRead + " HDFSWR: " + numHDFSWrite + " SHUF: " + numShuffles);
    assertTrue("Number of HDFS reads", numHDFSRead == 10);
    assertTrue("Number of HDFS writes", numHDFSWrite == 8);
    assertTrue("Number of shuffles", numShuffles == 80);
  } catch (Exception e) {
    fail("Error checking FSMBuilder results: " + e.toString());
  }
}
}
| 8,104 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestDumpChunks.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
import java.util.*;
import java.util.regex.PatternSyntaxException;
import java.io.*;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
public class TestDumpChunks extends TestCase {

  /**
   * Writes the given chunks to dest as a Chukwa archive sequence file
   * (ChukwaArchiveKey -> ChunkImpl, uncompressed).
   *
   * The writer and underlying stream are now closed in finally blocks so
   * they are not leaked if append() throws.
   */
  public static void writeSeqFile(Configuration conf, FileSystem fileSys, Path dest,
      List<ChunkImpl> chunks) throws IOException {
    FSDataOutputStream out = fileSys.create(dest);
    try {
      Calendar calendar = Calendar.getInstance();
      SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
          ChukwaArchiveKey.class, ChunkImpl.class,
          SequenceFile.CompressionType.NONE, null);
      try {
        for (ChunkImpl chunk : chunks) {
          ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
          // Truncate "now" to the start of the current minute so all
          // chunks land in the same time partition.
          calendar.set(Calendar.SECOND, 0);
          calendar.set(Calendar.MILLISECOND, 0);
          archiveKey.setTimePartition(calendar.getTimeInMillis());
          archiveKey.setDataType(chunk.getDataType());
          archiveKey.setStreamName(chunk.getStreamName());
          archiveKey.setSeqId(chunk.getSeqID());
          seqFileWriter.append(archiveKey, chunk);
        }
      } finally {
        seqFileWriter.close();
      }
    } finally {
      out.close();
    }
  }

  /**
   * Writes two chunks of datatype "Data" whose bodies concatenate to
   * "testing", then dumps them with a datatype filter and checks the
   * reassembled stream appears at the start of the output.
   */
  public void testFilePatternMatching() throws IOException, java.net.URISyntaxException {
    File tempDir = new File(System.getProperty("test.build.data", "/tmp"));
    File tmpFile = File.createTempFile("dumpchunkTest", ".seq", tempDir);
    tmpFile.deleteOnExit();
    Configuration conf = new Configuration();
    Path path = new Path(tmpFile.getAbsolutePath());
    List<ChunkImpl> chunks = new ArrayList<ChunkImpl>();
    byte[] dat = "test".getBytes();
    ChunkImpl c = new ChunkImpl("Data", "aname", dat.length, dat, null);
    chunks.add(c);
    dat = "ing".getBytes();
    // seqID dat.length+4: presumably the second chunk's sequence id marks
    // the end offset (4 bytes already seen + 3 new) — TODO confirm against
    // ChunkImpl semantics.
    c = new ChunkImpl("Data", "aname", dat.length + 4, dat, null);
    chunks.add(c);
    writeSeqFile(conf, FileSystem.getLocal(conf), path, chunks);
    String[] args = new String[] {"datatype=Data", path.toString()};
    ByteArrayOutputStream capture = new ByteArrayOutputStream();
    DumpChunks.dump(args, conf, new PrintStream(capture));
    assertTrue(new String(capture.toByteArray()).startsWith("testing\n---"));
    //now test for matches.
  }

  /**
   * An ill-formed regular expression in the filter must be handled by
   * DumpChunks.dump rather than escaping as a PatternSyntaxException.
   */
  public void testIllegalRegex() throws Exception {
    String[] args = { "tags.command=(" };
    Configuration conf = new Configuration();
    try {
      DumpChunks.dump(args, conf, System.out);
    } catch (PatternSyntaxException e) {
      e.printStackTrace();
      fail("Illegal regular expression caused PatternSyntaxException: " + e);
    }
  }
}
| 8,105 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestRecordConsts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
public class TestRecordConsts extends TestCase {

  /**
   * escapeAllButLastRecordSeparator must escape every embedded record
   * separator except the trailing one, and leave separator-free input
   * unchanged.
   *
   * Note: assertEquals arguments were previously reversed (actual first);
   * JUnit expects (expected, actual), which matters for failure messages.
   */
  public void testEscapeAllButLastRecordSeparator() {
    String post = RecordConstants.escapeAllButLastRecordSeparator("\n",
        "foo bar baz\n");
    // Only a trailing separator: nothing to escape.
    assertEquals("foo bar baz\n", post);

    post = RecordConstants.escapeAllButLastRecordSeparator("\n",
        "foo\nbar\nbaz\n");
    // Substitute the escape sequence with ^D to make it printable.
    post = post.replaceAll(RecordConstants.RECORD_SEPARATOR_ESCAPE_SEQ, "^D");
    assertEquals("foo^D\nbar^D\nbaz\n", post);
    System.out.println("string is " + post + ".");
  }

  // TODO: empty placeholder — escapeAllRecordSeparators has no coverage.
  public void testEscapeAllRecordSeparators() {
  }
}
| 8,106 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestCreateRecordFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
import java.text.SimpleDateFormat;
import java.text.ParseException;
import java.net.InetAddress;
import java.io.File;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Calendar;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.TsProcessor;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MapProcessor;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
public class TestCreateRecordFile extends TestCase {
private SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
private Calendar calendar = Calendar.getInstance();
public void testWriteSequenceFile() throws IOException, ParseException {
String outputDir = System.getProperty("test.build.data", "/tmp");
//input configs
String datadir = System.getenv("CHUKWA_DATA_DIR");
if(datadir == null)
datadir = "test/samples";
else
datadir = datadir + File.separator + "log";
File inputFile = new File( datadir+ File.separator + "ClientTrace.log");
Path outputFile = new Path(outputDir + "/" + this.getClass().getName() + "/ClientTrace.evt");
String clusterName = "testClusterName";
String dataType = "testDataType";
String streamName = "testStreamName";
MapProcessor processor = new TsProcessor();
//create the sequence file
CreateRecordFile.makeTestSequenceFile(inputFile, outputFile, clusterName,
dataType, streamName, processor);
//read the output file
ChukwaRecordKey key = new ChukwaRecordKey();
ChukwaRecord record = new ChukwaRecord();
Configuration conf = new Configuration();
FileSystem fs = outputFile.getFileSystem(conf);
SequenceFile.Reader sequenceReader = new SequenceFile.Reader(fs, outputFile, conf);
//read the input file to assert
BufferedReader inputReader = new BufferedReader(new FileReader(inputFile));
String expectedHostname = InetAddress.getLocalHost().getHostName();
//Read input and output back comparing each
int i = 0;
while (sequenceReader.next(key, record)) {
String line = inputReader.readLine();
assertNotNull("Sequence file contains more records than input file", line);
long expectedTime = sdf.parse(line.substring(0,23)).getTime();
calendar.setTimeInMillis(expectedTime);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
String expectedKey = calendar.getTimeInMillis() + "/" +
expectedHostname + "/" + expectedTime;
String expectedTags = "cluster=\"" + clusterName + "\"";
//assert key
assertEquals("Invalid key found for record " + i, expectedKey, key.getKey());
assertEquals("Invalid dataType found for record " + i, dataType, key.getReduceType());
//assert record
assertEquals("Invalid record time for record " + i, expectedTime, record.getTime());
assertEquals("Invalid body for record " + i, line, record.getValue("body"));
assertEquals("Invalid capp for record " + i, streamName, record.getValue("capp"));
assertEquals("Invalid csource for record " + i, expectedHostname, record.getValue("csource"));
assertEquals("Invalid ctags for record " + i, expectedTags , record.getValue("ctags").trim());
i++;
}
sequenceReader.close();
inputReader.close();
}
} | 8,107 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestCopySequenceFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
import java.io.File;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.io.IOException;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.util.CopySequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
public class TestCopySequenceFile extends TestCase {
  // Set by testCopySequenceFile() and inspected by tearDown() to decide
  // which intermediate files still need cleanup.
  File doneFile = null;
  File tempDir = null;
  String tempFileName = null;

  /**
   * Writes a valid ".chukwa" sequence file, converts it with
   * CopySequenceFile.createValidSequenceFile(), and asserts that the
   * ".chukwa" file was removed and a ".done" file created in its place.
   */
  public void testCopySequenceFile() throws IOException {
    //Create a .chukwa sequence file
    tempDir = new File(System.getProperty("test.build.data", "/tmp"));
    File tempFile = File.createTempFile("testcopy", ".chukwa", tempDir);
    tempFile.deleteOnExit(); // Will delete this file if test fails and file is not renamed to .done
    tempFileName=tempFile.getName();
    Configuration conf = new Configuration();
    Path path = new Path(tempFile.getAbsolutePath());
    List<ChunkImpl> chunks = new ArrayList<ChunkImpl>();
    byte[] dat = "test".getBytes();
    ChunkImpl c = new ChunkImpl("Data", "aname", dat.length, dat, null);
    chunks.add(c);
    dat = "ing".getBytes();
    // seqID dat.length+4: presumably the end offset within the stream
    // (4 earlier bytes + 3 new) — TODO confirm against ChunkImpl semantics.
    c = new ChunkImpl("Data", "aname", dat.length+4, dat, null);
    chunks.add(c);
    //Utilize the writeSeqFile method to create a valid .chukwa sequence file
    writeSeqFile(conf, FileSystem.getLocal(conf), path, chunks);
    //Call CopySequenceFile to convert .chukwa to .done
    CopySequenceFile.createValidSequenceFile(conf, tempDir.getAbsolutePath(), tempFile.getName(), FileSystem.getLocal(conf));
    //Assert that the chukwa file has been deleted
    assertFalse("File " + tempFile.getAbsolutePath() + " has not been deleted", tempFile.exists()) ;
    String doneFilePath= tempDir.getAbsolutePath()+"/"+tempFileName.replace(".chukwa", ".done");
    doneFile= new File(doneFilePath);
    //Assert that the done file has been created
    assertTrue("File " + doneFilePath + " has not been created", doneFile.exists());
  }

  /**
   * Writes the given chunks to dest as a Chukwa archive sequence file
   * (ChukwaArchiveKey -> ChunkImpl, uncompressed).
   */
  public static void writeSeqFile(Configuration conf, FileSystem fileSys, Path dest,
                                  List<ChunkImpl> chunks) throws IOException {
    FSDataOutputStream out = fileSys.create(dest);
    Calendar calendar = Calendar.getInstance();
    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
                                        ChukwaArchiveKey.class, ChunkImpl.class,
                                        SequenceFile.CompressionType.NONE, null);
    for (ChunkImpl chunk: chunks) {
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      // Truncate "now" to the start of the current minute so all chunks
      // share one time partition.
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }

  /**
   * Cleans up whichever artifact the copy attempt left behind: the .done
   * file on success, otherwise any intermediate .recover or .recoverDone
   * file from a failed/partial conversion.
   */
  protected void tearDown() {
    if (doneFile != null && doneFile.exists()){
      doneFile.delete();
    } else { //Cleanup any files that may have been created during a failed copy attempt
      File recoverFile = new File(tempDir.getAbsolutePath()+"/"+tempFileName.replace(".chukwa", ".recover"));
      if (recoverFile.exists()){
        recoverFile.delete();
      } else {
        File recoverDoneFile = new File(tempDir.getAbsolutePath()+"/"+tempFileName.replace(".chukwa", ".recoverDone"));
        if (recoverDoneFile.exists()){
          recoverDoneFile.delete();
        }
      }
    }
  }
}
| 8,108 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestCRValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import junit.framework.TestCase;
import static org.apache.hadoop.chukwa.util.ConstRateValidator.ByteRange;
import static org.apache.hadoop.chukwa.util.ConstRateValidator.ValidatorSM;
import static org.apache.hadoop.chukwa.util.TempFileUtil.writeASinkFile;
public class TestCRValidator extends TestCase{
public void testCRchunks() {
ConstRateAdaptor adaptor = new ConstRateAdaptor();
adaptor.parseArgs("500 200 ");
adaptor.test_init("testdata");
Chunk c = adaptor.nextChunk(100);
assertTrue(ConstRateAdaptor.checkChunk(c));
c = adaptor.nextChunk(102);
assertTrue(ConstRateAdaptor.checkChunk(c));
}
public void testBasicSM() throws Exception {
ValidatorSM sm = new ValidatorSM();
byte[] dat = "test".getBytes();
ChunkImpl c = new ChunkImpl("Data", "aname", dat.length, dat, null);
ByteRange b = new ByteRange(c);
assertEquals(4, b.len);
assertEquals(0, b.start);
String t = sm.advanceSM(b);
assertNull(t);
if(t != null)
System.out.println(t);
dat = "ing".getBytes();
c = new ChunkImpl("Data", "aname", dat.length+4, dat, null);
b = new ByteRange(c);
assertEquals(4, b.start);
t = sm.advanceSM(b);
assertNull(t);
if(t != null)
System.out.println(t);
b = new ByteRange(new ChunkImpl("Data", "aname", 12, "more".getBytes(), null));
t= sm.advanceSM(b);
System.out.println(t);
}
public void testSlurping() throws Exception {
int NUM_CHUNKS = 10;
Configuration conf = new Configuration();
FileSystem localfs = FileSystem.getLocal(conf);
String baseDir = System.getProperty("test.build.data", "/tmp");
Path tmpFile = new Path(baseDir+"/tmpSeqFile.seq");
writeASinkFile(conf, localfs, tmpFile, NUM_CHUNKS);
ValidatorSM sm = new ValidatorSM();
try {
SequenceFile.Reader reader = new SequenceFile.Reader(localfs, tmpFile, conf);
ChukwaArchiveKey key = new ChukwaArchiveKey();
ChunkImpl chunk = ChunkImpl.getBlankChunk();
while (reader.next(key, chunk)) {
String s = sm.advanceSM(new ByteRange(chunk));
assertNull(s);
}
reader.close();
assertEquals(NUM_CHUNKS, sm.chunks);
localfs.delete(tmpFile);
} catch(IOException e) {
e.printStackTrace();
}
}
}
| 8,109 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestXSSFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
public class TestXSSFilter extends TestCase {

  /**
   * XssFilter must strip script-injection markup while preserving the
   * surrounding benign characters.
   */
  public void testFilter() {
    XssFilter filter = new XssFilter();

    // Each row is {raw input, expected filtered output}.
    String[][] cases = {
        {"<XSS>", ""},
        {"\'\';!--\"<XSS>=&{()}", "\'\';!--\"=&{()}"},
        {"<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">", "\">"},
    };
    for (String[] testCase : cases) {
      assertEquals(testCase[1], filter.filter(testCase[0]));
    }
  }
}
| 8,110 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/TestFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.util.RegexUtil.CheckedPatternSyntaxException;
public class TestFilter extends TestCase {

  /**
   * A compound host/cluster/datatype filter must match only chunks where
   * every field matches, while Filter.ALL matches any chunk. Content
   * patterns must match the whole chunk body.
   */
  public void testBasicPatternMatching() {
    try {
      Filter rules = new Filter("host=foo.*&cluster=bar&datatype=Data");
      assertEquals(3, rules.size());
      byte[] dat = "someText".getBytes();
      // Wrong datatype, no host/cluster set: no match.
      ChunkImpl chunkNone = new ChunkImpl("badData","aname", dat.length, dat, null);
      assertFalse(rules.matches(chunkNone));
      assertTrue(Filter.ALL.matches(chunkNone));
      //do the right thing on a non-match
      // host and cluster match but datatype does not: still no match.
      ChunkImpl chunkSome = new ChunkImpl("badData", "aname", dat.length, dat, null);
      chunkSome.setSource("fooly");
      chunkSome.addTag("cluster=\"bar\"");
      assertFalse(rules.matches( chunkSome));
      assertTrue(Filter.ALL.matches(chunkSome));
      // All three fields match.
      ChunkImpl chunkAll = new ChunkImpl("Data", "aname", dat.length, dat, null);
      chunkAll.setSource("fooly");
      chunkAll.addTag("cluster=\"bar\"");
      assertTrue(rules.matches(chunkAll));
      assertTrue(Filter.ALL.matches(chunkAll));
      //check that we match content correctly
      rules = new Filter("content=someText");
      assertTrue(rules.matches(chunkAll));
      // A prefix of the body is not a match: the pattern must cover the
      // whole content unless written with wildcards.
      rules = new Filter("content=some");
      assertFalse(rules.matches( chunkAll));
      rules = new Filter("datatype=Data&content=.*some.*");
      assertTrue(rules.matches( chunkAll));
    } catch(Exception e) {
      fail("exception " + e);
    }
  }

  /**
   * tags.NAME=PATTERN filters must match against the chunk's tag values,
   * and extra unrelated tags must not break an existing match.
   */
  public void testClusterPatterns() {
    byte[] dat = "someText".getBytes();
    ChunkImpl chunk1 = new ChunkImpl("Data", "aname", dat.length, dat, null);
    chunk1.setSource("asource");
    assertTrue(Filter.ALL.matches(chunk1));
    Filter rule = null;
    try {
      rule = new Filter("tags.foo=bar");
    } catch (CheckedPatternSyntaxException e) {
      e.printStackTrace();
      fail("Regular expression error: " + e);
    }
    // No foo tag yet: no match.
    assertFalse(rule.matches(chunk1));
    chunk1.addTag("foo=\"bar\"");
    assertTrue(rule.matches(chunk1));
    // An additional unrelated tag must not disturb the match.
    chunk1.addTag("baz=\"quux\"");
    assertTrue(rule.matches(chunk1));
    assertTrue(Filter.ALL.matches(chunk1));
  }

  /**
   * Constructing a Filter from a malformed regex must raise
   * CheckedPatternSyntaxException rather than an unchecked error.
   */
  public void testIllegalRegex() {
    try {
      new Filter("tags.foo=(");
    } catch (CheckedPatternSyntaxException e) {
      return;
    }
    fail("No CheckedPatternSyntaxException thrown for illegal regular expression.");
  }
}
| 8,111 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/util/DriverManagerUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.util;
import java.sql.Connection;
import java.sql.SQLException;
import org.apache.hadoop.chukwa.util.DriverManagerUtil.ConnectionInfo;
import junit.framework.TestCase;
public class DriverManagerUtilTest extends TestCase {

  /** Loading the JDBC driver class must not throw; the class is printed. */
  public void testLoadDriver() throws ClassNotFoundException {
    Class<?> clazz = DriverManagerUtil.loadDriver();
    System.out.println(clazz);
  }

  /**
   * ConnectionInfo must split a JDBC URL into the bare URI plus a
   * property map parsed from the query string, tolerating trailing
   * ampersands, keys without '=', and empty values.
   */
  public void testGetConnectionInfo() {
    // No query string: empty property map.
    {
      String url = "jdbc:mysql://localhost:3306/demo";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(0, ci.getProperties().size());
    }
    // Single key=value pair.
    {
      String url = "jdbc:mysql://localhost:3306/demo?user=example";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(1, ci.getProperties().size());
      assertEquals("example", ci.getProperties().get("user"));
    }
    // Trailing '&' is ignored.
    {
      String url = "jdbc:mysql://localhost:3306/demo?user=example&";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(1, ci.getProperties().size());
      assertEquals("example", ci.getProperties().get("user"));
    }
    // A key with no '=' yields an empty-string value.
    {
      String url = "jdbc:mysql://localhost:3306/demo?user=example&pwd";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(2, ci.getProperties().size());
      assertEquals("example", ci.getProperties().get("user"));
      assertEquals("", ci.getProperties().get("pwd"));
    }
    // A key with '=' but no value also yields an empty string.
    {
      String url = "jdbc:mysql://localhost:3306/demo?user=example&pwd=";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(2, ci.getProperties().size());
      assertEquals("example", ci.getProperties().get("user"));
      assertEquals("", ci.getProperties().get("pwd"));
    }
    // Two full key=value pairs.
    {
      String url = "jdbc:mysql://localhost:3306/demo?user=example&pwd=ppppp";
      ConnectionInfo ci = new ConnectionInfo(url);
      assertEquals("jdbc:mysql://localhost:3306/demo", ci.getUri());
      assertEquals(2, ci.getProperties().size());
      assertEquals("example", ci.getProperties().get("user"));
      assertEquals("ppppp", ci.getProperties().get("pwd"));
    }
  }

  // NOTE(review): deliberately disabled via if(false) — presumably kept as
  // a manual smoke test against a local MySQL instance; confirm intent
  // before enabling or deleting.
  public void testGetConnection() throws ClassNotFoundException, SQLException, InstantiationException, IllegalAccessException {
    if(false) {
      DriverManagerUtil.loadDriver().newInstance();
      String url = "jdbc:mysql://localhost:3306/test?user=root&pwd=";
      Connection conn = DriverManagerUtil.getConnection(url);
    }
  }
}
| 8,112 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/DemuxDirectoryValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework;
import java.io.File;
import java.net.URI;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.validationframework.util.DataOperations;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class DemuxDirectoryValidator {
  static Configuration conf = null;
  static FileSystem fs = null;

  public static void usage() {
    System.out.println("Usage ...");
  }

  /**
   * Compares a gold demux output tree against a test tree and throws a
   * RuntimeException on the first mismatch.
   *
   * @param isLocal true to compare local directories, false to compare HDFS
   *        directories through {@code fs}
   * @param fs filesystem used when {@code isLocal} is false (may be null for
   *        local comparisons)
   * @param conf configuration used to read the sequence files
   * @param directories two-element array: { goldDirectory, testDirectory }
   */
  public static void validate(boolean isLocal, FileSystem fs,
      Configuration conf, String[] directories) {
    DemuxDirectoryValidator.fs = fs;
    DemuxDirectoryValidator.conf = conf;
    try {
      if (isLocal) {
        compareLocalDirectory(directories[0], directories[1]);
      } else {
        compareHDFSDirectory(directories[0], directories[1]);
      }
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException("Validation failed! [" + directories[0] + "]["
          + directories[1] + "]", e);
    }
  }

  /**
   * Command-line entry point.
   * Usage: (-local | -hdfs) goldDirectory testDirectory
   *
   * Fix: the old -local branch ran the comparison directly AND then again via
   * validate(), doing the whole (expensive) walk twice.
   */
  public static void main(String[] args) {
    if (args.length != 3) {
      usage();
      return;
    }
    String demuxGoldDirectory = args[1];
    String demuxTestDirectory = args[2];
    boolean isLocal;

    if ("-local".equalsIgnoreCase(args[0])) {
      isLocal = true;
    } else if ("-hdfs".equalsIgnoreCase(args[0])) {
      isLocal = false;
      conf = new ChukwaConfiguration();
      String fsName = conf.get("writer.hdfs.filesystem");
      try {
        fs = FileSystem.get(new URI(fsName), conf);
      } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
      }
    } else {
      System.out.println("Wrong first argument");
      usage();
      return;
    }

    String[] dirs = { demuxGoldDirectory, demuxTestDirectory };
    validate(isLocal, fs, conf, dirs);
    System.out.println("Gold and test directories are equivalent");
  }

  /**
   * Recursively compares a gold HDFS directory against a test directory:
   * every non-.crc file in the gold tree must have equivalent ChukwaRecords
   * in the test tree. "_logs" sub-directories are skipped.
   */
  public static void compareHDFSDirectory(String gold, String test) {
    try {
      Path goldDirectory = new Path(gold);
      FileStatus[] goldFiles = fs.listStatus(goldDirectory);

      for (int i = 0; i < goldFiles.length; i++) {
        String fileName = goldFiles[i].getPath().getName();
        // Skip the crc files
        if (fileName.endsWith(".crc")) {
          continue;
        }
        System.out.println("Testing [" + fileName + "]");

        if (goldFiles[i].isDir()) {
          // Skip the _logs directory
          if (fileName.equalsIgnoreCase("_logs")) {
            continue;
          }
          compareHDFSDirectory(gold + "/" + fileName, test + "/" + fileName);
        } else {
          boolean isTheSame = DataOperations.validateChukwaRecords(fs, conf,
              goldFiles[i].getPath(), new Path(test + "/" + fileName));
          if (!isTheSame) {
            throw new RuntimeException(
                "ChukwaRecords validation error: for Gold & test [" + gold
                    + "/" + fileName + "] [" + test
                    + "/" + fileName
                    + "] are not the same");
          }
        }
      }
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException(e);
    }
  }

  /**
   * Recursively compares a gold local directory against a test directory by
   * file name and MD5. "_logs" sub-directories are skipped.
   *
   * NOTE(review): pairs entries positionally, so this relies on File.list()
   * returning both directories in the same order — confirm that assumption.
   */
  public static void compareLocalDirectory(String gold, String test) {
    String[] goldFiles = new File(gold).list();
    String[] testFiles = new File(test).list();
    // Fix: File.list() returns null for an unreadable/non-directory path.
    if (goldFiles == null || testFiles == null) {
      throw new RuntimeException("Gold & test dirrectories [" + gold + "]["
          + test + "] are not both readable directories");
    }
    for (int i = 0; i < goldFiles.length; i++) {
      // Fix: compare by content via equals(), not by interned reference
      // identity; also guard against the test directory having fewer entries.
      if (i >= testFiles.length || !goldFiles[i].equals(testFiles[i])) {
        throw new RuntimeException("Gold & test dirrectories [" + gold + "/"
            + goldFiles[i] + "] are not the same");
      }
      File g = new File(gold + "/" + goldFiles[i]);
      if (g.isDirectory()) {
        // Skip the _logs directory
        if (goldFiles[i].equalsIgnoreCase("_logs")) {
          continue;
        }
        compareLocalDirectory(gold + "/" + goldFiles[i], test + "/"
            + goldFiles[i]);
      } else {
        boolean md5 = DataOperations.validateMD5(gold + "/" + goldFiles[i],
            test + "/" + goldFiles[i]);
        if (!md5) {
          throw new RuntimeException("MD5 for Gold & test [" + gold + "/"
              + goldFiles[i] + "] are not the same");
        }
      }
    }
  }
}
| 8,113 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/util/MD5.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework.util;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.security.MessageDigest;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class MD5 {

  /**
   * Computes the MD5 checksum of a local file.
   *
   * @param file the file to digest
   * @return the digest as a "0x"-prefixed upper-case hex string, or null if
   *         the file cannot be read or MD5 is unavailable
   */
  public static String checksum(File file) {
    // Fix: try-with-resources closes the stream even on error; the old code
    // leaked the stream whenever read() or getInstance() threw.
    try (InputStream fin = new FileInputStream(file)) {
      return toHexString(digest(fin));
    } catch (Exception e) {
      return null;
    }
  }

  /**
   * Computes the MD5 checksum of an HDFS file.
   *
   * @param fs the filesystem the file lives on
   * @param file the path to digest
   * @return the digest as a "0x"-prefixed upper-case hex string, or null on
   *         any error
   */
  public static String checksum(FileSystem fs, Path file) {
    try (FSDataInputStream fin = fs.open(file)) {
      return toHexString(digest(fin));
    } catch (Exception e) {
      return null;
    }
  }

  /** Streams the whole input through an MD5 digester and returns the digest. */
  private static byte[] digest(InputStream in) throws Exception {
    MessageDigest md5er = MessageDigest.getInstance("MD5");
    byte[] buffer = new byte[1024];
    int read;
    while ((read = in.read(buffer)) != -1) {
      if (read > 0) {
        md5er.update(buffer, 0, read);
      }
    }
    return md5er.digest();
  }

  /** Formats a digest as "0x"-prefixed upper-case hex; null-in, null-out. */
  private static String toHexString(byte[] digest) {
    if (digest == null) {
      return null;
    }
    // StringBuilder instead of the old O(n^2) String concatenation loop.
    StringBuilder hex = new StringBuilder("0x");
    for (byte b : digest) {
      hex.append(Integer.toString((b & 0xff) + 0x100, 16).substring(1)
          .toUpperCase());
    }
    return hex.toString();
  }
}
| 8,114 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/util/DataOperations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework.util;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Logger;
public class DataOperations {
  static Logger log = Logger.getLogger(DataOperations.class);

  /**
   * Copies a local file byte-for-byte.
   *
   * @param fromFileName source path
   * @param toFileName destination path (overwritten if it exists)
   * @throws IOException if either file cannot be opened, read, written or closed
   */
  public static void copyFile(String fromFileName, String toFileName)
      throws IOException {
    File fromFile = new File(fromFileName);
    File toFile = new File(toFileName);
    // Fix: try-with-resources; the old code silently swallowed close()
    // failures, which could hide an incomplete flush of the target file.
    try (FileInputStream from = new FileInputStream(fromFile);
        FileOutputStream to = new FileOutputStream(toFile)) {
      byte[] buffer = new byte[4096];
      int bytesRead;
      while ((bytesRead = from.read(buffer)) != -1) {
        to.write(buffer, 0, bytesRead);
      }
    }
  }

  /**
   * Compares two local files by MD5.
   *
   * @return true only when both checksums could be computed and are equal
   */
  public static boolean validateMD5(String inputFile, String testFile) {
    String goldMd5 = MD5.checksum(new File(inputFile));
    String testMd5 = MD5.checksum(new File(testFile));
    // Fix: MD5.checksum returns null on error; treat that as a mismatch
    // instead of throwing NullPointerException like the old intern() compare.
    return goldMd5 != null && goldMd5.equals(testMd5);
  }

  /**
   * Compares two HDFS files by MD5.
   *
   * @return true only when both checksums could be computed and are equal
   */
  public static boolean validateMD5(FileSystem fs, Path inputFile, Path testFile) {
    String goldMd5 = MD5.checksum(fs, inputFile);
    String testMd5 = MD5.checksum(fs, testFile);
    return goldMd5 != null && goldMd5.equals(testMd5);
  }

  /**
   * Compares two sequence files of ChukwaRecords positionally, key by key and
   * value by value.
   *
   * @return true when every gold record has an equal counterpart at the same
   *         position in the test file; false on any difference or read error
   */
  public static boolean validateChukwaRecords(FileSystem fs,
      Configuration conf, Path inputFile, Path testFile) {
    SequenceFile.Reader goldReader = null;
    SequenceFile.Reader testReader = null;
    try {
      goldReader = new SequenceFile.Reader(fs, inputFile, conf);
      testReader = new SequenceFile.Reader(fs, testFile, conf);

      ChukwaRecordKey goldKey = new ChukwaRecordKey();
      ChukwaRecord goldRecord = new ChukwaRecord();
      ChukwaRecordKey testKey = new ChukwaRecordKey();
      ChukwaRecord testRecord = new ChukwaRecord();

      while (goldReader.next(goldKey, goldRecord)) {
        // Fix: the old code ignored this result, so a shorter test file was
        // compared against stale data from the previous iteration.
        if (!testReader.next(testKey, testRecord)) {
          log.info(">>>>>>>>>>>>>> Test file has fewer records than gold file");
          return false;
        }
        if (goldKey.compareTo(testKey) != 0) {
          log.info(">>>>>>>>>>>>>> Not the same Key");
          log.info(">>>>>>>>>>>>>> Record [" + goldKey.getKey() + "] ["
              + goldKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Record [" + testKey.getKey() + "] ["
              + testKey.getReduceType() + "]");
          return false;
        }
        if (goldRecord.compareTo(testRecord) != 0) {
          log.info(">>>>>>>>>>>>>> Not the same Value");
          log.info(">>>>>>>>>>>>>> Record [" + goldKey.getKey() + "] ["
              + goldKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Record [" + testKey.getKey() + "] ["
              + testKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Gold Value [" + goldRecord.toString() + "]");
          log.info(">>>>>>>>>>>>>> Test value [" + testRecord.toString() + "]");
          return false;
        }
      }
      return true;
    } catch (IOException e) {
      e.printStackTrace();
      return false;
    } finally {
      // Fix: null-guard both readers; the old finally block threw
      // NullPointerException whenever opening either reader had failed.
      try {
        if (goldReader != null) {
          goldReader.close();
        }
      } catch (IOException e) {
      }
      try {
        if (testReader != null) {
          testReader.close();
        }
      } catch (IOException e) {
      }
    }
  }

  /**
   * Rebuilds the raw log from a data-sink sequence file: reads every chunk in
   * <directory><fileName>.done and appends its payload to
   * <directory><fileName>.raw.
   */
  public static void extractRawLogFromdataSink(String directory, String fileName)
      throws Exception {
    ChukwaConfiguration conf = new ChukwaConfiguration();
    String fsName = conf.get("writer.hdfs.filesystem");
    FileSystem fs = FileSystem.get(new URI(fsName), conf);

    SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(directory
        + fileName + ".done"), conf);
    try {
      ChukwaArchiveKey key = new ChukwaArchiveKey();
      ChunkImpl chunk = ChunkImpl.getBlankChunk();
      FileWriter out = new FileWriter(new File(directory + fileName + ".raw"));
      try {
        while (r.next(key, chunk)) {
          // NOTE(review): decodes with the platform default charset, as before.
          out.write(new String(chunk.getData()));
        }
      } finally {
        out.close();
      }
    } finally {
      r.close();
    }
  }

  /**
   * Rebuilds the raw log from a binary chunk dump: reads ChunkImpl objects
   * from <directory><fileName>.bin until EOF and appends each payload to
   * <directory><fileName>.raw.
   */
  public static void extractRawLogFromDump(String directory, String fileName)
      throws Exception {
    File inputFile = new File(directory + fileName + ".bin");
    File outputFile = new File(directory + fileName + ".raw");
    // Fix: try-with-resources; the old code leaked both streams on exception.
    try (DataInputStream dis = new DataInputStream(new FileInputStream(inputFile));
        FileWriter out = new FileWriter(outputFile)) {
      boolean eof = false;
      do {
        try {
          Chunk chunk = ChunkImpl.read(dis);
          out.write(new String(chunk.getData()));
        } catch (EOFException e) {
          eof = true; // expected terminator for the dump format
        }
      } while (!eof);
    }
  }
}
| 8,115 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/interceptor/ChunkDumper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework.interceptor;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.hadoop.chukwa.Chunk;
public class ChunkDumper {
static public String testRepositoryDumpDir = "/tmp/chukwaDump/";
static HashMap<String, DataOutputStream> hash = new HashMap<String, DataOutputStream>();
public static void dump(String component, Chunk chunk) {
String fileName = chunk.getStreamName();
if (!hash.containsKey(component + "-" + fileName)) {
File directory = new File(testRepositoryDumpDir + "/" + component);
if (!directory.exists()) {
directory.mkdirs();
}
String name = fileName;
if (fileName.indexOf("/") >= 0) {
name = fileName.substring(fileName.lastIndexOf("/"));
}
name += ".bin";
synchronized (name.intern()) {
System.out.println("FileName [" + name + "]");
try {
DataOutputStream dos = new DataOutputStream(new FileOutputStream(
new File(testRepositoryDumpDir + "/" + component + "/" + name)));
System.out.println("Writing to [" + testRepositoryDumpDir + "/"
+ component + "/" + name + "]");
hash.put(component + "-" + fileName, dos);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
}
}
String key = component + "-" + fileName;
synchronized (key.intern()) {
DataOutputStream dos = hash.get(key);
try {
chunk.write(dos);
dos.flush();
} catch (IOException e) {
e.printStackTrace();
}
}
}
static void close() {
Iterator<String> it = hash.keySet().iterator();
while (it.hasNext()) {
String key = it.next();
DataOutputStream dos = hash.get(key);
try {
dos.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
| 8,116 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/interceptor/SetupTestClasses.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework.interceptor;
import java.lang.reflect.Field;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
public class SetupTestClasses {

  /** Installs every test interceptor. */
  public static void setupClasses() throws Throwable {
    setupChunkQueueInterceptor();
  }

  /**
   * Replaces DataFactory's private "chunkQueue" field, via reflection, with a
   * ChunkQueueInterceptor wrapping the original event queue. Does nothing if
   * no field of that name exists.
   */
  static protected void setupChunkQueueInterceptor() throws Throwable {
    DataFactory factory = DataFactory.getInstance();
    ChunkQueue originalQueue = factory.getEventQueue();
    for (Field field : DataFactory.class.getDeclaredFields()) {
      if (!"chunkQueue".equals(field.getName())) {
        continue;
      }
      field.setAccessible(true);
      field.set(factory, new ChunkQueueInterceptor(originalQueue));
      System.out.println("Adding QueueInterceptor");
      break;
    }
  }
}
| 8,117 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/validationframework/interceptor/ChunkQueueInterceptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.validationframework.interceptor;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
public class ChunkQueueInterceptor implements
    org.apache.hadoop.chukwa.datacollection.ChunkQueue {

  /** The real queue that every operation is forwarded to. */
  private ChunkQueue delegate = null;

  public ChunkQueueInterceptor(ChunkQueue defaultQueue) {
    this.delegate = defaultQueue;
  }

  /** Dumps the incoming chunk under "adaptor", then enqueues it. */
  @Override
  public void add(Chunk chunk) throws InterruptedException {
    ChunkDumper.dump("adaptor", chunk);
    delegate.add(chunk);
  }

  /** Delegates collection first, then dumps each collected chunk under "sender". */
  @Override
  public void collect(List<Chunk> chunks, int count)
      throws InterruptedException {
    delegate.collect(chunks, count);
    for (Chunk collected : chunks) {
      ChunkDumper.dump("sender", collected);
    }
  }

  /** Number of chunks currently queued in the wrapped queue. */
  @Override
  public int size() {
    return delegate.size();
  }
}
| 8,118 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/caffe/TestMemoryUsageDetection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.caffe;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
* (1) Run non-stop terasort and teragen
* (2) Collect memory usage metrics from hbase every 5 minutes for 10 hours and write to csv files in /caffe-test/train/data
* (3) Create images of dimension 1000 * 200 from /caffe-test/train/data/*.csv.
* The files are saved in /caffe-test/train/data/*png
* (4) Train the image using caffe
*
*/
public class TestMemoryUsageDetection extends TestCase {

  public void setUp() {}

  public void tearDown() {}

  /**
   * Drives the end-to-end scenario: run terasort/teragen in the background,
   * sample NodeManager memory metrics into csv files, render them as images,
   * and train caffe on the result.
   */
  public void testMemoryDetection () {
    String dirName = "/caffe-test/train";
    Thread teraSortThread = createTeraSortThread ();
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Future<?> task = executor.submit(teraSortThread);
    collectNodeManagerMetrics (dirName + "/data");
    task.cancel (true);
    executor.shutdown ();
    caffeTrain (dirName);
  }

  /**
   * Executes the script at scriptPath and echoes its stdout line by line.
   * Fix: try-with-resources closes the reader even when the script fails; the
   * old code had two duplicated exec-and-drain copies that leaked the reader.
   */
  private void runScript(String scriptPath) {
    try {
      Process proc = Runtime.getRuntime().exec(scriptPath);
      try (BufferedReader reader = new BufferedReader(
          new InputStreamReader(proc.getInputStream()))) {
        String line;
        while ((line = reader.readLine()) != null) {
          System.out.println(line + "\n");
        }
      }
    } catch (Exception e) {
      fail(ExceptionUtil.getStackTrace(e));
    }
  }

  /** Wraps the non-stop terasort/teragen script in a background thread. */
  private Thread createTeraSortThread () {
    return new Thread(new Runnable() {
      public void run() {
        runScript("/caffe-test/tera/tera.sh");
      }
    });
  }

  /**
   * Collect memory usage data every 15 min.
   * Stop the timer after 10 hours, then draw 1000 x 200 images from the
   * collected csv files.
   */
  private void collectNodeManagerMetrics(String dirName) {
    int intervalInMilli = 15 * 60 * 1000;
    long timerDurationTime = 10 * 60 * 60 * 1000;
    String hostname = "";
    try {
      hostname = InetAddress.getLocalHost().getHostName();
      System.out.println (hostname);
    } catch (IOException e) {
      fail(ExceptionUtil.getStackTrace(e));
    }
    MetricsCollector collector = new MetricsCollector (intervalInMilli, hostname, dirName);
    collector.start ();
    try {
      Thread.sleep (timerDurationTime);
    } catch (InterruptedException e) {
      // Fix: restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }
    collector.cancel ();
    // draw images of size 1000 * 200 from the collected csv files
    try {
      ImageCreator generator = new ImageCreator (dirName);
      generator.drawImages ();
    } catch (Exception e) {
      fail(ExceptionUtil.getStackTrace(e));
    }
  }

  /** Trains caffe on the generated images via dirName/train.sh. */
  private void caffeTrain (String dirName) {
    runScript(dirName + "/train.sh");
  }
}
| 8,119 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/caffe/MetricsCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.caffe;
import java.io.BufferedWriter;
import java.io.PrintWriter;
import java.util.Calendar;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TimeZone;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.chukwa.datastore.ChukwaHBaseStore;
import org.apache.hadoop.chukwa.hicc.bean.Series;
import org.json.simple.JSONObject;
//export CLASSPATH=/opt/apache/hadoop/etc/hadoop:/opt/apache/hbase/conf:/opt/apache/chukwa-0.8.0/share/chukwa/*:/opt/apache/chukwa-0.8.0/share/chukwa/lib/*:$CLASSPATH
public class MetricsCollector
{
  private Timer getMetricSnapshotTimer = null;
  private long intervalInMilli;
  private String hostname;
  private String dirName;

  /**
   * Periodically samples the NodeManager "MemHeapUsedM" metric for one host
   * from HBase and writes each sample window to a csv file.
   *
   * @param intervalInMilli sampling period in milliseconds; each run fetches
   *        the window [now - intervalInMilli, now]
   * @param hostname host whose NodeManager metrics are collected
   * @param dirName directory the csv files are written into
   */
  public MetricsCollector (long intervalInMilli, String hostname, String dirName) {
    this.intervalInMilli = intervalInMilli;
    this.hostname = hostname;
    this.dirName = dirName;
    getMetricSnapshotTimer = new Timer ("GetMetricSnapshot", true);
  }

  /** Starts sampling immediately, then every intervalInMilli milliseconds. */
  public void start () {
    if (getMetricSnapshotTimer != null)
      getMetricSnapshotTimer.schedule (new GetMetricSnapshotTimerTask (hostname, intervalInMilli, dirName), 0, intervalInMilli);
  }

  /** Stops the sampling timer; the collector cannot be restarted afterwards. */
  public void cancel ()
  {
    if (getMetricSnapshotTimer != null)
      getMetricSnapshotTimer.cancel ();
  }

  // static: the task gets all its state through its constructor, so it does
  // not need (or keep) a hidden reference to the enclosing collector.
  static class GetMetricSnapshotTimerTask extends TimerTask
  {
    private String hostname = null;
    private long intervalInMilli;
    private String dirName;

    /**
     * Normalize the timestamp in time series data to use seconds
     */
    private final static int XSCALE = 1000;

    GetMetricSnapshotTimerTask (String hostname, long intervalInMilli, String dirName)
    {
      this.hostname = hostname;
      this.intervalInMilli = intervalInMilli;
      this.dirName = dirName;
    }

    /** Fetches the metric window ending now and dumps it to a csv file. */
    public void run ()
    {
      TimeZone tz = TimeZone.getTimeZone("UTC");
      Calendar now = Calendar.getInstance(tz);
      long currTime = now.getTimeInMillis();
      System.out.println ("currTime in UTC: " + currTime);
      System.out.println ("currTime in current time zone" + System.currentTimeMillis ());
      long startTime = currTime - intervalInMilli;
      long endTime = currTime;
      try {
        System.out.println ("About to run");
        getHadoopMetrics (startTime, endTime);
        System.out.println ("Done run");
      } catch (Exception e) {
        // Keep the timer alive: an uncaught exception would cancel it.
        e.printStackTrace ();
      }
    }

    /** Reads the MemHeapUsedM series for this host and writes it as csv. */
    private void getHadoopMetrics(long startTime, long endTime) throws Exception
    {
      String source = hostname + ":NodeManager";
      System.out.println ("source: " + source);
      System.out.println ("startTime: " + startTime);
      System.out.println ("endTime: " + endTime);
      Series series = ChukwaHBaseStore.getSeries ("HadoopMetrics.jvm.JvmMetrics.MemHeapUsedM", source, startTime, endTime);
      String value = series.toString ();
      System.out.println ("value: " + value);
      JSONObject jsonObj = (JSONObject) series.toJSONObject ();
      List list = (List) jsonObj.get ("data");
      if (list != null) {
        int size = list.size ();
        System.out.println ("size: " + size);
        if (size > 0 ) {
          String name = "NodeManager" + "_" + "HadoopMetrics.jvm.JvmMetrics.MemHeapUsedM" + "_" + hostname;
          generateCsv (list, name, startTime);
        }
      }
    }

    /**
     * Writes one "timeInSeconds,value" line per data point to
     * dirName/name_startTime.csv.
     *
     * Fix: dropped the old BufferedWriter parameter — it was never used and
     * the field passed for it was always null; the PrintWriter is now closed
     * via try-with-resources even when a data point is malformed.
     */
    private void generateCsv (List list, String name, long startTime) throws Exception
    {
      String fileName = dirName + "/" + name + "_" + startTime;
      try (PrintWriter writer = new PrintWriter(fileName + ".csv", "UTF-8")) {
        int size = list.size ();
        for (int i = 0; i < size; i++) {
          List point = (List) list.get (i);
          long time = (Long) point.get (0) / XSCALE;
          double val = (Double) point.get (1);
          writer.println(time + "," + val);
        }
      }
    }
  }
}
| 8,120 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/caffe/ImageCreator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.caffe;
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.imageio.ImageIO;
/**
* Read csv files to create image files of dimension 1000 * 200
*
*/
public class ImageCreator
{
private static final int X_SIZE = 1000;
private static final int Y_SIZE = 200;
private String dirName = null;
  // Creates an image generator that reads *.csv files from, and writes the
  // *.png files plus labels.txt into, the given directory.
  public ImageCreator (String dirName) {
    this.dirName = dirName;
  }
public void drawImages () throws Exception
{
String outputFileName = dirName + "/labels.txt";
BufferedWriter bufferedWriter = null;
try {
FileWriter fileWriter = new FileWriter(outputFileName);
bufferedWriter = new BufferedWriter(fileWriter);
} catch (IOException e) {
e.printStackTrace ();
}
//int start = 1;
File dir = new File (dirName);
File [] files = dir.listFiles ();
Arrays.sort(files);
// find min and max memory usage
double minMem = 0;
double maxMem = 0;
long minTime = 0L;
long maxTime = 0L;
// image size: 1000 *200
int lineNum = 0;
for (int i = 0; i < files.length; i++) {
String fileName = files [i].getName ();
if (!fileName.endsWith ("csv")) {
continue;
}
//System.out.println (">>>>> " + fileName);
BufferedReader bufferedReader = new BufferedReader(new FileReader(files [i]));
String line = null;
while ((line = bufferedReader.readLine()) != null)
{
lineNum ++;
String [] point = line.split (",");
long time = Long.parseLong (point[0]);
double mem = Double.parseDouble (point[1]);
point [1] = String.valueOf (mem);
if (maxMem == 0 || maxMem < mem){
maxMem = mem;
}
if (minMem == 0 || minMem > mem) {
minMem = mem;
}
if (maxTime == 0 || maxTime < time){
maxTime = time;
}
if (minTime == 0 || minTime > time) {
minTime = time;
}
}
bufferedReader.close ();
}
//System.out.println ("minMem:" + minMem + ", maxMem:" + maxMem + ", total line number: " + lineNum);
//System.out.println ("minTime:" + minTime + ", maxTime:" + maxTime + ", total elapseTime: " + (maxTime - minTime));
List <String []> dataList = new ArrayList<String []> ();
lineNum = 0;
long startTime = 0;
long endTime = 0;
int imageId = 1;
int totalPoint = 0;
for (int i = 0; i < files.length; i++) {
String fileName = files [i].getName ();
if (!fileName.endsWith ("csv")) {
continue;
}
System.out.println (">>>>> " + fileName);
BufferedReader bufferedReader = new BufferedReader(new FileReader(files [i]));
String line = null;
while ((line = bufferedReader.readLine()) != null)
{
lineNum ++;
String [] point = line.split (",");
long time = Long.parseLong (point[0]);
double mem = Double.parseDouble (point[1]);
point [1] = String.valueOf (mem);
if (startTime == 0) {
startTime = time;
}
dataList.add (point);
endTime = time;
long elapseTime = endTime - startTime;
if (elapseTime > X_SIZE) {
totalPoint = totalPoint + dataList.size ();
String imageFileName = "image" + imageId + ".png";
System.out.println ("elapseTime: " + elapseTime + ", data size: " + dataList.size () + ", imageFileName: " + imageFileName);
drawImage (dataList, imageFileName, X_SIZE, Y_SIZE);
bufferedWriter.write (imageFileName + " 0\n");
bufferedWriter.flush ();
dataList.clear ();
startTime = 0;
imageId ++;
}
}
bufferedReader.close ();
}
bufferedWriter.close ();
}
private static void drawImage (List <String []> dataList, String imageFileName, int x_size, int y_size) throws Exception
{
int size = dataList.size ();
String [] startPt = dataList.get (0);
//String [] endPt = dataList.get (size - 1);
long startTimeX = Long.parseLong (startPt [0]);
//long endTimeX = Long.parseLong (endPt [0]);
//System.out.println ("x_size: " + x_size + ", y_size: " + y_size + ", startTimeX: " + startTimeX + ", endTimeX: " + endTimeX);
BufferedImage img = new BufferedImage(x_size, y_size, BufferedImage.TYPE_INT_ARGB);
Graphics2D ig2 = img.createGraphics();
ig2.setBackground(Color.WHITE);
ig2.setColor (Color.BLACK);
ig2.setStroke(new BasicStroke(3));
MyPoint prevPoint = null;
for (int i = 0; i < size; i++) {
String [] point = (String []) dataList.get (i);
long time = Long.parseLong (point[0]);
double mem = Double.parseDouble (point[1]);
MyPoint currPoint = new MyPoint (time, mem);
//System.out.println ("time:" + time + ", mem:" + mem);
if (prevPoint != null) {
ig2.drawLine ((int) (prevPoint.time - startTimeX), (int) (y_size - prevPoint.data), (int) (currPoint.time - startTimeX), (int) (y_size - currPoint.data));
}
prevPoint = currPoint;
}
File f = new File(imageFileName);
ImageIO.write(img, "PNG", f);
}
}
/**
 * Immutable (time, value) sample pair used when plotting csv data.
 * Fields are now {@code final}: nothing in this file writes them after
 * construction, and an immutable holder is safer to share.
 */
class MyPoint
{
  /** Sample timestamp (csv column 0). */
  public final long time;
  /** Sampled value (csv column 1), e.g. a memory reading. */
  public final double data;

  public MyPoint (long time, double data) {
    this.time = time;
    this.data = data;
  }
}
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools/TestInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools;
import java.io.IOException;
import java.util.regex.PatternSyntaxException;
import org.apache.hadoop.mapred.Reporter;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
public class TestInputFormat extends TestCase {

  // Fixture payload: each entry becomes one record (line) in the sink file.
  String[] lines = { "the rain", "in spain", "falls mainly", "in the plain" };

  /** Runs both ChukwaInputFormat scenarios in sequence. NOTE(review): the
   *  second scenario reads the sink file written by the first, so the order
   *  of these calls matters. */
  public void testInputFormat() {
    verifyInputFormatForSequenceFile();
    verifyInputFormatIllegalRegex();
  }

  /**
   * Writes a local sink SequenceFile of (ChukwaArchiveKey, ChunkImpl) holding
   * the fixture lines (the chunk is appended twice), then reads it back
   * through ChukwaInputFormat and asserts that every line is returned in
   * order with a sequentially increasing key, followed by end-of-input.
   */
  private void verifyInputFormatForSequenceFile() {
    try {
      JobConf conf = new JobConf();
      String TMP_DIR = System.getProperty("test.build.data", "/tmp");
      Path filename = new Path("file:///" + TMP_DIR + "/tmpSeqFile");
      SequenceFile.Writer sfw = SequenceFile.createWriter(FileSystem
          .getLocal(conf), conf, filename, ChukwaArchiveKey.class,
          ChunkImpl.class, SequenceFile.CompressionType.NONE, Reporter.NULL);

      // Concatenate the fixture lines; each record offset points at the
      // trailing '\n' of its line.
      StringBuilder buf = new StringBuilder();
      int offsets[] = new int[lines.length];
      for (int i = 0; i < lines.length; ++i) {
        buf.append(lines[i]);
        buf.append("\n");
        offsets[i] = buf.length() - 1;
      }
      ChukwaArchiveKey key = new ChukwaArchiveKey(0, "datatype", "sname", 0);
      ChunkImpl val = new ChunkImpl("datatype", "sname", 0, buf.toString()
          .getBytes(), null);
      val.setRecordOffsets(offsets);
      sfw.append(key, val);
      sfw.append(key, val); // write it twice
      sfw.close();

      long len = FileSystem.getLocal(conf).getFileStatus(filename).getLen();
      InputSplit split = new FileSplit(filename, 0, len, (String[]) null);
      ChukwaInputFormat in = new ChukwaInputFormat();
      RecordReader<LongWritable, Text> r = in.getRecordReader(split, conf,
          Reporter.NULL);
      LongWritable l = r.createKey();
      Text line = r.createValue();
      // The chunk was appended twice, so expect each fixture line twice.
      for (int i = 0; i < lines.length * 2; ++i) {
        boolean succeeded = r.next(l, line);
        assertTrue(succeeded);
        assertEquals(i, l.get());
        assertEquals(lines[i % lines.length], line.toString());
        System.out.println("read line: " + l.get() + " " + line);
      }
      // After all records are consumed the reader must signal end-of-input.
      boolean succeeded = r.next(l, line);
      assertFalse(succeeded);
    } catch (IOException e) {
      e.printStackTrace();
      fail("IO exception " + e);
    }
  }

  /**
   * Sets the datatype filter to the malformed regex "(" and verifies that
   * obtaining a record reader does not propagate a PatternSyntaxException.
   * NOTE(review): the reader 'r' is created only for its side effects and is
   * never read from; the sink file reused here is the one left behind by
   * verifyInputFormatForSequenceFile() — running this method on its own
   * would fail at the getFileStatus call. Confirm this coupling is intended.
   */
  private void verifyInputFormatIllegalRegex() {
    try {
      JobConf conf = new JobConf();
      conf.set("chukwa.inputfilter.datatype", "(");
      String TMP_DIR = System.getProperty("test.build.data", "/tmp");
      Path filename = new Path("file:///" + TMP_DIR + "/tmpSeqFile");
      long len = FileSystem.getLocal(conf).getFileStatus(filename).getLen();
      InputSplit split = new FileSplit(filename, 0, len, (String[]) null);
      ChukwaInputFormat in = new ChukwaInputFormat();
      RecordReader<LongWritable, Text> r = in.getRecordReader(split, conf,
          Reporter.NULL);
    } catch (PatternSyntaxException e) {
      e.printStackTrace();
      fail("Illegal regular expression caused PatternSyntaxException: " + e);
    } catch (IOException e) {
      e.printStackTrace();
      fail("IO exception " + e);
    }
  }
}
| 8,122 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools/log4j/TestChukwaDailyRollingFileAppender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
import java.io.File;
import java.net.URL;
import java.util.Date;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.helpers.OptionConverter;
public class TestChukwaDailyRollingFileAppender extends TestCase {

  /**
   * Verifies that ChukwaDailyRollingFileAppender initializes lazily: after
   * the log4j configuration is loaded, the target log file must not appear
   * on disk until the first logging call is actually made.
   */
  @SuppressWarnings("deprecation")
  public void testlateInit4ChukwaDailyRollingFileAppender() {
    String folder = System.getProperty("test.build.data", "/tmp");
    File tempDir = new File(folder);
    if (!tempDir.exists()) {
      tempDir.mkdirs();
    }

    // Load the dedicated log4j configuration for this test; the properties
    // file expands CHUKWA_TEST_LOG_LATE_INIT into the appender's file path.
    String configuratorClassName = OptionConverter.getSystemProperty(
        LogManager.CONFIGURATOR_CLASS_KEY, null);
    URL url = TestChukwaDailyRollingFileAppender.class
        .getResource("/late-log4j.properties");
    System.getProperties().setProperty("CHUKWA_TEST_LOG_LATE_INIT", folder);
    if (url != null) {
      LogLog
          .debug("Using URL [" + url + "] for automatic log4j configuration.");
      try {
        OptionConverter.selectAndConfigure(url, configuratorClassName,
            LogManager.getLoggerRepository());
      } catch (NoClassDefFoundError e) {
        LogLog.warn("Error during default initialization", e);
      }
    } else {
      Assert.fail("URL should not be null");
    }

    // Remove any file left over from a previous run so the checks below
    // are meaningful.
    File logFile = new File(folder + "/chukwaTestLateLogInit.log");
    if (logFile.exists()) {
      logFile.delete();
    }
    Assert.assertFalse("Log file should not be there", logFile.exists());

    Logger log = Logger.getLogger(ChukwaAgent.class);
    try {
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      // The sleep is only a grace period; restore the interrupt flag.
      Thread.currentThread().interrupt();
    }
    // Still nothing on disk: the appender must not open the file eagerly.
    Assert.assertFalse("Log file should not be there", logFile.exists());

    log.warn("test 123 " + new Date());
    // Fix: this assertion checks that the file NOW exists, but the original
    // message said "should not be there" (copy-paste error).
    Assert.assertTrue("Log file should be there", logFile.exists());
    logFile.delete();
  }
}
| 8,123 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools/log4j/TestTaskLogAppender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.log4j;
import java.io.File;
import java.net.URL;
import java.util.Date;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.helpers.OptionConverter;
public class TestTaskLogAppender extends TestCase {

  /**
   * Exercises TaskLogAppender end to end: load the tasklog log4j
   * configuration, emit one log statement, and verify the per-attempt
   * syslog file appears under CHUKWA_LOG_DIR.
   */
  @SuppressWarnings("deprecation")
  public void testTaskLogAppender() {
    String folder = System.getProperty("test.build.classes");

    // Remove any syslog left over from a previous run so the final
    // existence check is meaningful.
    File logFile = new File(folder + "/userlogs/job_200905220200_13470/attempt_200905220200_13470_r_000000_0/syslog");
    if (logFile.exists()) {
      logFile.delete();
    }
    Assert.assertFalse("Log file should not be there", logFile.exists());

    File baseDir = new File(folder);
    if (!baseDir.exists()) {
      baseDir.mkdirs();
    }

    // Point log4j at the tasklog test configuration; the properties file
    // expands CHUKWA_LOG_DIR into the appender's output path.
    String configuratorClassName = OptionConverter.getSystemProperty(
        LogManager.CONFIGURATOR_CLASS_KEY, null);
    URL configUrl = TestTaskLogAppender.class
        .getResource("/tasklog-log4j.properties");
    System.getProperties().setProperty("CHUKWA_LOG_DIR", folder);
    if (configUrl == null) {
      Assert.fail("URL should not be null");
    } else {
      LogLog.debug("Using URL [" + configUrl
          + "] for automatic log4j configuration.");
      try {
        OptionConverter.selectAndConfigure(configUrl, configuratorClassName,
            LogManager.getLoggerRepository());
      } catch (NoClassDefFoundError e) {
        LogLog.warn("Error during default initialization", e);
      }
    }

    Logger log = Logger.getLogger(TestTaskLogAppender.class);
    try {
      Thread.sleep(2000);
    } catch (Exception e) {
      // grace period only; nothing to do if interrupted
    }
    log.warn("test 123 " + new Date());
    Assert.assertTrue("Log file should exist", logFile.exists());
    logFile.delete();
  }
}
| 8,124 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/inputtools/hdfsusage/HDFSUsagePluginTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.inputtools.hdfsusage;
import java.util.Map.Entry;
import org.apache.hadoop.chukwa.inputtools.hdfsusage.HDFSUsageMetrics;
import org.apache.hadoop.chukwa.inputtools.hdfsusage.HDFSUsagePlugin;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetrics;
import org.apache.hadoop.chukwa.inputtools.jplugin.ChukwaMetricsList;
import org.apache.hadoop.chukwa.inputtools.jplugin.GenericChukwaMetricsList;
import junit.framework.TestCase;
public class HDFSUsagePluginTest extends TestCase {

  /**
   * Smoke test for HDFSUsagePlugin: collect metrics, print them, round-trip
   * the list through its XML form, and print the generically parsed
   * attributes. No assertions — passes as long as nothing throws.
   */
  public void testGetMetrics() throws Throwable {
    HDFSUsagePlugin plugin = new HDFSUsagePlugin();
    plugin.init(new String[0]);

    // First pass: dump the strongly typed metrics.
    ChukwaMetricsList<HDFSUsageMetrics> metricsList = plugin.getMetrics();
    System.out.println(metricsList.getTimestamp());
    for (ChukwaMetrics m : metricsList.getMetricsList()) {
      HDFSUsageMetrics usage = (HDFSUsageMetrics) m;
      System.out.print(usage.getName());
      System.out.println("size: " + usage.getSize());
    }
    System.out.println();

    // Second pass: serialize to XML, parse back generically, and dump the
    // key/value attributes of each metric.
    String xml = metricsList.toXml();
    System.out.println(xml);
    GenericChukwaMetricsList parsed = new GenericChukwaMetricsList(xml);
    System.out.println(metricsList.getTimestamp());
    for (ChukwaMetrics m : parsed.getMetricsList()) {
      System.out.print(m.getKey());
      for (Entry<String, String> attribute : m.getAttributes().entrySet()) {
        System.out.println(attribute.getKey() + ": " + attribute.getValue());
      }
    }
    System.out.println();
  }
}
| 8,125 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/archive/TestArchive.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.archive;
import java.io.IOException;
import java.util.Calendar;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.ToolRunner;
import junit.framework.TestCase;
import static org.apache.hadoop.chukwa.util.TempFileUtil.writeASinkFile;
public class TestArchive extends TestCase {

  /**
   * Recursively prints the directory tree rooted at p to stdout, indenting
   * each level by d. Diagnostic output only; nothing is asserted here.
   */
  public void browseDir(FileSystem fs, Path p, int d) throws IOException {
    for (int i = 0; i < d; ++i) {
      System.out.print(" |");
    }
    FileStatus stat = fs.getFileStatus(p);
    if (stat.isDir()) {
      System.out.println(" \\ " + p.getName());
      FileStatus[] files = fs.listStatus(p);
      for (FileStatus f : files) {
        browseDir(fs, f.getPath(), d + 1);
      }
    }
    else
      System.out.println( p.getName() );
  }

  static final int NUM_HADOOP_SLAVES = 1;

  /** Glob matching every file in the data sink directory. */
  static final Path DATASINK = new Path("/chukwa/logs/*");
  /** A completed (".done") sink file — eligible for archiving. */
  static final Path DATASINKFILE = new Path("/chukwa/logs/foobar.done");
  /** A still-open (".chukwa") sink file — the archiver must leave it alone. */
  static final Path DATASINK_NOTDONE = new Path("/chukwa/logs/foo.chukwa");
  /** Archive file the ChukwaArchiveBuilder run is expected to create
   *  (cluster-grouped layout, hence "foocluster"). */
  static final Path DEST_FILE = new Path("/chukwa/archive/foocluster/HadoopLogProcessor_2008_05_29.arc");
  /** Merged sink file the SinkArchiver pass is expected to promote. */
  static final Path MERGED_DATASINK = new Path("/chukwa/archive/foocluster/HadoopLogProcessor_2008_05_29-0.arc");
  static final Path OUTPUT_DIR = new Path("/chukwa/archive/");
  static final int CHUNKCOUNT = 1000;

  /**
   * Writes a sink file of CHUNKCOUNT chunks and checks that the archiver
   * delivers it to an archive file with the correct name; then re-populates
   * the sink and verifies that SinkArchiver's merge/promotion pass preserves
   * the unfinished sink file and the pre-existing archive file while
   * promoting the finished one.
   *
   * @throws Exception on any cluster, filesystem, or job failure
   */
  public void testArchiving() throws Exception {
    FileSystem fileSys;
    MiniMRCluster mr;
    JobConf jc ;
    System.out.println("starting archive test");
    Configuration conf = new Configuration();
    // Small sort buffers keep the MiniMR job lightweight.
    conf.setInt("io.sort.mb", 1);
    conf.setInt("io.sort.factor", 5);
    conf.setInt("mapred.tasktracker.map.tasks.maximum", 2);
    conf.setInt("mapred.tasktracker.reduce.tasks.maximum", 2);
    // Group archive output by cluster name (see DEST_FILE above).
    conf.set(ChukwaArchiveDataTypeOutputFormat.GROUP_BY_CLUSTER_OPTION_NAME, "true");
    System.setProperty("hadoop.log.dir", System.getProperty(
        "test.build.data", "/tmp"));
    MiniDFSCluster dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true,
        null);
    fileSys = dfs.getFileSystem();
    conf.set("fs.defaultFS", fileSys.getUri().toString());
    System.out.println("filesystem is " + fileSys.getUri());
    mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
        .toString(), 1);
    jc = mr.createJobConf(new JobConf(conf));

    fileSys.delete(new Path("/chukwa"), true);//nuke sink
    writeASinkFile(jc, fileSys, DATASINKFILE, CHUNKCOUNT);
    FileStatus fstat = fileSys.getFileStatus(DATASINKFILE);
    long dataLen = fstat.getLen();
    // Sanity check: the serialized sink must be substantially larger than
    // 50 bytes per chunk.
    assertTrue(dataLen > CHUNKCOUNT * 50);

    String[] archiveArgs = {"DataType", fileSys.getUri().toString() + DATASINK.toString(),
        fileSys.getUri().toString() + OUTPUT_DIR.toString() };
    // Confirm the settings made above survived the MiniMR job-conf copy.
    assertEquals("true", jc.get("archive.groupByClusterName"));
    assertEquals(1, jc.getInt("io.sort.mb", 5));
    int returnVal = ToolRunner.run(jc, new ChukwaArchiveBuilder(), archiveArgs);
    assertEquals(0, returnVal);
    // Every written byte must land in the expected archive file.
    fstat = fileSys.getFileStatus(DEST_FILE);
    assertEquals(dataLen, fstat.getLen());
    Thread.sleep(1000);

    SinkArchiver a = new SinkArchiver();
    fileSys.delete(new Path("/chukwa"), true);
    writeASinkFile(jc, fileSys, DATASINKFILE, CHUNKCOUNT);
    writeASinkFile(jc, fileSys, DATASINK_NOTDONE, 50);
    writeASinkFile(jc, fileSys, DEST_FILE, 10);

    long doneLen = fileSys.getFileStatus(DATASINKFILE).getLen();
    long notDoneLen = fileSys.getFileStatus(DATASINK_NOTDONE).getLen();
    long archFileLen = fileSys.getFileStatus(DEST_FILE).getLen();
    //we now have three files: one closed datasink, one "unfinished" datasink,
    //and one archived. After merge, should have two datasink files,
    //plus the "unfinished" datasink
    a.exec(fileSys, jc);

    browseDir(fileSys, new Path("/"), 0); //OUTPUT_DIR, 0);
    //make sure we don't scramble anything
    assertEquals(notDoneLen, fileSys.getFileStatus(DATASINK_NOTDONE).getLen());
    assertEquals(archFileLen, fileSys.getFileStatus(DEST_FILE).getLen());
    //and make sure promotion worked right
    assertEquals(doneLen, fileSys.getFileStatus(MERGED_DATASINK).getLen());
    mr.shutdown();
    dfs.shutdown();
  }
}
| 8,126 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/TestDemuxReducerConfigs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.ChukwaTestOutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reducer;
import junit.framework.TestCase;
import java.io.IOException;
/**
* Tests that settings related to the Demux mapper do what they should.
*/
public class TestDemuxReducerConfigs extends TestCase {
public static String SAMPLE_RECORD_DATA = "sampleRecordData";
public void testSetDefaultReducerProcessor() throws IOException {
Reducer<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, ChukwaRecord> reducer =
new Demux.ReduceClass();
JobConf conf = new JobConf();
conf.set("chukwa.demux.reducer.default.processor", ",org.apache.hadoop.chukwa.extraction.demux.processor.reducer" +
".MockReduceProcessor");
reducer.configure(conf);
ChukwaRecordKey key = new ChukwaRecordKey("someReduceType", "someKey");
ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
reducer.reduce(key, null, output, Reporter.NULL);
assertEquals("MockReduceProcessor never invoked - no records found", 1, output.data.size());
assertNotNull("MockReduceProcessor never invoked", output.data.get(key));
assertEquals("MockReduceProcessor never invoked - key value incorrect",
"MockReduceProcessorValue",
output.data.get(key).getValue("MockReduceProcessorKey"));
}
public void testSetCustomReducerProcessor() throws IOException {
Reducer<ChukwaRecordKey, ChukwaRecord, ChukwaRecordKey, ChukwaRecord> reducer =
new Demux.ReduceClass();
JobConf conf = new JobConf();
String cus_reduceType = "someReduceType";
conf.set(cus_reduceType, ",org.apache.hadoop.chukwa.extraction.demux.processor.reducer" +
".MockReduceProcessor");
reducer.configure(conf);
ChukwaRecordKey key = new ChukwaRecordKey(cus_reduceType, "someKey");
ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
reducer.reduce(key, null, output, Reporter.NULL);
assertEquals("MockReduceProcessor never invoked - no records found", 1, output.data.size());
assertNotNull("MockReduceProcessor never invoked", output.data.get(key));
assertEquals("MockReduceProcessor never invoked - key value incorrect",
"MockReduceProcessorValue",
output.data.get(key).getValue("MockReduceProcessorKey"));
}
} | 8,127 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/TestDemux.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.File;
import java.io.IOException;
import java.util.Calendar;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import junit.framework.TestCase;
/**
* test the Demux job in one process, using mini-mr.
*
* Unfortunately, this test case needs more jars than the rest of chukwa,
* including hadoop-*-test, commons-cli, and jetty5
*
*
*
*/
/**
 * Tests the Demux job in one process, using mini-mr.
 *
 * Unfortunately, this test case needs more jars than the rest of chukwa,
 * including hadoop-*-test, commons-cli, and jetty5.
 */
public class TestDemux extends TestCase {

  java.util.Random r = new java.util.Random();

  /** Builds one chunk containing a synthetic DataNode log line with a random
   *  millisecond field and suffix, typed for the HadoopLogProcessor. */
  public ChunkImpl getARandomChunk() {
    int ms = r.nextInt(1000);
    String line = "2008-05-29 10:42:22," + ms
        + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
        + r.nextInt() + "\n";
    ChunkImpl c = new ChunkImpl("HadoopLogProcessor", "test",
        line.length() , line.getBytes(), null);
    return c;
  }

  /**
   * Writes {@code chunks} random chunks to {@code dest} as an uncompressed
   * sink SequenceFile of (ChukwaArchiveKey, ChunkImpl) pairs.
   *
   * @throws IOException if the file cannot be created or written
   */
  public void writeASinkFile(Configuration conf, FileSystem fileSys, Path dest,
      int chunks) throws IOException {
    FSDataOutputStream out = fileSys.create(dest);
    Calendar calendar = Calendar.getInstance();
    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, out,
        ChukwaArchiveKey.class, ChunkImpl.class,
        SequenceFile.CompressionType.NONE, null);
    for (int i = 0; i < chunks; ++i) {
      ChunkImpl chunk = getARandomChunk();
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      // FIXME compute this once an hour: the key's time partition is the
      // current wall-clock time truncated to the hour.
      calendar.setTimeInMillis(System.currentTimeMillis());
      calendar.set(Calendar.MINUTE, 0);
      calendar.set(Calendar.SECOND, 0);
      calendar.set(Calendar.MILLISECOND, 0);
      archiveKey.setTimePartition(calendar.getTimeInMillis());
      archiveKey.setDataType(chunk.getDataType());
      archiveKey.setStreamName(chunk.getStreamName());
      archiveKey.setSeqId(chunk.getSeqID());
      seqFileWriter.append(archiveKey, chunk);
    }
    seqFileWriter.close();
    out.close();
  }

  /** Runs the Demux tool over sortInput, writing to sortOutput, and asserts
   *  a zero exit status. */
  private void runDemux(JobConf job, Path sortInput, Path sortOutput)
      throws Exception {
    // Setup command-line arguments to 'sort'
    String[] sortArgs = { sortInput.toString(), sortOutput.toString() };
    assertEquals(ToolRunner.run(job, new Demux(), sortArgs), 0);
  }

  int NUM_HADOOP_SLAVES = 1;
  int LINES = 10000;

  private static final Path DEMUX_INPUT_PATH = new Path("/demux/input");
  private static final Path DEMUX_OUTPUT_PATH = new Path("/demux/output");

  /**
   * End-to-end Demux run: writes LINES chunks into a mini-DFS sink file,
   * runs the Demux job on a mini-MR cluster, and prints throughput figures.
   */
  public void testDemux() {
    try {
      System.out.println("testing demux");
      Configuration conf = new Configuration();
      System.setProperty("hadoop.log.dir", System.getProperty(
          "test.build.data", "/tmp"));
      MiniDFSCluster dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true,
          null);
      FileSystem fileSys = dfs.getFileSystem();
      MiniMRCluster mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
          .toString(), 1);
      writeASinkFile(conf, fileSys, DEMUX_INPUT_PATH, LINES);
      System.out.println("wrote "
          + fileSys.getFileStatus(DEMUX_INPUT_PATH).getLen()
          + " bytes of temp test data");
      long ts_start = System.currentTimeMillis();
      Path inputPath = new Path(fileSys.getUri().toString() + DEMUX_INPUT_PATH);
      Path outputPath = new Path(fileSys.getUri().toString() + DEMUX_OUTPUT_PATH);
      runDemux(mr.createJobConf(), inputPath, outputPath);
      long time = (System.currentTimeMillis() - ts_start);
      long bytes = fileSys.getContentSummary(DEMUX_OUTPUT_PATH).getLength();
      System.out.println("result was " + bytes + " bytes long");
      System.out.println("processing took " + time + " milliseconds");
      System.out.println("aka " + time * 1.0 / LINES + " ms per line or "
          + time * 1000.0 / bytes + " ms per kilobyte of log data");
      mr.shutdown();
      dfs.shutdown();
      String testBuildDir = System.getProperty("test.build.data", "/tmp");
      String dfsPath = testBuildDir + "/dfs";
      FileUtils.deleteDirectory(new File(dfsPath));
      System.out.println(dfsPath);
    } catch (Exception e) {
      e.printStackTrace();
      // Fix: the exception was previously swallowed, so this test could
      // never fail. Report the failure to JUnit instead.
      fail("testDemux failed: " + e);
    }
  }
}
| 8,128 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/MockTriggerAction.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.datatrigger.TriggerAction;
import org.apache.hadoop.chukwa.datatrigger.TriggerEvent;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.Collection;
import java.util.ArrayList;
/**
 * Test double for TriggerAction that simply records every trigger event it
 * receives, so tests can inspect what was fired via the static accessors.
 */
public class MockTriggerAction implements TriggerAction {

  // Shared across all instances so tests can inspect events after the
  // framework constructs its own instance.
  private static Collection<TriggerEvent> triggerEvents = new ArrayList<TriggerEvent>();

  /** Returns the events recorded since class load or the last reset(). */
  public static Collection<TriggerEvent> getTriggerEvents() {
    return triggerEvents;
  }

  /** Discards all recorded events by swapping in a fresh collection. */
  public static void reset() {
    triggerEvents = new ArrayList<TriggerEvent>();
  }

  /** Records the event; conf, fs, and src are ignored. */
  public void execute(Configuration conf, FileSystem fs,
      FileStatus[] src, TriggerEvent event) throws IOException {
    triggerEvents.add(event);
  }
}
| 8,129 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/TestDemuxMapperConfigs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.ChunkBuilder;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.ChukwaTestOutputCollector;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.JobConf;
import junit.framework.TestCase;
import java.io.IOException;
/**
* Tests that settings related to the Demux mapper do what they should.
*/
/**
 * Tests that settings related to the Demux mapper do what they should:
 * both the default map processor (chukwa.demux.mapper.default.processor)
 * and a processor registered for a specific data type must be picked up
 * and invoked for matching chunks.
 */
public class TestDemuxMapperConfigs extends TestCase {

  public static String SAMPLE_RECORD_DATA = "sampleRecordData";

  /** The default processor is used when a chunk's data type has no mapping. */
  public void testSetDefaultMapProcessor() throws IOException {
    JobConf conf = new JobConf();
    // Trailing comma in the processor list must be tolerated.
    conf.set("chukwa.demux.mapper.default.processor",
        "org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MockMapProcessor,");
    // dataType == null: leave the chunk's default type so the default applies.
    mapSampleChunkAndVerify(conf, null);
  }

  /** A processor registered under a custom data type is used for that type. */
  public void testSetCustomeMapProcessor() throws IOException {
    String custom_DataType = "cus_dt";
    JobConf conf = new JobConf();
    conf.set(custom_DataType,
        "org.apache.hadoop.chukwa.extraction.demux.processor.mapper.MockMapProcessor,");
    mapSampleChunkAndVerify(conf, custom_DataType);
  }

  /**
   * Configures a Demux mapper with {@code conf}, maps one sample chunk
   * (tagged with {@code dataType} when non-null) and asserts that
   * MockMapProcessor emitted exactly one record for it.
   */
  private void mapSampleChunkAndVerify(JobConf conf, String dataType)
      throws IOException {
    Mapper<ChukwaArchiveKey, ChunkImpl, ChukwaRecordKey, ChukwaRecord> mapper =
        new Demux.MapClass();
    mapper.configure(conf);

    ChunkBuilder cb = new ChunkBuilder();
    cb.addRecord(SAMPLE_RECORD_DATA.getBytes());
    ChunkImpl chunk = (ChunkImpl) cb.getChunk();
    if (dataType != null) {
      chunk.setDataType(dataType);
    }

    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
        new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
    mapper.map(new ChukwaArchiveKey(), chunk, output, Reporter.NULL);

    // MockMapProcessor emits under reduce type "someReduceType", keyed by the
    // record body.
    ChukwaRecordKey recordKey = new ChukwaRecordKey("someReduceType", SAMPLE_RECORD_DATA);
    assertEquals("MockMapProcessor never invoked - no records found", 1, output.data.size());
    assertNotNull("MockMapProcessor never invoked", output.data.get(recordKey));
  }
}
| 8,130 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/TestDemuxManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
public class TestDemuxManager extends TestCase {
/**
 * Standard workflow: one completed dataSink file exists at startup, demux
 * runs once, and the manager performs the expected sequence of actions.
 */
public void testScenario1() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  cc.set(CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    // Seed the dataSink directory with one completed (.done) sink file.
    File dataSinkDirectory = new File(chukwaRootDir + "/logs");
    dataSinkDirectory.mkdirs();
    File dataSinkFile = new File(chukwaRootDir + "/logs" + "/dataSink1.done");
    dataSinkFile.createNewFile();

    DemuxManagerScenario dm = new DemuxManagerScenario1(cc, 0);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * No dataSink file at startup; one is added later by the scenario double.
 * The first poll finds nothing to move, the second poll processes the file.
 */
public void testScenario2() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  cc.set(CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    DemuxManagerScenario dm = new DemuxManagerScenario2(cc);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    // First poll: nothing to move yet.
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:false");
    // Second poll: dataSink2.done has appeared and is processed.
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * A dataSink file exists AND the MR_INPUT_DIR is already present (leftover
 * from a previous run). The leftover MR_INPUT_DIR must be reprocessed first,
 * then the new dataSink file.
 */
public void testScenario3() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  cc.set(CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    // Simulate a leftover demux input directory from an interrupted run.
    File mrInputDir = new File(chukwaRootDir + CHUKWA_CONSTANT.DEFAULT_DEMUX_PROCESSING_DIR_NAME + CHUKWA_CONSTANT.DEFAULT_DEMUX_MR_INPUT_DIR_NAME);
    mrInputDir.mkdirs();
    File dataSinkDirectory = new File(chukwaRootDir + "/logs");
    dataSinkDirectory.mkdirs();
    File dataSinkFile = new File(chukwaRootDir + "/logs" + "/dataSink3.done");
    dataSinkFile.createNewFile();

    DemuxManagerScenario dm = new DemuxManagerScenario1(cc, 2);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    // DEMUX_INPUT_DIR reprocessing
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");
    // dataSink3.done processing
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * A dataSink file exists, and both MR_INPUT_DIR and MR_OUTPUT_DIR are
 * already present. The stale MR_OUTPUT_DIR must be deleted first, then the
 * MR_INPUT_DIR reprocessed, then the dataSink file processed.
 */
public void testScenario4() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  cc.set(CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    // Simulate leftovers from an interrupted demux job: both input & output.
    File mrInputDir = new File(chukwaRootDir + CHUKWA_CONSTANT.DEFAULT_DEMUX_PROCESSING_DIR_NAME + CHUKWA_CONSTANT.DEFAULT_DEMUX_MR_INPUT_DIR_NAME);
    mrInputDir.mkdirs();
    File mrOutputDir = new File(chukwaRootDir + CHUKWA_CONSTANT.DEFAULT_DEMUX_PROCESSING_DIR_NAME + CHUKWA_CONSTANT.DEFAULT_DEMUX_MR_OUTPUT_DIR_NAME);
    mrOutputDir.mkdirs();
    File dataSinkDirectory = new File(chukwaRootDir + "/logs");
    dataSinkDirectory.mkdirs();
    File dataSinkFile = new File(chukwaRootDir + "/logs" + "/dataSink4.done");
    dataSinkFile.createNewFile();

    DemuxManagerScenario dm = new DemuxManagerScenario1(cc, 2);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    // Stale MR_OUTPUT_DIR cleanup, then MR_INPUT_DIR reprocessing.
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("deleteDemuxOutputDir:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");
    // dataSink4.done processing
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * Demux fails three times on the first dataSink file; the file is then moved
 * to the error directory, a new dataSink file arrives, and demux succeeds.
 */
public void testScenario5() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  cc.set(CHUKWA_CONSTANT.HDFS_DEFAULT_NAME_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    File dataSinkDirectory = new File(chukwaRootDir + "/logs");
    dataSinkDirectory.mkdirs();
    File dataSinkFile = new File(chukwaRootDir + "/logs" + "/dataSink5-0.done");
    dataSinkFile.createNewFile();

    DemuxManagerScenario dm = new DemuxManagerScenario5(cc);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    // Move dataSink & process (first failure)
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:false");
    requireActions.add("processData done");
    // Reprocess 1
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("deleteDemuxOutputDir:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("runDemux:false");
    requireActions.add("processData done");
    // Reprocess 2
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("deleteDemuxOutputDir:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("runDemux:false");
    requireActions.add("processData done");
    // Reprocess 3
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("deleteDemuxOutputDir:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("runDemux:false");
    requireActions.add("processData done");
    // Retries exhausted: files moved to error dir, new dataSink processed OK.
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("deleteDemuxOutputDir:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:true");
    requireActions.add("moveDataSinkFilesToDemuxErrorDirectory:true");
    requireActions.add("checkDemuxOutputDir:false");
    requireActions.add("checkDemuxInputDir:false");
    requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
    requireActions.add("runDemux:true");
    requireActions.add("checkDemuxOutputDir:true");
    requireActions.add("moveDemuxOutputDirToPostProcessDirectory:true");
    requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
    requireActions.add("processData done");

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * Standard workflow with MR_OUTPUT present after each run: demux output is
 * moved to the postProcess directory, three iterations in a row.
 */
public void testScenario6() {
  ChukwaConfiguration cc = new ChukwaConfiguration();
  String tempDirectory = System.getProperty("test.build.data", "/tmp");
  String chukwaRootDir = tempDirectory + "/demuxManagerTest_" + System.currentTimeMillis() + "/";
  // NOTE(review): this scenario sets WRITER_HDFS_FILESYSTEM_FIELD where the
  // others set HDFS_DEFAULT_NAME_FIELD — presumably intentional; confirm.
  cc.set(CHUKWA_CONSTANT.WRITER_HDFS_FILESYSTEM_FIELD, "file:///");
  cc.set(CHUKWA_CONSTANT.CHUKWA_ROOT_DIR_FIELD, chukwaRootDir);
  cc.set(CHUKWA_CONSTANT.CHUKWA_ARCHIVE_DIR_FIELD, chukwaRootDir + "/archives/");
  cc.set(CHUKWA_CONSTANT.CHUKWA_POST_PROCESS_DIR_FIELD, chukwaRootDir + "/postProcess");
  cc.set(CHUKWA_CONSTANT.CHUKWA_DATA_SINK_DIR_FIELD, chukwaRootDir + "/logs");
  try {
    File dataSinkDirectory = new File(chukwaRootDir + "/logs");
    dataSinkDirectory.mkdirs();
    File dataSinkFile = new File(chukwaRootDir + "/logs" + "/dataSink6.done");
    dataSinkFile.createNewFile();

    DemuxManagerScenario dm = new DemuxManagerScenario6(cc, 3);
    dm.start();

    List<String> requireActions = new ArrayList<String>();
    for (int i = 0; i < 3; i++) {
      requireActions.add("checkDemuxOutputDir:false");
      requireActions.add("checkDemuxInputDir:false");
      requireActions.add("moveDataSinkFilesToDemuxInputDirectory:true");
      requireActions.add("runDemux:true");
      requireActions.add("checkDemuxOutputDir:true");
      requireActions.add("moveDemuxOutputDirToPostProcessDirectory:true");
      requireActions.add("moveDataSinkFilesToArchiveDirectory:true");
      requireActions.add("processData done");
    }

    List<String> actions = dm.actions;
    // Compare with equals(), not interned-reference identity (==).
    Assert.assertEquals("wrong number of actions: " + actions,
        requireActions.size(), actions.size());
    for (int i = 0; i < requireActions.size(); i++) {
      Assert.assertEquals("action " + i, requireActions.get(i), actions.get(i));
    }
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    deleteDirectory(new File(chukwaRootDir));
  }
}
/**
 * Recursively deletes {@code path} and everything beneath it.
 *
 * @param path file or directory to remove
 * @return true if the final delete of {@code path} itself succeeded
 */
static public boolean deleteDirectory(File path) {
  if (path.exists()) {
    File[] files = path.listFiles();
    // listFiles() returns null on I/O error or when path is not a directory;
    // guard against an NPE and fall through to deleting path itself.
    if (files != null) {
      for (int i = 0; i < files.length; i++) {
        if (files[i].isDirectory()) {
          deleteDirectory(files[i]);
        } else {
          files[i].delete();
        }
      }
    }
  }
  return (path.delete());
}
/////////////////////////\
//// HELPER CLASSES ///// \
/////////////////////////____\
/**
 * Scenario 6 double: every demux run creates the MR output directory (so the
 * manager has output to move to postProcess) and drops a fresh dataSink file,
 * driving {@code count} full standard-workflow iterations before stopping.
 */
private static class DemuxManagerScenario6 extends DemuxManagerScenario {
  // Remaining workflow iterations to drive before stopping the manager.
  int count = 0;

  public DemuxManagerScenario6(ChukwaConfiguration conf, int count) throws Exception {
    super(conf);
    this.count = count;
  }

  @Override
  public boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    boolean res = super.runDemux(demuxInputDir, demuxOutputDir);
    try {
      // Create DEMUX_OUTPUT so the manager finds job output to post-process.
      String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD);
      File mrOutputDir = new File(chukwaRootDir + DEFAULT_DEMUX_PROCESSING_DIR_NAME + DEFAULT_DEMUX_MR_OUTPUT_DIR_NAME);
      mrOutputDir.mkdirs();
      mrOutputDir.deleteOnExit();
      // Add a dataSink file so the next iteration has data to consume.
      // (chukwaRootDir ends with '/', so "logs" resolves correctly.)
      File dataSinkDirectory = new File(chukwaRootDir + "logs");
      dataSinkDirectory.mkdirs();
      File dataSinkFile = new File(chukwaRootDir + "logs" + "/dataSink6-" + count + ".done");
      dataSinkFile.createNewFile();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    count--;
    if (count <= 0) {
      this.isRunning = false;
    }
    return res;
  }
}
/**
 * Scenario 5 double: demux "fails" until the dataSink files have been moved
 * to the demux error directory, after which it succeeds. Moving to the error
 * directory also drops a fresh dataSink file so the manager has new work.
 */
private static class DemuxManagerScenario5 extends DemuxManagerScenario {
  public DemuxManagerScenario5(ChukwaConfiguration conf) throws Exception {
    super(conf);
  }

  // Set once the failing dataSink files reach the error directory; runDemux
  // reports success only after this flips to true.
  boolean errorDone = false;

  public boolean moveDataSinkFilesToDemuxErrorDirectory(String dataSinkDir,
      String demuxErrorDir) throws IOException {
    boolean res = super.moveDataSinkFilesToDemuxErrorDirectory(dataSinkDir, demuxErrorDir);
    // Drop a second dataSink file so processing continues after the error.
    String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD);
    File dataSinkDirectory = new File(chukwaRootDir + "logs");
    dataSinkDirectory.mkdirs();
    dataSinkDirectory.deleteOnExit();
    File dataSinkFile = new File(chukwaRootDir + "logs" + "/dataSink5-1.done");
    dataSinkFile.createNewFile();
    dataSinkFile.deleteOnExit();
    errorDone = true;
    return res;
  }

  // Number of demux attempts so far (3 failed retries + final success).
  int counter = 0;

  @Override
  public boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    // Stop the manager loop once the error path has run and demux has been
    // attempted enough times to cover the retries plus the success.
    if (errorDone && counter >= 4) {
      this.isRunning = false;
    }
    // Create DEMUX_OUTPUT so the manager sees job output on the next check.
    String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD);
    File mrOutputDir = new File(chukwaRootDir + DEFAULT_DEMUX_PROCESSING_DIR_NAME + DEFAULT_DEMUX_MR_OUTPUT_DIR_NAME);
    mrOutputDir.mkdirs();
    mrOutputDir.deleteOnExit();
    counter++;
    this.actions.add("runDemux:" + errorDone);
    return errorDone;
  }
}
/**
 * Scenario 2 double: the dataSink directory is empty at startup; a dataSink
 * file is created just before the second move attempt, simulating data that
 * arrives while the manager is polling.
 */
private static class DemuxManagerScenario2 extends DemuxManagerScenario {
  public DemuxManagerScenario2(ChukwaConfiguration conf) throws Exception {
    super(conf);
  }

  // Counts calls to moveDataSinkFilesToDemuxInputDirectory; the dataSink
  // file is injected right before the second call.
  int counter = 0;

  @Override
  public boolean moveDataSinkFilesToDemuxInputDirectory(String dataSinkDir,
      String demuxInputDir) throws IOException {
    if (counter == 1) {
      // Second poll: create the dataSink file the manager is waiting for.
      String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD);
      File dataSinkDirectory = new File(chukwaRootDir + "logs");
      dataSinkDirectory.mkdirs();
      dataSinkDirectory.deleteOnExit();
      File dataSinkFile = new File(chukwaRootDir + "logs" + "/dataSink2.done");
      dataSinkFile.createNewFile();
      dataSinkFile.deleteOnExit();
    }
    counter++;
    return super.moveDataSinkFilesToDemuxInputDirectory(dataSinkDir, demuxInputDir);
  }

  @Override
  public boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    boolean res = super.runDemux(demuxInputDir, demuxOutputDir);
    if (counter > 1) {
      // The late dataSink file has been processed; stop the manager loop.
      this.isRunning = false;
    }
    return res;
  }
}
/**
 * Scenario double that lets demux "run" a fixed number of additional times
 * and then stops the manager loop.
 */
private static class DemuxManagerScenario1 extends DemuxManagerScenario {
  // Remaining demux runs before the manager is stopped.
  int count = 0;

  public DemuxManagerScenario1(ChukwaConfiguration conf, int count) throws Exception {
    super(conf);
    this.count = count;
  }

  @Override
  public boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    boolean res = super.runDemux(demuxInputDir, demuxOutputDir);
    count--;
    if (count <= 0) {
      this.isRunning = false;
    }
    return res;
  }
}
/**
 * Test double for DemuxManager that records each lifecycle step it takes in
 * {@link #actions} as "methodName:result" strings, so tests can assert on
 * the exact sequence of operations. All overrides delegate to the real
 * implementation, except runDemux which is stubbed to always report success
 * instead of launching a MapReduce job.
 */
private static class DemuxManagerScenario extends DemuxManager {
  // Ordered trace of manager actions, e.g. "checkDemuxInputDir:true".
  public List<String> actions = new ArrayList<String>();

  public DemuxManagerScenario(ChukwaConfiguration conf) throws Exception {
    super(conf);
    // Poll quickly when no dataSink file is present so tests finish fast.
    NO_DATASINK_SLEEP_TIME = 5;
  }

  @Override
  public boolean checkDemuxInputDir(String demuxInputDir) throws IOException {
    boolean res = super.checkDemuxInputDir(demuxInputDir);
    this.actions.add("checkDemuxInputDir:" + res);
    return res;
  }

  @Override
  public boolean checkDemuxOutputDir(String demuxOutputDir)
      throws IOException {
    boolean res = super.checkDemuxOutputDir(demuxOutputDir);
    this.actions.add("checkDemuxOutputDir:" + res);
    return res;
  }

  @Override
  public boolean moveDataSinkFilesToArchiveDirectory(String demuxInputDir,
      String archiveDirectory) throws IOException {
    boolean res = super.moveDataSinkFilesToArchiveDirectory(demuxInputDir,
        archiveDirectory);
    this.actions.add("moveDataSinkFilesToArchiveDirectory:" + res);
    return res;
  }

  @Override
  public boolean moveDataSinkFilesToDemuxErrorDirectory(String dataSinkDir,
      String demuxErrorDir) throws IOException {
    boolean res = super.moveDataSinkFilesToDemuxErrorDirectory(dataSinkDir, demuxErrorDir);
    this.actions.add("moveDataSinkFilesToDemuxErrorDirectory:" + res);
    return res;
  }

  @Override
  public boolean moveDataSinkFilesToDemuxInputDirectory(String dataSinkDir,
      String demuxInputDir) throws IOException {
    boolean res = super.moveDataSinkFilesToDemuxInputDirectory(dataSinkDir, demuxInputDir);
    this.actions.add("moveDataSinkFilesToDemuxInputDirectory:" + res);
    return res;
  }

  @Override
  public boolean moveDemuxOutputDirToPostProcessDirectory(
      String demuxOutputDir, String postProcessDirectory) throws IOException {
    boolean res = super.moveDemuxOutputDirToPostProcessDirectory(demuxOutputDir,
        postProcessDirectory);
    this.actions.add("moveDemuxOutputDirToPostProcessDirectory:" + res);
    return res;
  }

  @Override
  public boolean processData(String dataSinkDir, String demuxInputDir,
      String demuxOutputDir, String postProcessDir, String archiveDir)
      throws IOException {
    boolean res = super.processData(dataSinkDir, demuxInputDir, demuxOutputDir, postProcessDir,
        archiveDir);
    this.actions.add("processData done");
    return res;
  }

  @Override
  public boolean runDemux(String demuxInputDir, String demuxOutputDir) {
    // Stub: never launch a real MapReduce job; always report success.
    // (Subclasses override this to simulate failures or schedule new data.)
    boolean res = true;
    this.actions.add("runDemux:" + res);
    return res;
  }

  @Override
  public boolean deleteDemuxOutputDir(String demuxOutputDir)
      throws IOException {
    boolean res = super.deleteDemuxOutputDir(demuxOutputDir);
    this.actions.add("deleteDemuxOutputDir:" + res);
    return res;
  }
}
}
| 8,131 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/TestPostDemuxTrigger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.chukwa.datatrigger.TriggerEvent;
import org.apache.hadoop.fs.Path;
import java.util.Iterator;
/**
 * Verifies that PostProcessorManager invokes the configured post-demux
 * success trigger action(s) once per processed path.
 */
public class TestPostDemuxTrigger extends TestCase {

  /** Paths handed to the triggers; their contents don't matter here. */
  static final Path[] SAMPLE_PATHS = new Path[] { new Path("/") };

  protected void setUp() throws Exception {
    // Tests inspect MockTriggerAction's static state; start each test clean.
    MockTriggerAction.reset();
  }

  /** A single configured action fires once per path with POST_DEMUX_SUCCESS. */
  public void testSuccessTrigger() throws Exception {
    ChukwaConfiguration conf = new ChukwaConfiguration();
    conf.set(CHUKWA_CONSTANT.POST_DEMUX_SUCCESS_ACTION,
        "org.apache.hadoop.chukwa.extraction.demux.MockTriggerAction");
    PostProcessorManager postProcessManager = new PostProcessorManager(conf);

    assertTrue("processPostMoveTriggers returned false",
        postProcessManager.processPostMoveTriggers(SAMPLE_PATHS));
    assertEquals("Trigger never invoked", SAMPLE_PATHS.length,
        MockTriggerAction.getTriggerEvents().size());
    // Typed iterator instead of a raw Iterator.
    Iterator<TriggerEvent> events = MockTriggerAction.getTriggerEvents().iterator();
    assertEquals("Incorrect Trigger event found", TriggerEvent.POST_DEMUX_SUCCESS,
        events.next());
  }

  /** A comma-separated action list fires each action once per path. */
  public void testMultiSuccessTrigger() throws Exception {
    ChukwaConfiguration conf = new ChukwaConfiguration();
    conf.set(CHUKWA_CONSTANT.POST_DEMUX_SUCCESS_ACTION,
        "org.apache.hadoop.chukwa.extraction.demux.MockTriggerAction," +
        "org.apache.hadoop.chukwa.extraction.demux.MockTriggerAction");
    PostProcessorManager postProcessManager = new PostProcessorManager(conf);

    assertTrue("processPostMoveTriggers returned false",
        postProcessManager.processPostMoveTriggers(SAMPLE_PATHS));
    assertEquals("Trigger never invoked", 2 * SAMPLE_PATHS.length,
        MockTriggerAction.getTriggerEvents().size());
    Iterator<TriggerEvent> events = MockTriggerAction.getTriggerEvents().iterator();
    assertEquals("Incorrect Trigger event found", TriggerEvent.POST_DEMUX_SUCCESS,
        events.next());
    assertEquals("Incorrect Trigger event found", TriggerEvent.POST_DEMUX_SUCCESS,
        events.next());
  }
}
| 8,132 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestTsProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkBuilder;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.demux.Demux;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.JobConf;
import java.util.Map;
import java.util.Date;
import java.util.Calendar;
import java.text.SimpleDateFormat;
public class TestTsProcessor extends TestCase {
private static String DATA_TYPE = "testDataType";
private static String DATA_SOURCE = "testDataSource";
JobConf jobConf = null;
Date date = null;
Date dateWithoutMillis = null;
protected void setUp() throws Exception {
jobConf = new JobConf();
Demux.jobConf = jobConf;
date = new Date();
//if our format doesn't contain millis, then our final record date won't
//have them either. let's create a sample date without millis for those tests
//so our assertions will pass
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
calendar.set(Calendar.MILLISECOND, 0);
dateWithoutMillis = calendar.getTime();
}
public void testDefaultFormat() {
String record = buildSampleSimpleRecord(date, "yyyy-MM-dd HH:mm:ss,SSS");
doTest(date, record);
}
public void testCustomDefaultFormat() {
jobConf.set("TsProcessor.default.time.format", "yyyy--MM--dd HH::mm::ss SSS");
String record = buildSampleSimpleRecord(date, "yyyy--MM--dd HH::mm::ss SSS");
doTest(date, record);
}
public void testCustomDefaultFormat2() {
// this date format produces a date that longer than the format, since z
// expands to something like PDT
jobConf.set("TsProcessor.default.time.format", "yyyy--MM--dd HH::mm::ss SSS,z");
String record = buildSampleSimpleRecord(date, "yyyy--MM--dd HH::mm::ss SSS,z");
doTest(date, record);
}
public void testCustomDataTypeFormat() {
jobConf.set("TsProcessor.time.format." + DATA_TYPE, "yyyy--MM--dd HH::mm::ss SSS");
String record = buildSampleSimpleRecord(date, "yyyy--MM--dd HH::mm::ss SSS");
doTest(date, record);
}
public void testCustomDefaultFormatWithCustomDataTypeFormat() {
jobConf.set("TsProcessor.default.time.format", "yyyy/MM/dd HH:mm:ss SSS");
jobConf.set("TsProcessor.time.format." + DATA_TYPE, "yyyy--MM--dd HH::mm::ss SSS");
String record = buildSampleSimpleRecord(date, "yyyy--MM--dd HH::mm::ss SSS");
doTest(date, record);
}
public void testCustomApacheDefaultFormat() {
jobConf.set("TsProcessor.default.time.format", "dd/MMM/yyyy:HH:mm:ss Z");
jobConf.set("TsProcessor.default.time.regex",
"^(?:[\\d.]+) \\[(\\d{2}/\\w{3}/\\d{4}:\\d{2}:\\d{2}:\\d{2} [-+]\\d{4})\\] .*");
String record = buildSampleApacheRecord(dateWithoutMillis, "dd/MMM/yyyy:HH:mm:ss Z");
doTest(dateWithoutMillis, record);
}
public void testCustomApacheDataTypeFormat() {
jobConf.set("TsProcessor.time.format." + DATA_TYPE, "dd/MMM/yyyy:HH:mm:ss Z");
jobConf.set("TsProcessor.time.regex." + DATA_TYPE,
"^(?:[\\d.]+) \\[(\\d{2}/\\w{3}/\\d{4}:\\d{2}:\\d{2}:\\d{2} [-+]\\d{4})\\] .*");
String record = buildSampleApacheRecord(dateWithoutMillis, "dd/MMM/yyyy:HH:mm:ss Z");
doTest(dateWithoutMillis, record);
}
private static String buildSampleSimpleRecord(Date date, String dateFormat) {
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
return "" + sdf.format(date) + " some sample record data";
}
private static String buildSampleApacheRecord(Date date, String dateFormat) {
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
return "10.10.182.49 [" + sdf.format(date) +
"] \"\" 200 \"-\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3\" \"some.site.com:8076\"";
}
public void doTest(Date date, String recordData) {
ChunkBuilder cb = new ChunkBuilder();
cb.addRecord(recordData.getBytes());
Chunk chunk = cb.getChunk();
chunk.setDataType(DATA_TYPE);
chunk.setSource(DATA_SOURCE);
ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output =
new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
TsProcessor p = new TsProcessor();
p.reset(chunk);
p.process(null, chunk, output, Reporter.NULL);
ChukwaRecordKey key = buildKey(date, DATA_SOURCE, DATA_TYPE);
Map<ChukwaRecordKey, ChukwaRecord> outputData = output.data;
assertNotNull("No output data found.", outputData);
assertEquals("Output data size not correct.", 1, outputData.size());
ChukwaRecord record = outputData.get(key);
assertNotNull("Output record not found.", record);
assertEquals("Output record time not correct.", date.getTime(), record.getTime());
assertEquals("Output record body not correct.", recordData,
new String(record.getMapFields().get("body").get()));
}
/**
 * Builds the key TsProcessor is expected to emit: records are bucketed by
 * the start of the hour containing the timestamp, then source, then the
 * exact millisecond timestamp.
 */
private static ChukwaRecordKey buildKey(Date date, String dataSource, String dataType) {
  Calendar hourStart = Calendar.getInstance();
  hourStart.setTime(date);
  hourStart.set(Calendar.MINUTE, 0);
  hourStart.set(Calendar.SECOND, 0);
  hourStart.set(Calendar.MILLISECOND, 0);

  ChukwaRecordKey result = new ChukwaRecordKey();
  result.setReduceType(dataType);
  result.setKey(hourStart.getTimeInMillis() + "/" + dataSource + "/" + date.getTime());
  return result;
}
public void testParseIllegalRegex() {
  // "(" is an unterminated group: the processor must not crash, and must not
  // emit an error record (the cchunkData field is only set on failure paths).
  jobConf.set(TsProcessor.DEFAULT_TIME_REGEX, "(");

  ChunkBuilder builder = new ChunkBuilder();
  builder.addRecord("2012-10-25 00:18:44,818 some sample record data".getBytes());
  Chunk chunk = builder.getChunk();

  TsProcessor processor = new TsProcessor();
  processor.reset(chunk);
  ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> collector =
      new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
  processor.process(null, chunk, collector, Reporter.NULL);

  assertEquals("Output data size not correct.", 1, collector.data.size());
  ChukwaRecordKey onlyKey = collector.data.keySet().iterator().next();
  ChukwaRecord emitted = collector.data.get(onlyKey);
  assertNull("Output should not be error.", emitted.getValue("cchunkData"));
}
}
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestJsonProcessors.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map.Entry;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.Record;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import junit.framework.TestCase;
/**
 * Unit tests for the JSON-based demux mapper processors (JobTracker,
 * Namenode, Datanode, HBase, Zookeeper, SystemMetrics). Each test serializes
 * a JSON payload into a chunk, runs it through the processor under test, and
 * verifies the emitted ChukwaRecord fields against the payload.
 */
public class TestJsonProcessors extends TestCase {
  /**
   * Process the chunk with the passed Processor and compare with the input
   * JSONObject
   *
   * @param p processor under test
   * @param inData JSON payload expected to be echoed back as record fields
   * @param chunk chunk carrying the serialized payload
   * @return null when every field matches, otherwise a description of the
   *         first mismatching metric
   */
  private String testProcessor(AbstractProcessor p, JSONObject inData,
      Chunk chunk) {
    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output = new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
    p.process(new ChukwaArchiveKey(), chunk, output, null);
    HashMap<ChukwaRecordKey, ChukwaRecord> outData = output.data;
    // First get all ChukwaRecords and then get all field-data pairs within
    // each record
    Iterator<Entry<ChukwaRecordKey, ChukwaRecord>> recordIter = outData
        .entrySet().iterator();
    while (recordIter.hasNext()) {
      Entry<ChukwaRecordKey, ChukwaRecord> recordEntry = recordIter
          .next();
      ChukwaRecord value = recordEntry.getValue();
      String[] fields = value.getFields();
      for (String field : fields) {
        // ignore ctags, capps, csource — metadata added by the processors,
        // not part of the metric payload being verified
        if (field.equals(Record.tagsField)
            || field.equals(Record.applicationField)
            || field.equals(Record.sourceField)) {
          continue;
        }
        String data = value.getValue(field);
        String expected = String.valueOf(inData.get(field));
        /*System.out.println("Metric, expected data, received data- " +
            field + ", " + expected + ", " +data);
        */
        if (!expected.equals(data)) {
          StringBuilder sb = new StringBuilder(
              "Failed to verify metric - ");
          sb.append("field:").append(field);
          sb.append(", expected:").append(expected);
          sb.append(", but received:").append(data);
          return sb.toString();
        }
      }
    }
    return null;
  }

  /**
   * Builds a base payload carrying the metadata fields (capp, csource) that
   * accompany every metric record. Falls back to "localhost" when the local
   * hostname cannot be resolved.
   */
  @SuppressWarnings("unchecked")
  private JSONObject getJSONObject(){
    String csource = "localhost";
    try {
      csource = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
      csource = "localhost";
    }
    JSONObject json = new JSONObject();
    json.put("capp", "Test");
    json.put("csource", csource);
    return json;
  }

  @SuppressWarnings("unchecked")
  public void testJobTrackerProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("memHeapUsedM", "286");
    json.put("maps_killed", "3");
    json.put("waiting_maps", "1");
    json.put("RpcProcessingTime_avg_time", "0.003");
    byte[] data = json.toString().getBytes();
    JobTrackerProcessor p = new JobTrackerProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // test gauge metric: counter fields are emitted as deltas from the
    // previous sample, so feeding 5 after 3 should produce 2
    json.put("maps_killed", "5");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
    json.put("maps_killed", "2");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }

  @SuppressWarnings("unchecked")
  public void testNamenodeProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("BlocksTotal", "1234");
    json.put("FilesCreated", "33");
    json.put("RpcQueueTime_avg_time", "0.001");
    json.put("gcCount", "112");
    json.put("Transactions_num_ops", "3816");
    byte[] data = json.toString().getBytes();
    NamenodeProcessor p = new NamenodeProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // test gauge metric: expect deltas 55-33=22 and 115-112=3
    json.put("FilesCreated", "55");
    json.put("gcCount", "115");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
    json.put("FilesCreated", "22");
    json.put("gcCount", "3");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }

  @SuppressWarnings("unchecked")
  public void testDatanodeProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("heartBeats_num_ops", "10875");
    json.put("FilesCreated", "33");
    json.put("RpcQueueTime_avg_time", "0.001");
    json.put("gcCount", "112");
    json.put("Capacity", "22926269645");
    byte[] data = json.toString().getBytes();
    DatanodeProcessor p = new DatanodeProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // test gauge metric: expect deltas 10980-10875=105 and 115-112=3
    json.put("heartBeats_num_ops", "10980");
    json.put("gcCount", "115");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
    json.put("heartBeats_num_ops", "105");
    json.put("gcCount", "3");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }

  @SuppressWarnings("unchecked")
  public void testHBaseMasterProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("splitSizeNumOps", "108");
    json.put("AverageLoad", "3.33");
    byte[] data = json.toString().getBytes();
    HBaseMasterProcessor p = new HBaseMasterProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // test gauge metric: expect delta 109-108=1
    json.put("splitSizeNumOps", "109");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
    json.put("splitSizeNumOps", "1");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }

  @SuppressWarnings("unchecked")
  public void testHBaseRegionServerProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("blockCacheSize", "2681872");
    byte[] data = json.toString().getBytes();
    // NOTE(review): this test is named for the region server but instantiates
    // HBaseMasterProcessor — looks like a copy-paste from the master test;
    // confirm whether HBaseRegionServerProcessor should be used here.
    HBaseMasterProcessor p = new HBaseMasterProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // no gauge metrics yet
  }

  @SuppressWarnings("unchecked")
  public void testZookeeperProcessor() {
    // test metric for each record type
    JSONObject json = getJSONObject();
    json.put("packetsSent", "2049");
    json.put("NodeCount", "40");
    byte[] data = json.toString().getBytes();
    ZookeeperProcessor p = new ZookeeperProcessor();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    String failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
    // test gauge metric: expect delta 2122-2049=73
    json.put("packetsSent", "2122");
    data = json.toString().getBytes();
    ch = new ChunkImpl("TestType", "Test", data.length, data, null);
    json.put("packetsSent", "73");
    failMsg = testProcessor(p, json, ch);
    assertNull(failMsg, failMsg);
  }

  /**
   * Builds a full sigar-style system metrics payload (memory, cpu, loadavg,
   * disk, network) and verifies that SystemMetrics averages the per-core CPU
   * figures across the two populated cores:
   * combined=(0.607+0.898)/2, user=(0.49+0.69)/2, sys=(0.116+0.202)/2,
   * idle=(0.35+0.06)/2.
   */
  @SuppressWarnings("unchecked")
  public void testSysteMetricsProcessor() {
    JSONObject system = new JSONObject();
    JSONObject memory = new JSONObject();
    JSONObject cpu1 = new JSONObject();
    JSONObject cpu2 = new JSONObject();
    JSONObject cpu3 = new JSONObject();
    JSONObject cpu4 = new JSONObject();
    JSONObject disk1 = new JSONObject();
    JSONObject disk2 = new JSONObject();
    JSONObject network1 = new JSONObject();
    JSONObject network2 = new JSONObject();
    JSONArray cpu = new JSONArray();
    JSONArray loadAvg = new JSONArray();
    JSONArray disk = new JSONArray();
    JSONArray network = new JSONArray();
    memory.put("Total", "130980773888");
    memory.put("UsedPercent", "4.493927773730516");
    memory.put("FreePercent", "95.50607222626948");
    memory.put("ActualFree", "125094592512");
    memory.put("ActualUsed", "5886181376");
    memory.put("Free", "34487599104");
    memory.put("Used", "96493174784");
    memory.put("Ram", "124920");
    system.put("memory", memory);
    system.put("timestamp", 1353981082318L);
    system.put("uptime", "495307.98");
    cpu1.put("combined", 0.607);
    cpu1.put("user", 0.49);
    cpu1.put("idle", 0.35);
    cpu1.put("sys", 0.116);
    cpu2.put("combined", 0.898);
    cpu2.put("user", 0.69);
    cpu2.put("idle", 0.06);
    cpu2.put("sys", 0.202);
    // include chunks which have null values, to simulate sigar issue on
    // pLinux
    cpu3.put("combined", null);
    cpu3.put("user", null);
    cpu3.put("idle", null);
    cpu3.put("sys", null);
    cpu4.put("combined", "null");
    cpu4.put("user", "null");
    cpu4.put("idle", "null");
    cpu4.put("sys", "null");
    // NOTE(review): cpu4 (string "null" values) is built but never added to
    // the cpu array below — confirm whether cpu.add(cpu4) was intended.
    cpu.add(cpu1);
    cpu.add(cpu2);
    cpu.add(cpu3);
    system.put("cpu", cpu);
    loadAvg.add("0.16");
    loadAvg.add("0.09");
    loadAvg.add("0.06");
    system.put("loadavg", loadAvg);
    disk1.put("ReadBytes", 220000000000L);
    disk1.put("Reads", 12994476L);
    disk2.put("ReadBytes", 678910987L);
    disk2.put("Reads", 276L);
    disk.add(disk1);
    disk.add(disk2);
    system.put("disk", disk);
    network1.put("RxBytes", 7234832487L);
    network2.put("RxBytes", 8123023483L);
    network.add(network1);
    network.add(network2);
    system.put("network", network);
    byte[] data = system.toString().getBytes();
    // parse with
    // org.apache.hadoop.chukwa.extraction.demux.processor.mapper.SystemMetrics
    // and verify cpu usage aggregates
    SystemMetrics p = new SystemMetrics();
    ChunkImpl ch = new ChunkImpl("TestType", "Test", data.length, data,
        null);
    ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord> output = new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
    p.process(new ChukwaArchiveKey(), ch, output, null);
    HashMap<ChukwaRecordKey, ChukwaRecord> outData = output.data;
    Iterator<Entry<ChukwaRecordKey, ChukwaRecord>> recordIter = outData
        .entrySet().iterator();
    while (recordIter.hasNext()) {
      Entry<ChukwaRecordKey, ChukwaRecord> recordEntry = recordIter
          .next();
      ChukwaRecordKey key = recordEntry.getKey();
      ChukwaRecord value = recordEntry.getValue();
      // only the record carrying the CPU aggregates has a "combined" field
      if (value.getValue("combined") != null) {
        assertEquals(Double.parseDouble(value.getValue("combined")),
            0.7525);
        assertEquals(Double.parseDouble(value.getValue("user")), 0.59);
        assertEquals(Double.parseDouble(value.getValue("sys")), 0.159);
        assertEquals(Double.parseDouble(value.getValue("idle")), 0.205);
        System.out.println("CPU metrics verified");
      }
    }
  }
}
| 8,134 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestHadoopLogProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/*
* Test code for verifying that the log processors work properly.
*
* Currently more or less just a stub
*/
public class TestHadoopLogProcessor extends TestCase {
  long serializedSize = 0;

  /** Swallows emitted records, accumulating only their serialized length. */
  OutputCollector<ChukwaRecordKey, ChukwaRecord> nullcollector =
      new OutputCollector<ChukwaRecordKey, ChukwaRecord>() {
        public void collect(ChukwaRecordKey arg0, ChukwaRecord arg1)
            throws IOException {
          serializedSize += arg1.toString().length();
        }
      };

  /** Micro-benchmark: parses 50k synthetic DataNode log lines and prints timings. */
  public void testHLPParseTimes() {
    HadoopLogProcessor hlp = new HadoopLogProcessor();
    final int totalLines = 50000;
    long totalBytes = 0;
    long startMillis = System.currentTimeMillis();
    for (int i = 0; i < totalLines; ++i) {
      Chunk chunk = getNewChunk();
      totalBytes += chunk.getData().length;
      hlp.process(null, chunk, nullcollector, Reporter.NULL);
    }
    long elapsed = System.currentTimeMillis() - startMillis;
    System.out.println("parse took " + elapsed + " milliseconds");
    System.out.println("aka " + elapsed * 1.0 / totalLines + " ms per line or " + elapsed
        * 1000.0 / totalBytes + " ms per kilobyte of log data");
    System.out.println("output records had total length of " + serializedSize);
  }

  java.util.Random r = new java.util.Random();

  /** Fabricates a chunk holding one randomized DataNode log line. */
  public Chunk getNewChunk() {
    int millis = r.nextInt(1000);
    String logLine = "2008-05-29 10:42:22," + millis
        + " INFO org.apache.hadoop.dfs.DataNode: Some text goes here"
        + r.nextInt() + "\n";
    return new ChunkImpl("HadoopLogProcessor", "test",
        logLine.length(), logLine.getBytes(), null);
  }
}
| 8,135 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestLog4JMetricsContextChukwaRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.extraction.demux.processor.mapper.Log4JMetricsContextProcessor.Log4JMetricsContextChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
/**
 * Tests parsing of log4j metrics-context lines into
 * {@link Log4JMetricsContextChukwaRecord}: record type is taken from the
 * JSON contextName (prefixed before recordName when they differ), and the
 * record timestamp is the embedded timestamp rounded down to the minute.
 */
public class TestLog4JMetricsContextChukwaRecord extends TestCase {
  // Sample chunkQueue metrics lines (contextName == recordName == "chunkQueue").
  private static String[] chukwaQueueLog = {
      "2009-05-06 00:00:21,982 INFO chukwa.metrics.chunkQueue: {\"removedChunk\":1,\"recordName\":\"chunkQueue\",\"queueSize\":94,\"timestamp\":1241568021982,\"removedChunk_raw\":0,\"dataSize\":10373608,\"fullQueue\":1,\"addedChunk_rate\":0,\"addedChunk_raw\":0,\"period\":60,\"addedChunk\":95,\"hostName\":\"test.com\",\"removedChunk_rate\":0,\"contextName\":\"chunkQueue\"}",
      "2009-05-06 00:01:21,981 INFO chukwa.metrics.chunkQueue: {\"removedChunk\":1,\"recordName\":\"chunkQueue\",\"queueSize\":94,\"timestamp\":1241568081981,\"removedChunk_raw\":0,\"dataSize\":10373608,\"fullQueue\":1,\"addedChunk_rate\":0,\"addedChunk_raw\":0,\"period\":60,\"addedChunk\":95,\"hostName\":\"test.com\",\"removedChunk_rate\":0,\"contextName\":\"chunkQueue\"}",
      "2009-05-06 00:02:21,982 INFO chukwa.metrics.chunkQueue: {\"removedChunk\":1,\"recordName\":\"chunkQueue\",\"queueSize\":94,\"timestamp\":1241568141982,\"removedChunk_raw\":0,\"dataSize\":10373608,\"fullQueue\":1,\"addedChunk_rate\":0,\"addedChunk_raw\":0,\"period\":60,\"addedChunk\":95,\"hostName\":\"test.com\",\"removedChunk_rate\":0,\"contextName\":\"chunkQueue\"}",
  };
  // Sample chukwaAgent metrics lines; the last entry uses contextName "CA",
  // which differs from recordName and so is prepended to the record type.
  private static String[] chukwaAgentLog = {
      "2009-05-06 23:33:35,213 INFO chukwa.metrics.chukwaAgent: {\"addedAdaptor_rate\":0,\"addedAdaptor_raw\":0,\"recordName\":\"chukwaAgent\",\"timestamp\":1241652815212,\"removedAdaptor_rate\":0,\"removedAdaptor\":0,\"period\":60,\"adaptorCount\":4,\"removedAdaptor_raw\":0,\"process\":\"ChukwaAgent\",\"addedAdaptor\":4,\"hostName\":\"test.com\",\"contextName\":\"chukwaAgent\"}",
      "2009-05-06 23:34:35,211 INFO chukwa.metrics.chukwaAgent: {\"addedAdaptor_rate\":0,\"addedAdaptor_raw\":0,\"recordName\":\"chukwaAgent\",\"timestamp\":1241652875211,\"removedAdaptor_rate\":0,\"removedAdaptor\":0,\"period\":60,\"adaptorCount\":4,\"removedAdaptor_raw\":0,\"process\":\"ChukwaAgent\",\"addedAdaptor\":4,\"hostName\":\"test.com\",\"contextName\":\"chukwaAgent\"}",
      "2009-05-06 23:35:35,212 INFO chukwa.metrics.chukwaAgent: {\"addedAdaptor_rate\":0,\"addedAdaptor_raw\":0,\"recordName\":\"chukwaAgent\",\"timestamp\":1241652935212,\"removedAdaptor_rate\":0,\"removedAdaptor\":0,\"period\":60,\"adaptorCount\":4,\"removedAdaptor_raw\":0,\"process\":\"ChukwaAgent\",\"addedAdaptor\":4,\"hostName\":\"test.com\",\"contextName\":\"chukwaAgent\"}",
      "2009-05-06 23:39:35,215 INFO chukwa.metrics.chukwaAgent: {\"addedAdaptor_rate\":0,\"addedAdaptor_raw\":0,\"recordName\":\"chukwaAgent\",\"timestamp\":1241653175214,\"removedAdaptor_rate\":0,\"removedAdaptor\":0,\"period\":60,\"adaptorCount\":4,\"removedAdaptor_raw\":0,\"process\":\"ChukwaAgent\",\"addedAdaptor\":4,\"hostName\":\"test.com\",\"contextName\":\"CA\"}",
  };

  public void testLog4JMetricsContextChukwaRecord() throws Throwable {
    {
      // contextName equals recordName -> record type is just "chunkQueue";
      // timestamp is truncated to the containing minute.
      Log4JMetricsContextChukwaRecord rec = new Log4JMetricsContextChukwaRecord(chukwaQueueLog[0]);
      ChukwaRecord chukwaRecord = rec.getChukwaRecord();
      assertEquals("chunkQueue", rec.getRecordType());
      assertEquals("1241568021982", chukwaRecord.getValue("timestamp"));
      assertEquals((1241568021982l/60000)*60000, rec.getTimestamp());
      assertEquals("94", chukwaRecord.getValue("queueSize"));
    }
    {
      // contextName "CA" differs from recordName -> type is "CA_chukwaAgent".
      Log4JMetricsContextChukwaRecord rec = new Log4JMetricsContextChukwaRecord(chukwaAgentLog[3]);
      assertEquals("CA_chukwaAgent", rec.getRecordType());
      assertEquals(1241653175214l/60000*60000, rec.getTimestamp());
    }
  }
}
| 8,136 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/MockMapProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
public class MockMapProcessor extends AbstractProcessor {
protected void parse(String recordEntry,
OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
Reporter reporter) throws Throwable {
ChukwaRecordKey key = new ChukwaRecordKey("someReduceType", recordEntry);
ChukwaRecord record = new ChukwaRecord();
output.collect(key, record);
}
} | 8,137 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/ChukwaTestOutputCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.hadoop.mapred.OutputCollector;
public class ChukwaTestOutputCollector<K, V> implements OutputCollector<K, V> {
public HashMap<K, V> data = new HashMap<K, V>();
public void collect(K key, V value) throws IOException {
data.put(key, value);
}
@Override
public String toString() {
Iterator<K> it = data.keySet().iterator();
K key = null;
V value = null;
StringBuilder sb = new StringBuilder();
while (it.hasNext()) {
key = it.next();
value = data.get(key);
sb.append("Key[").append(key).append("] value[").append(value).append(
"]\n");
}
return sb.toString();
}
}
| 8,138 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/mapper/TestAbtractProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.mapper;
import junit.framework.TestCase;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkBuilder;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.util.RecordConstants;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
/**
 * Verifies AbstractProcessor splits a multi-record chunk back into the
 * original record strings (TProcessor below asserts each one in order).
 */
public class TestAbtractProcessor extends TestCase {
  String[] data = { "dsjsjbsfjds\ndsafsfasd\n",
      "asdgHSAJGDGYDGGHAgd7364rt3478tc4\nhr473rt346t\n", "e gqd yeegyxuyexfg\n" };

  public void testParse() {
    // Pack all three records into a single chunk, escaping every record
    // separator except the terminating one.
    ChunkBuilder builder = new ChunkBuilder();
    for (String rec : data) {
      builder.addRecord(
          RecordConstants.escapeAllButLastRecordSeparator("\n", rec).getBytes());
    }
    Chunk chunk = builder.getChunk();

    OutputCollector<ChukwaRecordKey, ChukwaRecord> collector =
        new ChukwaTestOutputCollector<ChukwaRecordKey, ChukwaRecord>();
    TProcessor verifier = new TProcessor();
    verifier.data = data;
    verifier.process(null, chunk, collector, null);
  }
}
class TProcessor extends AbstractProcessor {
String[] data = null;
int count = 0;
@Override
protected void parse(String recordEntry,
OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter) {
if (!recordEntry.equals(data[count])) {
System.out.println("[" + recordEntry + "]");
System.out.println("[" + data[count] + "]");
throw new RuntimeException("not the same record");
}
count++;
}
public String getDataType() {
// TODO Auto-generated method stub
return null;
}
} | 8,139 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/extraction/demux/processor/reducer/MockReduceProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.extraction.demux.processor.reducer;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import java.util.Iterator;
import java.io.IOException;
public class MockReduceProcessor implements ReduceProcessor {
public String getDataType() {
return "MockDataType";
}
public void process(ChukwaRecordKey key, Iterator<ChukwaRecord> values,
OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
Reporter reporter) {
ChukwaRecord record = new ChukwaRecord();
record.add("MockReduceProcessorKey", "MockReduceProcessorValue");
try {
output.collect(key, record);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | 8,140 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/rest | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/rest/resource/SetupTestEnv.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.rest.resource;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import org.apache.hadoop.chukwa.hicc.HiccWebServer;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import junit.framework.TestCase;
/**
 * Shared fixture for HICC REST tests: boots a MiniDFSCluster and a single
 * HICC web server instance, and exposes the connection details (port,
 * credentials, Jersey client handles) the resource tests use.
 */
public class SetupTestEnv extends TestCase {
  public WebResource resource;
  public URI baseURL;
  public Client client;
  public static int restPort = 4080;
  // HICC configuration, populated once when the web server is first created.
  public static Configuration conf = null;
  public static HiccWebServer hicc = null;
  public static String user = "admin";
  // Pre-encoded "admin:admin" for HTTP Basic auth headers.
  public static String authorization = "Basic YWRtaW46YWRtaW4=";
  public static MiniDFSCluster dfs;

  public SetupTestEnv() {
    try {
      // Use a dedicated local configuration for the mini cluster; the static
      // 'conf' field holds the HICC configuration and must not be shadowed
      // (the original code declared a local named 'conf' here).
      Configuration dfsConf = new Configuration();
      dfsConf.setBoolean("dfs.permissions", true);
      dfs = new MiniDFSCluster(dfsConf, 1, true, null);
    } catch(Exception e) {
      // Include the root cause so a cluster startup failure is diagnosable.
      fail("Fail to start MiniDFSCluster: " + ExceptionUtil.getStackTrace(e));
    }
    if(hicc==null) {
      hicc = HiccWebServer.getInstance();
      conf = HiccWebServer.getConfig();
    }
  }

  public void setUp() {
    hicc.start();
  }

  public void tearDown() {
    dfs.shutdown();
  }
}
| 8,141 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/rest | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/rest/resource/TestClientTrace.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.rest.resource;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter;
import org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.chukwa.rest.bean.ClientTraceBean;
import org.apache.hadoop.chukwa.rest.bean.UserBean;
import org.apache.hadoop.chukwa.rest.bean.WidgetBean;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.GenericType;
/**
 * End-to-end test: pushes a datanode client-trace log line through the
 * collector pipeline, then reads it back through the HICC REST API and
 * checks the recorded action.
 */
public class TestClientTrace extends SetupTestEnv {
  public void testClientTrace() {
    // Setup Collector
    Configuration pipelineConf = new Configuration();
    pipelineConf.set("chukwaCollector.pipeline",
        SocketTeeWriter.class.getCanonicalName());
    pipelineConf.set("chukwaCollector.writerClass",
        PipelineStageWriter.class.getCanonicalName());
    try {
      PipelineStageWriter writer = new PipelineStageWriter();
      writer.init(pipelineConf);
      // Send a client trace chunk
      String traceLine = "2009-12-29 22:32:27,047 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /10.10.100.60:43707, dest: /10.10.100.60:50010, bytes: 7003141, op: HDFS_WRITE, cliID: DFSClient_-8389654, offset: 0, srvID: DS-2032680158-98.137.100.60-50010-1259976007324, blockid: blk_-2723720761101769540_705411, duration: 289013780000";
      ArrayList<Chunk> chunks = new ArrayList<Chunk>();
      chunks.add(new ChunkImpl("ClientTrace", "name", 1, traceLine.getBytes(), null));
      assertTrue(chunks.size() == 1);
      writer.add(chunks);
      assertTrue(true);
    } catch (RuntimeException er) {
      fail(ExceptionUtil.getStackTrace(er));
    } catch (WriterException e) {
      fail(ExceptionUtil.getStackTrace(e));
    }
    try {
      // Locate the client trace object through the REST interface.
      client = Client.create();
      resource = client.resource("http://localhost:" + restPort);
      List<ClientTraceBean> traces = resource.path("/hicc/v1/clienttrace")
          .header("Authorization", authorization)
          .get(new GenericType<List<ClientTraceBean>>(){});
      for (ClientTraceBean trace : traces) {
        assertEquals("HDFS_WRITE", trace.getAction());
      }
    } catch (Exception e) {
      fail(ExceptionUtil.getStackTrace(e));
    }
  }
}
| 8,142 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/hicc/AreaCalculatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.hicc;
import java.util.Date;
import java.util.Random;
import java.util.TreeMap;
import junit.framework.TestCase;
/**
 * Unit tests for AreaCalculator.
 */
public class AreaCalculatorTest extends TestCase {
  /**
   * Computes areas for two random series and asserts that a result is
   * produced for each series key. Values are random (see getDots), so only
   * presence and count are asserted; output is still printed for debugging.
   */
  public void testGetAreaTreeMapOfStringTreeMapOfStringDouble() {
    TreeMap<String, TreeMap<String, Double>> maps = new TreeMap<String, TreeMap<String, Double>>();
    maps.put("abc", getDots());
    maps.put("def", getDots());
    TreeMap<String, Double> areas = AreaCalculator.getAreas(maps);
    System.out.println("Area of 'abc': " + areas.get("abc"));
    System.out.println("Area of 'def': " + areas.get("def"));
    // Previously this test only printed results without asserting anything.
    assertEquals(2, areas.size());
    assertNotNull(areas.get("abc"));
    assertNotNull(areas.get("def"));
  }
  /** Computes the area of a single random series; asserts a result exists. */
  public void testGetAreaTreeMapOfStringDouble() {
    TreeMap<String, Double> map = getDots();
    Double area = AreaCalculator.getArea(map);
    System.out.println("Area: " + area);
    // Previously this test only printed the result without asserting it.
    assertNotNull(area);
  }
  /** The trapezoid between points (1,4) and (2,4) has area exactly 4. */
  public void testGetAreaDoubleDoubleDoubleDouble() {
    Double area = AreaCalculator.getArea(1, 4, 2, 4);
    System.out.println(area);
    assertEquals(true, area > 3.99999 && area < 4.00001);
  }
  /**
   * Builds four data points one second apart, keyed by epoch milliseconds
   * (as strings), with random values in the range [2, 11].
   */
  private TreeMap<String, Double> getDots() {
    TreeMap<String, Double> map = new TreeMap<String, Double>();
    long now = new Date().getTime();
    Random r = new Random(now);
    for (long i = 0; i < 4; i++) {
      double value = r.nextInt(10) + 2;
      System.out.println(now + ": " + value);
      map.put(now + "", value);
      now += 1000;
    }
    return map;
  }
}
| 8,143 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/hicc/TestChart.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.hicc;
import junit.framework.TestCase;
import javax.servlet.http.HttpServletRequest;
import java.util.TreeMap;
import java.util.ArrayList;
/**
 * Placeholder tests for the HICC Chart component. Every assertion body is
 * commented out, so each test currently passes without exercising Chart.
 * NOTE(review): these appear to be disabled pending a working Chart API;
 * confirm whether they should be re-enabled or removed.
 */
public class TestChart extends TestCase {
  // Disabled: would render a line chart and check for "lines" in the output.
  public void testLineChart() {
    HttpServletRequest request = null;
    // Chart c = new Chart(request);
    // String render = "line";
    // TreeMap<String, TreeMap<String, Double>> dataMap = new TreeMap<String, TreeMap<String, Double>>();
    // TreeMap<String, Double> series = new TreeMap<String, Double>();
    // ArrayList<String> labels = new ArrayList<String>();
    // for (int i = 0; i < 5; i++) {
    //   labels.add("" + i);
    //   series.put("" + i, 1.0 * i);
    // }
    // dataMap.put("series1", series);
    // c.setXLabelsRange(labels);
    // c.setDataSet(render, dataMap);
    // String output = c.plot();
    // assertTrue(output.contains("lines"));
  }
  // Disabled: would render a bar chart and check for "bar" in the output.
  public void testBarChart() {
    HttpServletRequest request = null;
    // Chart c = new Chart(request);
    // String render = "bar";
    // TreeMap<String, TreeMap<String, Double>> dataMap = new TreeMap<String, TreeMap<String, Double>>();
    // TreeMap<String, Double> series = new TreeMap<String, Double>();
    // ArrayList<String> labels = new ArrayList<String>();
    // for (int i = 0; i < 5; i++) {
    //   labels.add("" + i);
    //   series.put("" + i, 1.0 * i);
    // }
    // dataMap.put("series1", series);
    // c.setXLabelsRange(labels);
    // c.setDataSet(render, dataMap);
    // String output = c.plot();
    // assertTrue(output.contains("bar"));
  }
  // Disabled: would render a scatter chart and check for "point" in the output.
  public void testScatterChart() {
    HttpServletRequest request = null;
    // Chart c = new Chart(request);
    // String render = "point";
    // TreeMap<String, TreeMap<String, Double>> dataMap = new TreeMap<String, TreeMap<String, Double>>();
    // TreeMap<String, Double> series = new TreeMap<String, Double>();
    // ArrayList<String> labels = new ArrayList<String>();
    // for (int i = 0; i < 5; i++) {
    //   labels.add("" + i);
    //   series.put("" + i, 1.0 * i);
    // }
    // dataMap.put("series1", series);
    // c.setXLabelsRange(labels);
    // c.setDataSet(render, dataMap);
    // String output = c.plot();
    // assertTrue(output.contains("point"));
  }
}
| 8,144 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/dataloader/TestSocketDataLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.dataloader;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.Chunk;
import java.util.ArrayList;
import java.util.Collection;
import java.util.NoSuchElementException;
import java.util.regex.Matcher;
import org.apache.hadoop.chukwa.datacollection.collector.CaptureWriter;
import org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter;
import org.apache.hadoop.chukwa.datacollection.writer.SocketTeeWriter;
import org.apache.hadoop.chukwa.rest.bean.ClientTraceBean;
import java.net.*;
import java.io.*;
/**
 * Verifies that a chunk pushed through the collector pipeline becomes
 * visible to a SocketDataLoader subscribed to the SocketTeeWriter
 * "all" stream.
 */
public class TestSocketDataLoader extends TestCase {
  public void testSocketTee() throws Exception {
    Configuration conf = new Configuration();
    conf.set("chukwa.pipeline",
        SocketTeeWriter.class.getCanonicalName());
    conf.set("chukwa.writerClass",
        PipelineStageWriter.class.getCanonicalName());
    PipelineStageWriter pipeline = new PipelineStageWriter(conf);
    SocketDataLoader loader = new SocketDataLoader("all");
    System.out.println("pipeline established; now pushing a chunk");
    // Push a single one-byte chunk ('a') into the pipeline.
    ArrayList<Chunk> outgoing = new ArrayList<Chunk>();
    outgoing.add(new ChunkImpl("dt", "name", 1, new byte[] {'a'}, null));
    pipeline.add(outgoing);
    //push a chunk through. SocketDataLoader should receive this chunk.
    try {
      for (Chunk received : loader.read()) {
        if (received != null && received.getData() != null) {
          // Payload written above was the single byte 'a'.
          assertTrue("a".equals(new String(received.getData())));
        }
      }
    } catch (NoSuchElementException e) {
      // Nothing queued for this consumer yet; best effort, not a failure.
    }
  }
}
| 8,145 |
0 | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/test/java/org/apache/hadoop/chukwa/dataloader/TestDatabaseMetricDataLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.dataloader;
import junit.framework.TestCase;
import java.util.Calendar;
import org.apache.hadoop.chukwa.database.Macro;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.database.TableCreator;
import org.apache.hadoop.chukwa.dataloader.MetricDataLoader;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
/**
 * Loads sample metric sequence files through MetricDataLoader and verifies
 * that every averaged column of the target tables contains data.
 */
public class TestDatabaseMetricDataLoader extends TestCase {
  // Retention windows, in days, for which partitioned tables are created.
  long[] timeWindow = {7, 30, 91, 365, 3650};
  // Tables whose averaged columns are checked after the load completes.
  String[] tables = {"system_metrics","disk","mr_job","mr_task"}; //,"dfs_namenode","dfs_datanode","dfs_fsnamesystem","dfs_throughput","hadoop_jvm","hadoop_mapred","hdfs_usage"};
  String cluster = "demo";
  long current = Calendar.getInstance().getTimeInMillis();
  /**
   * Creates the Chukwa schema from database_create_tables.sql, then creates
   * the time-partitioned tables covering each retention window.
   */
  public void setUp() {
    System.setProperty("CLUSTER","demo");
    DatabaseWriter db = new DatabaseWriter(cluster);
    String buffer = "";
    File aFile = new File(System.getenv("CHUKWA_CONF_DIR")
        + File.separator + "database_create_tables.sql");
    buffer = readFile(aFile);
    // Renamed from "tables" to stop shadowing the tables field above.
    String[] statements = buffer.split(";");
    for (String statement : statements) {
      // Skip fragments too short to be real SQL (e.g. trailing whitespace).
      if (statement.length() > 5) {
        try {
          db.execute(statement);
        } catch (Exception e) {
          fail("Fail to retrieve meta data from database table: " + statement);
        }
      }
    }
    db.close();
    for (int i = 0; i < timeWindow.length; i++) {
      TableCreator tc = new TableCreator();
      long start = current;
      long end = current + (timeWindow[i] * 1440 * 60 * 1000);
      try {
        tc.createTables(start, end);
      } catch (Exception e) {
        fail("Fail to create database tables.");
      }
    }
  }
  /** Drops every table in the test database, always closing the connection. */
  public void tearDown() {
    DatabaseWriter db = null;
    try {
      db = new DatabaseWriter(cluster);
      ResultSet rs = db.query("show tables");
      ArrayList<String> list = new ArrayList<String>();
      while (rs.next()) {
        String table = rs.getString(1);
        list.add(table);
      }
      for (String table : list) {
        db.execute("drop table " + table);
      }
    } catch (Throwable ex) {
      // Best-effort cleanup; ignored so tearDown never masks a test failure.
    } finally {
      if (db != null) {
        db.close();
      }
    }
  }
  /**
   * Reads the whole file into a string, preserving line separators.
   * Returns whatever was read so far (possibly empty) on I/O error.
   */
  public String readFile(File aFile) {
    // StringBuilder: single-threaded use, no need for StringBuffer locking.
    StringBuilder contents = new StringBuilder();
    try {
      BufferedReader input = new BufferedReader(new FileReader(aFile));
      try {
        String line = null; // not declared within while loop
        while ((line = input.readLine()) != null) {
          contents.append(line);
          contents.append(System.getProperty("line.separator"));
        }
      } finally {
        input.close();
      }
    } catch (IOException ex) {
      ex.printStackTrace();
    }
    return contents.toString();
  }
  /**
   * Runs MetricDataLoader over every sample sequence file, then asserts that
   * each averaged column of each target table holds non-null data. The column
   * checks are skipped when there are no sample files to load.
   */
  public void testMetricDataLoader() {
    boolean skip = false;
    String srcDir = System.getenv("CHUKWA_DATA_DIR") + File.separator + "samples";
    try {
      ChukwaConfiguration conf = new ChukwaConfiguration();
      FileSystem fs = FileSystem.get(conf);
      FileStatus[] sources = fs.listStatus(new Path(srcDir));
      for (FileStatus sequenceFile : sources) {
        MetricDataLoader mdl = new MetricDataLoader(conf, fs, sequenceFile.getPath().toUri().toString());
        mdl.call();
      }
      if (sources.length == 0) {
        skip = true;
      }
    } catch (Throwable ex) {
      fail("SQL Exception: " + ExceptionUtil.getStackTrace(ex));
    }
    if (!skip) {
      DatabaseWriter db = new DatabaseWriter(cluster);
      for (int i = 0; i < tables.length; i++) {
        // Macro expands [avg(...)] and [table] into partition-aware SQL.
        String query = "select [avg(" + tables[i] + ")] from [" + tables[i] + "]";
        Macro mp = new Macro(current, query);
        query = mp.toString();
        try {
          ResultSet rs = db.query(query);
          ResultSetMetaData rsmd = rs.getMetaData();
          int numberOfColumns = rsmd.getColumnCount();
          while (rs.next()) {
            for (int j = 1; j <= numberOfColumns; j++) {
              assertTrue("Table: "+tables[i]+", Column: "+rsmd.getColumnName(j)+", contains no data.", rs.getString(j) != null);
            }
          }
        } catch (Throwable ex) {
          fail("MetricDataLoader failed: " + ExceptionUtil.getStackTrace(ex));
        }
      }
      db.close();
      assertTrue("MetricDataLoader executed successfully.", true);
    }
  }
}
| 8,146 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/metrics | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java | /*
* AbstractMetricsContext.java
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics.spi;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.TreeMap;
import java.util.Map.Entry;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsException;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.Updater;
/**
 * The main class of the Service Provider Interface. This class should be
 * extended in order to integrate the Metrics API with a specific metrics
 * client library.
 *
 * This class implements the internal table of metric data, and the timer
 * on which data is to be sent to the metrics system. Subclasses must
 * override the abstract <code>emitRecord</code> method in order to transmit
 * the data.
 */
public abstract class AbstractMetricsContext implements MetricsContext {
  /** Seconds between emission passes; see {@link #setPeriod(int)}. */
  private int period = MetricsContext.DEFAULT_PERIOD;
  /** Daemon timer driving periodic emission; null while not monitoring. */
  private Timer timer = null;
  // NOTE(review): initialized to true and only ever reassigned true in
  // update(), so the "_rate" computation below is effectively unconditional;
  // confirm whether a "skip first interval" toggle was intended here.
  private boolean computeRate = true;
  /** Callbacks invoked on each timer tick, before records are emitted. */
  private Set<Updater> updaters = new HashSet<Updater>(1);
  /** Volatile so the timer thread sees start/stop without locking. */
  private volatile boolean isMonitoring = false;
  private ContextFactory factory = null;
  private String contextName = null;
  /**
   * Tag name-to-value map. Inherits equality-by-contents from TreeMap,
   * which makes it usable as a RecordMap key.
   */
  static class TagMap extends TreeMap<String,Object> {
    private static final long serialVersionUID = 3546309335061952993L;
    TagMap() {
      super();
    }
    TagMap(TagMap orig) {
      super(orig);
    }
    /**
     * Returns true if this tagmap contains every tag in other.
     * @param other the tag map whose entries must all be present here
     * @return true if every key in other maps to an equal value in this map
     */
    public boolean containsAll(TagMap other) {
      for (Map.Entry<String,Object> entry : other.entrySet()) {
        Object value = get(entry.getKey());
        if (value == null || !value.equals(entry.getValue())) {
          // either key does not exist here, or the value is different
          return false;
        }
      }
      return true;
    }
  }
  /** Metric name to current value, sorted by metric name. */
  static class MetricMap extends TreeMap<String,Number> {
    private static final long serialVersionUID = -7495051861141631609L;
  }
  /** One row of metric data per distinct tag combination. */
  static class RecordMap extends HashMap<TagMap,MetricMap> {
    private static final long serialVersionUID = 259835619700264611L;
  }
  /** Record name to its table of rows; rows accumulate until emitted. */
  private Map<String,RecordMap> bufferedData = new HashMap<String,RecordMap>();
  /**
   * Creates a new instance of AbstractMetricsContext
   */
  protected AbstractMetricsContext() {
  }
  /**
   * Initializes the context.
   */
  public void init(String contextName, ContextFactory factory)
  {
    this.contextName = contextName;
    this.factory = factory;
  }
  /**
   * Convenience method for subclasses to access factory attributes.
   * The lookup key is "&lt;contextName&gt;.&lt;attributeName&gt;".
   */
  protected String getAttribute(String attributeName) {
    String factoryAttribute = contextName + "." + attributeName;
    return (String) factory.getAttribute(factoryAttribute);
  }
  /**
   * Returns an attribute-value map derived from the factory attributes
   * by finding all factory attributes that begin with
   * <i>contextName</i>.<i>tableName</i>. The returned map consists of
   * those attributes with the contextName and tableName stripped off.
   */
  protected Map<String,String> getAttributeTable(String tableName) {
    String prefix = contextName + "." + tableName + ".";
    Map<String,String> result = new HashMap<String,String>();
    for (String attributeName : factory.getAttributeNames()) {
      if (attributeName.startsWith(prefix)) {
        String name = attributeName.substring(prefix.length());
        String value = (String) factory.getAttribute(attributeName);
        result.put(name, value);
      }
    }
    return result;
  }
  /**
   * Returns the context name.
   */
  public String getContextName() {
    return contextName;
  }
  /**
   * Returns the factory by which this context was created.
   * @return the factory by which the context was created
   */
  public ContextFactory getContextFactory() {
    return factory;
  }
  /**
   * Starts or restarts monitoring, the emitting of metrics records.
   */
  public synchronized void startMonitoring()
    throws IOException {
    if (!isMonitoring) {
      startTimer();
      isMonitoring = true;
    }
  }
  /**
   * Stops monitoring. This does not free buffered data.
   * @see #close()
   */
  public synchronized void stopMonitoring() {
    if (isMonitoring) {
      stopTimer();
      isMonitoring = false;
    }
  }
  /**
   * Returns true if monitoring is currently in progress.
   */
  public boolean isMonitoring() {
    return isMonitoring;
  }
  /**
   * Stops monitoring and frees buffered data, returning this
   * object to its initial state.
   */
  public synchronized void close() {
    stopMonitoring();
    clearUpdaters();
  }
  /**
   * Creates a new AbstractMetricsRecord instance with the given <code>recordName</code>.
   * Throws an exception if the metrics implementation is configured with a fixed
   * set of record names and <code>recordName</code> is not in that set.
   *
   * @param recordName the name of the record
   * @throws MetricsException if recordName conflicts with configuration data
   */
  public final synchronized MetricsRecord createRecord(String recordName) {
    if (bufferedData.get(recordName) == null) {
      // First record with this name: allocate its buffered-data table.
      bufferedData.put(recordName, new RecordMap());
    }
    return newRecord(recordName);
  }
  /**
   * Subclasses should override this if they subclass MetricsRecordImpl.
   * @param recordName the name of the record
   * @return newly created instance of MetricsRecordImpl or subclass
   */
  protected MetricsRecord newRecord(String recordName) {
    return new MetricsRecordImpl(recordName, this);
  }
  /**
   * Registers a callback to be called at time intervals determined by
   * the configuration.
   *
   * @param updater object to be run periodically; it should update
   * some metrics records
   */
  public synchronized void registerUpdater(final Updater updater) {
    if (!updaters.contains(updater)) {
      updaters.add(updater);
    }
  }
  /**
   * Removes a callback, if it exists.
   *
   * @param updater object to be removed from the callback list
   */
  public synchronized void unregisterUpdater(Updater updater) {
    updaters.remove(updater);
  }
  private synchronized void clearUpdaters() {
    updaters.clear();
  }
  /**
   * Starts timer if it is not already started
   */
  private synchronized void startTimer() {
    if (timer == null) {
      // Daemon timer (second ctor arg) so it never blocks JVM shutdown.
      timer = new Timer("Timer thread for monitoring " + getContextName(),
                        true);
      TimerTask task = new TimerTask() {
          public void run() {
            try {
              timerEvent();
            }
            catch (IOException ioe) {
              ioe.printStackTrace();
            }
          }
        };
      // NOTE(review): the multiply is done in int before widening to long;
      // only overflows for periods over ~24 days, but period * 1000L would
      // be safer.
      long millis = period * 1000;
      timer.scheduleAtFixedRate(task, millis, millis);
    }
  }
  /**
   * Stops timer if it is running
   */
  private synchronized void stopTimer() {
    if (timer != null) {
      timer.cancel();
      timer = null;
    }
  }
  /**
   * Timer callback. Runs the registered updaters, then emits all
   * buffered records.
   */
  private void timerEvent() throws IOException {
    if (isMonitoring) {
      Collection<Updater> myUpdaters;
      synchronized (this) {
        // Snapshot the updater set so callbacks run without holding the
        // context lock (an updater may call back into this context).
        myUpdaters = new ArrayList<Updater>(updaters);
      }
      // Run all the registered updates without holding a lock
      // on this context
      for (Updater updater : myUpdaters) {
        try {
          updater.doUpdates(this);
        }
        catch (Throwable throwable) {
          throwable.printStackTrace();
        }
      }
      emitRecords();
    }
  }
  /**
   * Emits the records. One OutputRecord is emitted per (record name,
   * tag combination) row currently buffered; buffered data is retained.
   */
  private synchronized void emitRecords() throws IOException {
    for (Entry<String, RecordMap> record : bufferedData.entrySet()) {
      String recordName = record.getKey();
      RecordMap recordMap = record.getValue();
      for (Entry<TagMap, MetricMap> entry : record.getValue().entrySet()) {
        OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
        emitRecord(contextName, recordName, outRec);
      }
    }
    flush();
  }
  /**
   * Sends a record to the metrics system.
   */
  protected abstract void emitRecord(String contextName, String recordName,
                                     OutputRecord outRec) throws IOException;
  /**
   * Called each period after all records have been emitted, this method does nothing.
   * Subclasses may override it in order to perform some kind of flush.
   */
  protected void flush() throws IOException {
  }
  /**
   * Called by MetricsRecordImpl.update(). Creates or updates a row in
   * the internal table of metric data. Absolute values overwrite; other
   * values are accumulated, and derived "_raw" (last delta) and "_rate"
   * (delta per minute) metrics are stored alongside the named metric.
   */
  protected void update(MetricsRecordImpl record) {
    String recordName = record.getRecordName();
    TagMap tagTable = record.getTagTable();
    Map<String,MetricValue> metricUpdates = record.getMetricTable();
    RecordMap recordMap = getRecordMap(recordName);
    synchronized (recordMap) {
      MetricMap metricMap = recordMap.get(tagTable);
      if (metricMap == null) {
        metricMap = new MetricMap();
        TagMap tagMap = new TagMap(tagTable); // clone tags
        recordMap.put(tagMap, metricMap);
      }
      Set<Entry<String, MetricValue>> entrySet = metricUpdates.entrySet();
      for (Entry<String, MetricValue> entry : entrySet) {
        String metricName = entry.getKey ();
        MetricValue updateValue = entry.getValue ();
        Number updateNumber = updateValue.getNumber();
        Number currentNumber = metricMap.get(metricName);
        if (currentNumber == null || updateValue.isAbsolute()) {
          metricMap.put(metricName, updateNumber);
        }
        else {
          // Incremental update: accumulate, and expose the raw delta and a
          // per-minute rate as derived metrics.
          Number newNumber = sum(updateNumber, currentNumber);
          metricMap.put(metricName, newNumber);
          metricMap.put(metricName+"_raw", updateNumber);
          if (computeRate ) {
            double rate = updateNumber.doubleValue() * 60.0 / period;
            metricMap.put(metricName+"_rate", rate);
          }
          computeRate = true;
        }
      }
    }
  }
  private synchronized RecordMap getRecordMap(String recordName) {
    return bufferedData.get(recordName);
  }
  /**
   * Adds two numbers, coercing the second to the type of the first.
   * NOTE(review): Double values are not handled and fall through to the
   * exception branch below -- confirm callers never accumulate Doubles.
   */
  private Number sum(Number a, Number b) {
    if (a instanceof Integer) {
      return Integer.valueOf(a.intValue() + b.intValue());
    }
    else if (a instanceof Float) {
      return new Float(a.floatValue() + b.floatValue());
    }
    else if (a instanceof Short) {
      return Short.valueOf((short)(a.shortValue() + b.shortValue()));
    }
    else if (a instanceof Byte) {
      return Byte.valueOf((byte)(a.byteValue() + b.byteValue()));
    }
    else if (a instanceof Long) {
      return Long.valueOf((a.longValue() + b.longValue()));
    }
    else {
      // should never happen
      throw new MetricsException("Invalid number type");
    }
  }
  /**
   * Called by MetricsRecordImpl.remove(). Removes all matching rows in
   * the internal table of metric data. A row matches if it has the same
   * tag names and values as record, but it may also have additional
   * tags.
   */
  protected void remove(MetricsRecordImpl record) {
    String recordName = record.getRecordName();
    TagMap tagTable = record.getTagTable();
    RecordMap recordMap = getRecordMap(recordName);
    synchronized (recordMap) {
      Iterator<TagMap> it = recordMap.keySet().iterator();
      while (it.hasNext()) {
        TagMap rowTags = it.next();
        if (rowTags.containsAll(tagTable)) {
          it.remove();
        }
      }
    }
  }
  /**
   * Returns the timer period.
   */
  public int getPeriod() {
    return period;
  }
  /**
   * Sets the timer period
   */
  protected void setPeriod(int period) {
    this.period = period;
  }
}
| 8,147 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/ChunkImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
public class ChunkImpl implements org.apache.hadoop.io.Writable, Chunk {
public final static int PROTOCOL_VERSION = 1;
protected DataFactory dataFactory = DataFactory.getInstance();
private String source = "";
private String streamName = "";
private String dataType = "";
private String tags = "";
private byte[] data = null;
private int[] recordEndOffsets;
private int protocolVersion = 1;
private String debuggingInfo = "";
private transient Adaptor initiator;
long seqID;
private static String localHostAddr;
static {
try {
setHostAddress(InetAddress.getLocalHost().getHostName().toLowerCase());
} catch (UnknownHostException e) {
setHostAddress("localhost");
}
}
public static void setHostAddress(String host) {
ChunkImpl.localHostAddr = host;
}
public static ChunkImpl getBlankChunk() {
return new ChunkImpl();
}
ChunkImpl() {
}
public ChunkImpl(String dataType, String streamName, long seq, byte[] data,
Adaptor source) {
this.seqID = seq;
this.source = localHostAddr;
this.tags = dataFactory.getDefaultTags();
this.streamName = streamName;
this.dataType = dataType;
this.data = (byte[]) data.clone();
this.initiator = source;
}
/**
* @see org.apache.hadoop.chukwa.Chunk#getData()
*/
public byte[] getData() {
return data.clone();
}
/**
* @see org.apache.hadoop.chukwa.Chunk#setData(byte[])
*/
public void setData(byte[] logEvent) {
this.data = (byte[]) logEvent.clone();
}
/**
* @see org.apache.hadoop.chukwa.Chunk#getStreamName()
*/
public String getStreamName() {
return streamName;
}
public void setStreamName(String logApplication) {
this.streamName = logApplication;
}
public String getSource() {
return source;
}
public void setSource(String logSource) {
this.source = logSource;
}
public String getDebugInfo() {
return debuggingInfo;
}
public void setDebugInfo(String a) {
this.debuggingInfo = a;
}
/**
* @see org.apache.hadoop.chukwa.Chunk#getSeqID()
*/
public long getSeqID() {
return seqID;
}
public void setSeqID(long l) {
seqID = l;
}
public int getProtocolVersion() {
return protocolVersion;
}
public void setProtocolVersion(int pv) {
this.protocolVersion = pv;
}
public Adaptor getInitiator() {
return initiator;
}
public void setInitiator(Adaptor a) {
initiator = a;
}
public void setLogSource() {
source = localHostAddr;
}
public int[] getRecordOffsets() {
if (recordEndOffsets == null)
recordEndOffsets = new int[] { data.length - 1 };
return recordEndOffsets.clone();
}
public void setRecordOffsets(int[] offsets) {
recordEndOffsets = (int[]) offsets.clone();
}
public String getDataType() {
return dataType;
}
public void setDataType(String t) {
dataType = t;
}
@Override
public void addTag(String tags) {
this.tags += " "+ tags;
}
/**
* @see org.apache.hadoop.chukwa.Chunk#getTags()
*/
public String getTags() {
return tags;
}
/**
* @see org.apache.hadoop.chukwa.Chunk#getTag(java.lang.String)
*/
public String getTag(String tagName) {
Pattern tagPattern = Pattern.compile("\\b"+tagName+"=\"([^\"]*)\"");
if (tags != null) {
Matcher matcher = tagPattern.matcher(tags);
if (matcher.find()) {
return matcher.group(1);
}
}
return null;
}
/**
* @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
*/
public void readFields(DataInput in) throws IOException {
setProtocolVersion(in.readInt());
if (protocolVersion != PROTOCOL_VERSION) {
throw new IOException(
"Protocol version mismatched, drop data. source version: "
+ protocolVersion + ", collector version:" + PROTOCOL_VERSION);
}
setSeqID(in.readLong());
setSource(in.readUTF());
tags = in.readUTF(); // no public set method here
setStreamName(in.readUTF());
setDataType(in.readUTF());
setDebugInfo(in.readUTF());
int numRecords = in.readInt();
recordEndOffsets = new int[numRecords];
for (int i = 0; i < numRecords; ++i)
recordEndOffsets[i] = in.readInt();
data = new byte[recordEndOffsets[recordEndOffsets.length - 1] + 1];
in.readFully(data);
}
/**
* @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
*/
public void write(DataOutput out) throws IOException {
out.writeInt(PROTOCOL_VERSION);
out.writeLong(seqID);
out.writeUTF(source);
out.writeUTF(tags);
out.writeUTF(streamName);
out.writeUTF(dataType);
out.writeUTF(debuggingInfo);
if (recordEndOffsets == null)
recordEndOffsets = new int[] { data.length - 1 };
out.writeInt(recordEndOffsets.length);
for (int i = 0; i < recordEndOffsets.length; ++i)
out.writeInt(recordEndOffsets[i]);
out.write(data, 0, recordEndOffsets[recordEndOffsets.length - 1] + 1);
// byte at last offset is valid
}
public static ChunkImpl read(DataInput in) throws IOException {
ChunkImpl w = new ChunkImpl();
w.readFields(in);
return w;
}
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append(source);
buffer.append(":");
buffer.append(streamName);
buffer.append(new String(data, Charset.forName("UTF-8")));
buffer.append("/");
buffer.append(seqID);
return buffer.toString();
}
  /**
   * Pessimistic upper-bound estimate of this chunk's serialized size,
   * assuming two bytes per metadata character.
   *
   * NOTE(review): the tags string is written by write() but is not counted
   * here, so the estimate may undershoot for chunks with large tags — confirm.
   *
   * @see org.apache.hadoop.chukwa.Chunk#getSerializedSizeEstimate()
   */
  public int getSerializedSizeEstimate() {
    int size = 2 * (source.length() + streamName.length() + dataType.length()
        + debuggingInfo.length()); // length of strings (pessimistic)
    size += data.length + 4;
    if (recordEndOffsets == null)
      size += 8;
    else
      size += 4 * (recordEndOffsets.length + 1); // +1 for length of array
    size += 8; // uuid
    return size;
  }
public void setRecordOffsets(java.util.Collection<Integer> carriageReturns) {
recordEndOffsets = new int[carriageReturns.size()];
int i = 0;
for (Integer offset : carriageReturns)
recordEndOffsets[i++] = offset;
}
  /**
   * @return the total length in bytes of the backing data buffer
   */
  public int getLength() {
    return data.length;
  }
}
| 8,148 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/ChukwaArchiveKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// File generated by hadoop record compiler. Do not edit.
package org.apache.hadoop.chukwa;
/**
 * Composite key for chunks in the Chukwa archive: a coarse time partition,
 * the chunk's data type, its stream name, and its sequence ID.
 *
 * <p>Generated by the hadoop record compiler (see header comment); prefer
 * regenerating over hand-editing the generated logic.
 */
public class ChukwaArchiveKey extends org.apache.hadoop.record.Record {
  // Static type metadata describing this record's fields, in declaration order.
  private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
  // Optional writer-side schema, installed via setTypeFilter() for versioned reads.
  private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
  // For each writer-schema field, the 1-based index of the matching local
  // field, or 0 if the field is unknown here and must be skipped on read.
  private static int[] _rio_rtiFilterFields;
  static {
    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
        "ChukwaArchiveKey");
    _rio_recTypeInfo.addField("timePartition",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
    _rio_recTypeInfo.addField("dataType",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
    _rio_recTypeInfo.addField("streamName",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
    _rio_recTypeInfo.addField("seqId",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
  }
  private long timePartition;
  private String dataType;
  private String streamName;
  private long seqId;
  public ChukwaArchiveKey() {
  }
  public ChukwaArchiveKey(final long timePartition, final String dataType,
      final String streamName, final long seqId) {
    this.timePartition = timePartition;
    this.dataType = dataType;
    this.streamName = streamName;
    this.seqId = seqId;
  }
  /** @return the static type metadata for this record class. */
  public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
    return _rio_recTypeInfo;
  }
  /**
   * Installs the writer's schema so deserialize() can read data produced by
   * a different version of this record. A null argument is ignored.
   *
   * @param rti the writer's record type info
   */
  public static void setTypeFilter(
      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
    if (null == rti)
      return;
    _rio_rtiFilter = rti;
    _rio_rtiFilterFields = null; // recomputed lazily by setupRtiFields()
  }
  // Maps each field of the writer's schema to the matching local field index.
  private static void setupRtiFields() {
    if (null == _rio_rtiFilter)
      return;
    // we may already have done this
    if (null != _rio_rtiFilterFields)
      return;
    int _rio_i, _rio_j;
    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
      _rio_rtiFilterFields[_rio_i] = 0;
    }
    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
        .getFieldTypeInfos().iterator();
    _rio_i = 0;
    while (_rio_itFilter.hasNext()) {
      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
          .next();
      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
          .getFieldTypeInfos().iterator();
      _rio_j = 1; // local indices are 1-based; 0 means "no local match"
      while (_rio_it.hasNext()) {
        org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
        if (_rio_tInfo.equals(_rio_tInfoFilter)) {
          _rio_rtiFilterFields[_rio_i] = _rio_j;
          break;
        }
        _rio_j++;
      }
      _rio_i++;
    }
  }
  public long getTimePartition() {
    return timePartition;
  }
  public void setTimePartition(final long timePartition) {
    this.timePartition = timePartition;
  }
  public String getDataType() {
    return dataType;
  }
  public void setDataType(final String dataType) {
    this.dataType = dataType;
  }
  public String getStreamName() {
    return streamName;
  }
  public void setStreamName(final String streamName) {
    this.streamName = streamName;
  }
  public long getSeqId() {
    return seqId;
  }
  public void setSeqId(final long seqId) {
    this.seqId = seqId;
  }
  // Writes the four fields in declaration order; deserialize() reads them back
  // in the same order.
  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    _rio_a.startRecord(this, _rio_tag);
    _rio_a.writeLong(timePartition, "timePartition");
    _rio_a.writeString(dataType, "dataType");
    _rio_a.writeString(streamName, "streamName");
    _rio_a.writeLong(seqId, "seqId");
    _rio_a.endRecord(this, _rio_tag);
  }
  // Fast path used when no writer schema (type filter) has been installed.
  private void deserializeWithoutFilter(
      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
      throws java.io.IOException {
    _rio_a.startRecord(_rio_tag);
    timePartition = _rio_a.readLong("timePartition");
    dataType = _rio_a.readString("dataType");
    streamName = _rio_a.readString("streamName");
    seqId = _rio_a.readLong("seqId");
    _rio_a.endRecord(_rio_tag);
  }
  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    if (null == _rio_rtiFilter) {
      deserializeWithoutFilter(_rio_a, _rio_tag);
      return;
    }
    // if we're here, we need to read based on version info
    _rio_a.startRecord(_rio_tag);
    setupRtiFields();
    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
      if (1 == _rio_rtiFilterFields[_rio_i]) {
        timePartition = _rio_a.readLong("timePartition");
      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
        dataType = _rio_a.readString("dataType");
      } else if (3 == _rio_rtiFilterFields[_rio_i]) {
        streamName = _rio_a.readString("streamName");
      } else if (4 == _rio_rtiFilterFields[_rio_i]) {
        seqId = _rio_a.readLong("seqId");
      } else {
        // Field present in the writer's schema but unknown locally: skip it.
        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
            .getFieldTypeInfos());
        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
      }
    }
    _rio_a.endRecord(_rio_tag);
  }
  // Orders by timePartition, then dataType, then streamName, then seqId.
  public int compareTo(final Object _rio_peer_) throws ClassCastException {
    if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
      throw new ClassCastException("Comparing different types of records.");
    }
    ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
    int _rio_ret = 0;
    _rio_ret = (timePartition == _rio_peer.timePartition) ? 0
        : ((timePartition < _rio_peer.timePartition) ? -1 : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = dataType.compareTo(_rio_peer.dataType);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = streamName.compareTo(_rio_peer.streamName);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = (seqId == _rio_peer.seqId) ? 0 : ((seqId < _rio_peer.seqId) ? -1
        : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    return _rio_ret;
  }
  public boolean equals(final Object _rio_peer_) {
    if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
      return false;
    }
    if (_rio_peer_ == this) {
      return true;
    }
    ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
    boolean _rio_ret = false;
    _rio_ret = (timePartition == _rio_peer.timePartition);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = dataType.equals(_rio_peer.dataType);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = streamName.equals(_rio_peer.streamName);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = (seqId == _rio_peer.seqId);
    if (!_rio_ret)
      return _rio_ret;
    return _rio_ret;
  }
  public Object clone() throws CloneNotSupportedException {
    super.clone();
    ChukwaArchiveKey _rio_other = new ChukwaArchiveKey();
    _rio_other.timePartition = this.timePartition;
    _rio_other.dataType = this.dataType;
    _rio_other.streamName = this.streamName;
    _rio_other.seqId = this.seqId;
    return _rio_other;
  }
  // Combines all four fields with the conventional 17/37 hash recipe,
  // consistent with equals().
  public int hashCode() {
    int _rio_result = 17;
    int _rio_ret;
    _rio_ret = (int) (timePartition ^ (timePartition >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = dataType.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = streamName.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = (int) (seqId ^ (seqId >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    return _rio_result;
  }
  public static String signature() {
    return "LChukwaArchiveKey(lssl)";
  }
  /**
   * Raw-bytes comparator over the serialized form (long, string, string,
   * long).
   *
   * <p>compareRaw uses a non-standard internal encoding: -1 means the first
   * key is smaller, 0 means it is larger, and any other value (the byte
   * delta os1 - s1) means the keys are equal; compare() translates this
   * into a conventional comparator result.
   */
  public static class Comparator extends
      org.apache.hadoop.record.RecordComparator {
    public Comparator() {
      super(ChukwaArchiveKey.class);
    }
    // Skips over one serialized key starting at b[s]; returns the negated
    // number of bytes consumed (os - s).
    static public int slurpRaw(byte[] b, int s, int l) {
      try {
        int os = s;
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        return (os - s);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }
    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      try {
        int os1 = s1;
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          // NOTE(review): this decrements l1 again where "l2 -= i2" looks
          // intended; harmless here because the remaining lengths are never
          // used for bounds, but worth confirming against the record
          // compiler's output.
          l1 -= i2;
        }
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          // NOTE(review): same "l1 -= i2" pattern as above.
          l1 -= i2;
        }
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        return (os1 - s1);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }
    // Translates compareRaw's encoding into a comparator result:
    // -1 -> first is smaller, 0 -> first is larger (return 1),
    // anything else (equal keys) -> 0.
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
    }
  }
  static {
    // Register the raw comparator so sorts can run on serialized bytes.
    org.apache.hadoop.record.RecordComparator.define(ChukwaArchiveKey.class,
        new Comparator());
  }
}
| 8,149 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/ChunkBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa;
import java.util.*;
import org.apache.hadoop.io.DataOutputBuffer;
import java.io.*;
/**
 * Accumulates individual records into a single {@link Chunk}.
 * Right now, just handles record collection.
 */
public class ChunkBuilder {
  // Inclusive end offset of each record added so far, relative to buf.
  ArrayList<Integer> recOffsets = new ArrayList<Integer>();
  // Offset of the last byte of the most recently added record; -1 initially.
  int lastRecOffset = -1;
  DataOutputBuffer buf = new DataOutputBuffer();

  /**
   * Adds the data in rec to an internal buffer; rec can be reused immediately.
   *
   * @param rec is byte array of data
   */
  public void addRecord(byte[] rec) {
    lastRecOffset += rec.length;
    recOffsets.add(lastRecOffset);
    try {
      buf.write(rec);
    } catch (IOException e) {
      throw new RuntimeException("buffer write failed. Out of memory?", e);
    }
  }

  /**
   * Builds a chunk whose data is the concatenation of all records added so
   * far, with record-end offsets marking each record boundary and seqID set
   * to the number of buffered bytes.
   *
   * @return the assembled chunk
   */
  public Chunk getChunk() {
    ChunkImpl chunk = new ChunkImpl();
    chunk.setData(buf.getData());
    chunk.setSeqID(buf.getLength());
    int[] endOffsets = new int[recOffsets.size()];
    int idx = 0;
    for (Integer offset : recOffsets) {
      endOffsets[idx] = offset;
      idx++;
    }
    chunk.setRecordOffsets(endOffsets);
    return chunk;
  }
}
| 8,150 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/Chunk.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.chukwa.datacollection.adaptor.*;
/**
 * A chunk is a sequence of bytes at a particular logical offset in a stream,
 * and containing one or more "records". Chunks have various metadata, such as
 * source, format, and pointers to record boundaries within the chunk.
 */
public interface Chunk {
  // these conceptually are really network addresses
  /**
   * @return the origin this chunk was collected from
   */
  public String getSource();
  public void setSource(String logSource);
  /**
   * Get the name of the stream that this Chunk is a chunk of
   *
   * @return the name of this stream; e.g. file name
   */
  public String getStreamName();
  public void setStreamName(String streamName);
  // These describe the format of the data buffer
  /**
   * @return the type name describing the format of the data buffer
   */
  public String getDataType();
  public void setDataType(String t);
  /**
   * @return the user data in the chunk
   */
  public byte[] getData();
  /**
   * @param logEvent the user data in the chunk
   */
  public void setData(byte[] logEvent);
  /**
   * get/set the <b>end</b> offsets of records in the buffer.
   *
   * We use end, rather than start offsets, since the first start offset is
   * always 0, but the last end offset specifies how much of the buffer is
   * valid.
   *
   * More precisely, offsets[i] is the offset in the Chunk of the last byte of
   * record i in this chunk.
   *
   * @return a list of record end offsets
   */
  public int[] getRecordOffsets();
  public void setRecordOffsets(int[] offsets);
  /**
   * @return the byte offset of the first byte not in this chunk.
   *
   *         We pick this convention so that subtracting sequence IDs yields
   *         length.
   *
   *         Furthermore, seqID - length = first byte pos.
   */
  public long getSeqID();
  public void setSeqID(long l);
  /**
   * Retrieve a reference to the adaptor that sent this event. Used by
   * LocalAgent and Connectors to deliver acks to the appropriate place.
   * @return Adaptor
   */
  public Adaptor getInitiator();
  /**
   * Estimate the size of this Chunk on the wire, assuming each char of metadata
   * takes two bytes to serialize. This is pessimistic.
   *
   * @return size in bytes that this Chunk might take once serialized.
   */
  public int getSerializedSizeEstimate();
  /**
   * @return tags.
   *
   */
  public String getTags();
  /**
   * Add tag.
   * @param tag is a comma separated list
   *
   */
  public void addTag(String tag);
  /**
   * Returns the value of a single tag, assuming tags are of the form
   * tagname="val"
   * @param tagName the tag to return
   * @return null if not matched.
   */
  public String getTag(String tagName);
  /**
   * Serializes this chunk to the given output.
   *
   * @param data destination to write the chunk to
   * @throws IOException on write failure
   */
  public void write(DataOutput data) throws IOException;
}
| 8,151 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/DatabaseConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.util.*;
import java.io.File;
import java.io.FilenameFilter;
/**
 * Wraps the MDL (mdl.xml) configuration and resolves which partitioned
 * database tables cover a given time range. Partition granularity grows
 * (week, month, quarter, year, decade, century) as the queried range widens
 * or recedes further into the past.
 */
public class DatabaseConfig {
  private Configuration config = null;
  // Partition window sizes, in milliseconds.
  public final static long CENTURY = 36500 * 24 * 60 * 60 * 1000L;
  public final static long DECADE = 3650 * 24 * 60 * 60 * 1000L;
  public final static long YEAR = 365 * 24 * 60 * 60 * 1000L;
  // 91.25 days in ms; the 1000x millisecond factor is folded into 91250
  // (= 91.25 * 1000) to keep the arithmetic integral.
  public final static long QUARTER = 91250 * 24 * 60 * 60L;
  public final static long MONTH = 30 * 24 * 60 * 60 * 1000L;
  public final static long WEEK = 7 * 24 * 60 * 60 * 1000L;
  public final static long DAY = 24 * 60 * 60 * 1000L;
  public final static String MDL_XML = "mdl.xml";
  /**
   * Loads the configuration from an explicit file path.
   *
   * @param path path to an mdl-style configuration file
   */
  public DatabaseConfig(String path) {
    Path fileResource = new Path(path);
    config = new Configuration();
    config.addResource(fileResource);
  }
  /**
   * Loads $CHUKWA_CONF_DIR/mdl.xml (or ./mdl.xml when CHUKWA_CONF_DIR is
   * unset), then layers any site-specific "*mdl.xml" files found in the
   * configuration directory on top of it.
   */
  public DatabaseConfig() {
    String dataConfig = System.getenv("CHUKWA_CONF_DIR");
    if (dataConfig == null) {
      dataConfig = MDL_XML;
    } else {
      dataConfig += File.separator + MDL_XML;
    }
    Path fileResource = new Path(dataConfig);
    config = new Configuration();
    config.addResource(fileResource);
    if (System.getenv("CHUKWA_CONF_DIR") != null) {
      // Allow site-specific MDL files to be included in the
      // configuration so as to keep the "main" mdl.xml pure.
      File confDir = new File(System.getenv("CHUKWA_CONF_DIR"));
      File[] confFiles = confDir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          // Implements a naming convention of ending with "mdl.xml"
          // but is careful not to pick up mdl.xml itself again.
          return name.endsWith(MDL_XML) && !name.equals(MDL_XML);
        }
      });
      if (confFiles != null) {
        for (File confFile : confFiles)
          config.addResource(new Path(confFile.getAbsolutePath()));
      }
    }
  }
  /** @return the configured value for key, or null if unset. */
  public String get(String key) {
    return config.get(key);
  }
  /** Sets (or overrides) a configuration property in memory. */
  public void put(String key, String value) {
    this.config.set(key, value);
  }
  /** @return an iterator over all configuration entries. */
  public Iterator<?> iterator() {
    return this.config.iterator();
  }
  /**
   * Collects every configuration entry whose key starts with the given
   * prefix.
   *
   * @param key the key prefix to match
   * @return map of matching key to value
   */
  public HashMap<String, String> startWith(String key) {
    HashMap<String, String> transformer = new HashMap<String, String>();
    Iterator<?> entries = config.iterator();
    while (entries.hasNext()) {
      String entry = entries.next().toString();
      if (entry.startsWith(key)) {
        // NOTE(review): splitting on "=" keeps only metrics[1], so a value
        // containing '=' would be truncated — confirm values never do.
        String[] metrics = entry.split("=");
        transformer.put(metrics[0], metrics[1]);
      }
    }
    return transformer;
  }
  /**
   * Resolves the partitioned table names that cover [start, end] for the
   * given logical table. Tables without a "consolidator.table.*" entry are
   * unpartitioned and resolve to the bare table name. The partition
   * granularity is chosen from the width of the range, then coarsened while
   * the start of the range lies more than two partitions in the past, since
   * older data is only retained at coarser (down-sampled) granularities.
   *
   * @param tableName the logical table name
   * @param start range start, in ms since the epoch
   * @param end range end, in ms since the epoch
   * @return names of the partition tables covering the range, in order
   */
  public String[] findTableName(String tableName, long start, long end) {
    String[] tableNames = null;
    String tableType = "_week";
    long now = (new Date()).getTime();
    long timeWindow = end - start;
    long partitionSize = WEEK;
    boolean fallback = true;
    if (config.get("consolidator.table." + tableName) == null) {
      // Not a partitioned table: the logical name is the physical name.
      tableNames = new String[1];
      tableNames[0] = tableName;
      return tableNames;
    }
    if (timeWindow <= 0) {
      timeWindow = 1;
    }
    // Pick the smallest partition granularity wide enough for the range.
    if (timeWindow > DECADE) {
      tableType = "_century";
      partitionSize = CENTURY;
    } else if (timeWindow > YEAR) {
      tableType = "_decade";
      partitionSize = DECADE;
    } else if (timeWindow > QUARTER) {
      tableType = "_year";
      partitionSize = YEAR;
    } else if (timeWindow > MONTH) {
      tableType = "_quarter";
      partitionSize = QUARTER;
    } else if (timeWindow > WEEK) {
      tableType = "_month";
      partitionSize = MONTH;
    } else {
      tableType = "_week";
      partitionSize = WEEK;
    }
    long currentPartition = now / partitionSize;
    long startPartition = start / partitionSize;
    long endPartition = end / partitionSize;
    while (fallback && partitionSize != CENTURY * 100) {
      // Check if the starting date is in the far distance from current time. If
      // it is, use down sampled data.
      if (startPartition + 2 < currentPartition) {
        fallback = true;
        if (partitionSize == DAY) {
          tableType = "_week";
          partitionSize = WEEK;
        } else if (partitionSize == WEEK) {
          tableType = "_month";
          partitionSize = MONTH;
        } else if (partitionSize == MONTH) {
          tableType = "_year";
          partitionSize = YEAR;
        } else if (partitionSize == YEAR) {
          tableType = "_decade";
          partitionSize = DECADE;
        } else if (partitionSize == DECADE) {
          tableType = "_century";
          partitionSize = CENTURY;
        } else {
          // Terminates the loop: no granularity coarser than century.
          partitionSize = 100 * CENTURY;
        }
        currentPartition = now / partitionSize;
        startPartition = start / partitionSize;
        endPartition = end / partitionSize;
      } else {
        fallback = false;
      }
    }
    // Emit one table name per partition in [startPartition, endPartition].
    if (startPartition != endPartition) {
      int delta = (int) (endPartition - startPartition);
      tableNames = new String[delta + 1];
      for (int i = 0; i <= delta; i++) {
        long partition = startPartition + (long) i;
        tableNames[i] = tableName + "_" + partition + tableType;
      }
    } else {
      tableNames = new String[1];
      tableNames[0] = tableName + "_" + startPartition + tableType;
    }
    return tableNames;
  }
  /**
   * Like {@link #findTableName(String, long, long)} but tuned for chart
   * rendering: one-step-coarser partitions are selected so charts draw from
   * down-sampled tables.
   *
   * @param tableName the logical table name
   * @param start range start, in ms since the epoch
   * @param end range end, in ms since the epoch
   * @return names of the partition tables covering the range, in order
   */
  public String[] findTableNameForCharts(String tableName, long start, long end) {
    String[] tableNames = null;
    String tableType = "_week";
    long now = (new Date()).getTime();
    long timeWindow = end - start;
    // NOTE(review): adds 1 ms for windows longer than an hour — presumably
    // to avoid an exact-boundary edge case; confirm the intent.
    if (timeWindow > 60 * 60 * 1000) {
      timeWindow = timeWindow + 1;
    }
    long partitionSize = WEEK;
    boolean fallback = true;
    if (config.get("consolidator.table." + tableName) == null) {
      tableNames = new String[1];
      tableNames[0] = tableName;
      return tableNames;
    }
    if (timeWindow <= 0) {
      timeWindow = 1;
    }
    // NOTE(review): the first two branches pair tableType "_decade" with
    // partitionSize CENTURY — a type/size mismatch compared to
    // findTableName(); confirm whether this is intentional.
    if (timeWindow > DECADE) {
      tableType = "_decade";
      partitionSize = CENTURY;
    } else if (timeWindow > YEAR) {
      tableType = "_decade";
      partitionSize = CENTURY;
    } else if (timeWindow > QUARTER) {
      tableType = "_decade";
      partitionSize = DECADE;
    } else if (timeWindow > MONTH) {
      tableType = "_year";
      partitionSize = YEAR;
    } else if (timeWindow > WEEK) {
      tableType = "_quarter";
      partitionSize = QUARTER;
    } else if (timeWindow > DAY) {
      tableType = "_month";
      partitionSize = MONTH;
    } else {
      tableType = "_week";
      partitionSize = WEEK;
    }
    long currentPartition = now / partitionSize;
    long startPartition = start / partitionSize;
    long endPartition = end / partitionSize;
    while (fallback && partitionSize != DECADE * 100) {
      // Check if the starting date is in the far distance from current time. If
      // it is, use down sampled data.
      if (startPartition + 2 < currentPartition) {
        fallback = true;
        if (partitionSize == DAY) {
          tableType = "_month";
          partitionSize = MONTH;
        } else if (partitionSize == WEEK) {
          tableType = "_quarter";
          partitionSize = QUARTER;
        } else if (partitionSize == MONTH) {
          tableType = "_year";
          partitionSize = YEAR;
        } else if (partitionSize == YEAR) {
          tableType = "_decade";
          partitionSize = DECADE;
        } else {
          partitionSize = CENTURY;
        }
        currentPartition = now / partitionSize;
        startPartition = start / partitionSize;
        endPartition = end / partitionSize;
      } else {
        fallback = false;
      }
    }
    // Emit one table name per partition in [startPartition, endPartition].
    if (startPartition != endPartition) {
      int delta = (int) (endPartition - startPartition);
      tableNames = new String[delta + 1];
      for (int i = 0; i <= delta; i++) {
        long partition = startPartition + (long) i;
        tableNames[i] = tableName + "_" + partition + tableType;
      }
    } else {
      tableNames = new String[1];
      tableNames[0] = tableName + "_" + startPartition + tableType;
    }
    return tableNames;
  }
  /** Ad-hoc manual check: prints the partitions covering a sample range. */
  public static void main(String[] args) {
    DatabaseConfig dbc = new DatabaseConfig();
    String[] names = dbc.findTableName("system_metrics", 1216140020000L,
        1218645620000L);
    for (String n : names) {
      System.out.println("name:" + n);
    }
  }
}
| 8,152 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/MetricsAggregation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * Command-line job that aggregates per-host metrics into cluster-level
 * tables: for each partition table covering roughly the last day, it
 * builds and runs an "insert into cluster_X (cols) (select avg(cols) ...
 * group by timestamp)" statement against the corresponding source table,
 * and records the last-aggregated timestamp in aggregation_admin_table.
 */
public class MetricsAggregation {
  private static Log log = LogFactory.getLog(MetricsAggregation.class);
  private static Connection conn = null;
  private static Statement stmt = null;
  private static ResultSet rs = null;
  private static DatabaseConfig mdlConfig;
  /**
   * @param args is list of command line parameters
   * @throws SQLException if SQL query fails
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
      "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
      justification = "Dynamic based upon tables in the database")
  public static void main(String[] args) throws SQLException {
    mdlConfig = new DatabaseConfig();
    // Connect to the database
    String jdbc_url = System.getenv("JDBC_URL_PREFIX")
        + mdlConfig.get("jdbc.host") + "/" + mdlConfig.get("jdbc.db");
    if (mdlConfig.get("jdbc.user") != null) {
      jdbc_url = jdbc_url + "?user=" + mdlConfig.get("jdbc.user");
      if (mdlConfig.get("jdbc.password") != null) {
        jdbc_url = jdbc_url + "&password=" + mdlConfig.get("jdbc.password");
      }
    }
    try {
      // The newInstance() call is a work around for some
      // broken Java implementations
      org.apache.hadoop.chukwa.util.DriverManagerUtil.loadDriver().newInstance();
      log.info("Initialized JDBC URL: " + jdbc_url);
    } catch (Exception ex) {
      // handle the error
      ex.printStackTrace();
      log.error(ex, ex);
    }
    try {
      conn = org.apache.hadoop.chukwa.util.DriverManagerUtil.getConnection(jdbc_url);
    } catch (SQLException ex) {
      ex.printStackTrace();
      log.error(ex, ex);
    }
    // get the latest timestamp for aggregation on this table
    // Start = latest
    SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Initial window: the last 24 hours up to 10 minutes ago.
    long start = System.currentTimeMillis() - (1000 * 60 * 60 * 24);
    long end = System.currentTimeMillis() - (1000 * 60 * 10);
    // retrieve metadata for cluster_system_metrics
    DatabaseConfig dbConf = new DatabaseConfig();
    String[] tables = dbConf.findTableName("cluster_system_metrics_2018_week",
        start, end);
    for (String table : tables) {
      System.out.println("Table to aggregate per Ts: " + table);
      stmt = conn.createStatement();
      // Resume from the last aggregated timestamp recorded for this table,
      // or from 0 if it has never been aggregated.
      rs = stmt
          .executeQuery("select table_ts from aggregation_admin_table where table_name=\""
              + table + "\"");
      if (rs.next()) {
        start = rs.getLong(1);
      } else {
        start = 0;
      }
      end = start + (1000 * 60 * 60 * 1); // do 1 hour aggregation max
      long now = System.currentTimeMillis();
      now = now - (1000 * 60 * 10); // wait for 10 minutes
      end = Math.min(now, end);
      // TODO REMOVE DEBUG ONLY!
      end = now;
      System.out.println("Start Date:" + new Date(start));
      System.out.println("End Date:" + new Date(end));
      // Discover the table's numeric columns; varchar columns are excluded
      // from the avg() aggregation.
      DatabaseMetaData dbm = conn.getMetaData();
      rs = dbm.getColumns(null, null, table, null);
      List<String> cols = new ArrayList<String>();
      while (rs.next()) {
        String s = rs.getString(4); // 4 is column name, 5 data type etc.
        System.out.println("Name: " + s);
        int type = rs.getInt(5);
        if (type == java.sql.Types.VARCHAR) {
          System.out.println("Type: Varchar " + type);
        } else {
          cols.add(s);
          System.out.println("Type: Number " + type);
        }
      }// end of while.
      // build insert into from select query
      // Source table name: the cluster_ table minus its "cluster_" prefix.
      String initTable = table.replace("cluster_", "");
      StringBuilder sb0 = new StringBuilder();
      StringBuilder sb = new StringBuilder();
      sb0.append("insert into ").append(table).append(" (");
      sb.append(" ( select ");
      for (int i = 0; i < cols.size(); i++) {
        sb0.append(cols.get(i));
        sb.append("avg(").append(cols.get(i)).append(") ");
        if (i < cols.size() - 1) {
          sb0.append(",");
          sb.append(",");
        }
      }
      sb.append(" from ").append(initTable);
      sb.append(" where timestamp between \"");
      sb.append(formatter.format(start));
      sb.append("\" and \"").append(formatter.format(end));
      sb.append("\" group by timestamp )");
      // close fields
      sb0.append(" )").append(sb);
      System.out.println(sb0.toString());
      // run query
      conn.setAutoCommit(false);
      stmt = conn.createStatement();
      final String query = sb0.toString();
      stmt.execute(query);
      // update last run
      // NOTE(review): "insert into ... set ... where ..." is not standard
      // SQL (MySQL's INSERT ... SET takes no WHERE clause) — presumably an
      // UPDATE was intended; confirm against the target database.
      stmt = conn.createStatement();
      stmt.execute("insert into aggregation_admin_table set table_ts=\""
          + formatter.format(end) + "\" where table_name=\"" + table + "\"");
      conn.commit();
    }
  }
}
| 8,153 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/DataExpiration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import java.sql.SQLException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.util.RegexUtil;
public class DataExpiration {
private static DatabaseConfig dbc = null;
private static Log log = LogFactory.getLog(DataExpiration.class);
  /**
   * Creates a DataExpiration, lazily initializing the shared database
   * configuration the first time any instance is constructed.
   */
  public DataExpiration() {
    if (dbc == null) {
      dbc = new DatabaseConfig();
    }
  }
  /**
   * Drops expired database partitions for every table configured under
   * "report.db.name.*". For each partition table covering [start, end]
   * (names of the form base_partition_windowType), the partition numbered
   * three windows earlier is dropped when non-negative, retaining the most
   * recent partitions.
   *
   * @param start beginning of the time range, in ms since the epoch
   * @param end end of the time range, in ms since the epoch
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
      "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
      justification = "Dynamic based upon tables in the database")
  public void dropTables(long start, long end) {
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    try {
      HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
      for(Entry<String, String> entry : dbNames.entrySet()) {
        String tableName = entry.getValue();
        if (!RegexUtil.isRegex(tableName)) {
          log.warn("Skipping tableName: '" + tableName
              + "' because there was an error parsing it as a regex: "
              + RegexUtil.regexError(tableName));
          // NOTE(review): this returns from dropTables entirely, skipping
          // all remaining tables rather than just this one — confirm
          // whether "continue" was intended.
          return;
        }
        String[] tableList = dbc.findTableName(tableName, start, end);
        for (String tl : tableList) {
          log.debug("table name: " + tableList[0]);
          try {
            // Table names look like <base>_<partition>_<windowType>:
            // extract the partition number and rebuild the base name.
            String[] parts = tl.split("_");
            int partition = Integer.parseInt(parts[parts.length - 2]);
            StringBuilder table = new StringBuilder();
            for (int i = 0; i < parts.length - 2; i++) {
              if (i != 0) {
                table.append("_");
              }
              table.append(parts[i]);
            }
            // Drop the partition three windows back, if it exists.
            partition = partition - 3;
            if(partition>=0) {
              StringBuilder dropPartition = new StringBuilder();
              dropPartition.append("drop table if exists ");
              dropPartition.append(table);
              dropPartition.append("_");
              dropPartition.append(partition);
              dropPartition.append("_");
              dropPartition.append(parts[parts.length - 1]);
              final String query = dropPartition.toString();
              dbw.execute(query);
            }
          } catch (NumberFormatException e) {
            log
                .error("Error in parsing table partition number, skipping table:"
                    + tableList[0]);
          } catch (ArrayIndexOutOfBoundsException e) {
            log.debug("Skipping table:" + tableList[0]
                + ", because it has no partition configuration.");
          }
        }
      }
      dbw.close();
    } catch (SQLException e) {
      e.printStackTrace();
    }
  }
public static void usage() {
System.out.println("DataExpiration usage:");
System.out
.println("java -jar chukwa-core.jar org.apache.hadoop.chukwa.DataExpiration <date> <time window size>");
System.out.println(" date format: YYYY-MM-DD");
System.out.println(" time window size: 7, 30, 91, 365");
}
public static void main(String[] args) {
DataExpiration de = new DataExpiration();
long now = (new Date()).getTime();
long start = now;
long end = now;
if (args.length == 2) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
try {
long dataExpStart = Calendar.getInstance().getTimeInMillis();
start = sdf.parse(args[0]).getTime();
end = start + (Long.parseLong(args[1]) * 1440 * 60 * 1000L);
de.dropTables(start, end);
long dataExpEnd = Calendar.getInstance().getTimeInMillis();
log.info("DataExpiration for: "+args[0]+" "+args[1]+" finished: ("+(double) (dataExpEnd-dataExpStart)/1000+" seconds)");
} catch (ParseException e) {
usage();
}
} else {
usage();
}
}
}
| 8,154 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/Aggregator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
@SuppressWarnings("unused")
public class Aggregator {
  private static Log log = LogFactory.getLog(Aggregator.class);
  // Wall-clock time captured at construction; used as the default
  // aggregation instant when no explicit range is supplied.
  private long current = 0;
  private DatabaseWriter db = null;

  public Aggregator() {
    Calendar now = Calendar.getInstance();
    current = now.getTimeInMillis();
  }

  /**
   * Reads an entire file into a string, preserving line separators.
   *
   * @param aFile file to read; decoded as UTF-8
   * @return the file contents, or an empty string if the file could not be read
   */
  public static String getContents(File aFile) {
    StringBuilder contents = new StringBuilder();
    try {
      BufferedReader input = new BufferedReader(new InputStreamReader(
          new FileInputStream(aFile.getAbsolutePath()), Charset.forName("UTF-8")));
      try {
        String line;
        while ((line = input.readLine()) != null) {
          contents.append(line);
          contents.append(System.getProperty("line.separator"));
        }
      } finally {
        input.close();
      }
    } catch (IOException ex) {
      // Fixed: was ex.printStackTrace(); report through the log instead.
      log.error("Unable to read " + aFile, ex);
    }
    return contents.toString();
  }

  /**
   * Expands [macros] in the query for the given time range and executes it.
   * The DatabaseWriter supplied via setWriter is always closed afterwards,
   * so each Aggregator instance can run only one query.
   *
   * @param start window start, milliseconds since the epoch
   * @param end window end, milliseconds since the epoch
   * @param query SQL template containing [macro] placeholders
   * @throws Throwable if macro expansion or execution fails
   */
  public void process(long start, long end, String query) throws Throwable {
    try {
      Macro macroProcessor = new Macro(start, end, query);
      query = macroProcessor.toString();
      db.execute(query);
    } catch (Exception e) {
      log.error("Query: " + query);
      // Fixed: preserve the original failure as the cause (was dropped).
      throw new Exception("Aggregation failed for: " + query, e);
    } finally {
      db.close();
    }
  }

  /** Runs the query against the instant this Aggregator was created. */
  public void process(String query) throws Throwable {
    process(current, current, query);
  }

  public void setWriter(DatabaseWriter dbw) {
    db = dbw;
  }

  /**
   * Entry point. With four args ("yyyy-MM-dd HH:mm" start and end pairs)
   * replays aggregation in 5-minute steps over the range; with no args runs
   * each query in CHUKWA_CONF_DIR/aggregator.sql once for "now".
   */
  public static void main(String[] args) {
    long startTime = 0;
    long endTime = 0;
    long aggregatorStart = Calendar.getInstance().getTimeInMillis();
    long longest = 0;
    if (args.length >= 4) {
      SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm");
      ParsePosition pp = new ParsePosition(0);
      String buffer = args[0] + " " + args[1];
      Date tmp = format.parse(buffer, pp);
      // Fixed: format.parse(String, ParsePosition) returns null on bad
      // input, which previously caused an NPE below.
      if (tmp == null) {
        log.error("Invalid start time: " + buffer);
        return;
      }
      startTime = tmp.getTime();
      buffer = args[2] + " " + args[3];
      pp = new ParsePosition(0);
      tmp = format.parse(buffer, pp);
      if (tmp == null) {
        log.error("Invalid end time: " + buffer);
        return;
      }
      endTime = tmp.getTime();
    }
    String longQuery = null;
    log.info("Aggregator started.");
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    String queries = Aggregator.getContents(new File(System
        .getenv("CHUKWA_CONF_DIR")
        + File.separator + "aggregator.sql"));
    String[] query = queries.split("\n");
    while (startTime <= endTime) {
      for (int i = 0; i < query.length; i++) {
        if (query[i].indexOf("#") == 0) {
          // Lines starting with '#' are comments in aggregator.sql.
          log.debug("skipping: " + query[i]);
        } else if (!query[i].equals("")) {
          Aggregator dba = new Aggregator();
          dba.setWriter(new DatabaseWriter(cluster));
          long start = Calendar.getInstance().getTimeInMillis();
          try {
            if (startTime != 0 && endTime != 0) {
              dba.process(startTime, startTime, query[i]);
            } else {
              dba.process(query[i]);
            }
          } catch (Throwable e) {
            log.error("Invalid query:" + query[i], e);
          }
          long end = Calendar.getInstance().getTimeInMillis();
          long duration = end - start;
          if (duration >= longest) {
            longest = duration;
            longQuery = query[i];
          }
        }
      }
      startTime = startTime + 5 * 60000;
    }
    long aggregatorEnd = Calendar.getInstance().getTimeInMillis();
    log.info("Longest running query: " + longQuery + " (" + (double) longest
        / 1000 + " seconds)");
    log.info("Total running time: ("+(double) (aggregatorEnd-aggregatorStart)/1000+" seconds)");
    log.info("Aggregator finished.");
  }
}
| 8,155 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/TableCreator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.util.RegexUtil;
public class TableCreator {
  private static DatabaseConfig dbc = null;
  private static Log log = LogFactory.getLog(TableCreator.class);

  public TableCreator() {
    // Lazily create the shared DatabaseConfig on first construction.
    if (dbc == null) {
      dbc = new DatabaseConfig();
    }
  }

  /** Creates partitioned tables for the current instant. */
  public void createTables() throws Exception {
    long now = (new Date()).getTime();
    createTables(now, now);
  }

  /**
   * Creates the current and next partition of every table configured under
   * "report.db.name.*", cloning the schema of the corresponding
   * "_template" table.
   *
   * @param start beginning of the time window, milliseconds since the epoch
   * @param end end of the time window, milliseconds since the epoch
   * @throws Exception if a database error occurs
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
      "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
      justification = "Dynamic based upon tables in the database")
  public void createTables(long start, long end) throws Exception {
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    try {
      HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
      for (Entry<String, String> entry : dbNames.entrySet()) {
        String tableName = entry.getValue();
        if (!RegexUtil.isRegex(tableName)) {
          log.warn("Skipping tableName: '" + tableName
              + "' because there was an error parsing it as a regex: "
              + RegexUtil.regexError(tableName));
          // Fixed: was "return", which silently aborted creation for every
          // remaining table instead of skipping just this one.
          continue;
        }
        String[] tableList = dbc.findTableName(tableName, start, end);
        // Fixed: guard against an empty result; tableList[0] used to be
        // dereferenced before the AIOOBE handler below could catch it.
        if (tableList.length == 0) {
          continue;
        }
        log.debug("table name: " + tableList[0]);
        try {
          // Table names look like <base>_<partition>_<granularity>.
          String[] parts = tableList[0].split("_");
          int partition = Integer.parseInt(parts[parts.length - 2]);
          StringBuilder tableNameBuffer = new StringBuilder();
          for (int i = 0; i < parts.length - 2; i++) {
            if (i != 0) {
              tableNameBuffer.append("_");
            }
            tableNameBuffer.append(parts[i]);
          }
          String table = tableNameBuffer.toString();
          StringBuilder q = new StringBuilder();
          q.append("show create table ");
          q.append(table);
          q.append("_template;");
          final String query = q.toString();
          ResultSet rs = dbw.query(query);
          while (rs.next()) {
            log.debug("table schema: " + rs.getString(2));
            String tbl = rs.getString(2);
            log.debug("template table name:" + table + "_template");
            log.debug("replacing with table name:" + table + "_" + partition
                + "_" + parts[parts.length - 1]);
            log.debug("creating table: " + tbl);
            // Create the current partition and the next one, so writers
            // never race ahead of table creation.
            for (int i = 0; i < 2; i++) {
              StringBuilder templateName = new StringBuilder();
              templateName.append(table);
              templateName.append("_template");
              StringBuilder partitionName = new StringBuilder();
              partitionName.append(table);
              partitionName.append("_");
              partitionName.append(partition);
              partitionName.append("_");
              partitionName.append(parts[parts.length - 1]);
              tbl = tbl.replaceFirst("TABLE", "TABLE IF NOT EXISTS");
              tbl = tbl.replaceFirst(templateName.toString(), partitionName.toString());
              final String createTable = tbl;
              dbw.execute(createTable);
              partition++;
            }
          }
        } catch (NumberFormatException e) {
          log.error("Error in parsing table partition number, skipping table:"
              + tableList[0]);
        } catch (ArrayIndexOutOfBoundsException e) {
          log.debug("Skipping table:" + tableList[0]
              + ", because it has no partition configuration.");
        }
        // Removed a redundant "catch (SQLException e) { throw e; }";
        // SQLExceptions still propagate to the caller unchanged.
      }
    } finally {
      try {
        // Fixed: the writer was never closed, leaking the connection.
        dbw.close();
      } catch (Exception e) {
        log.debug("Error closing database connection", e);
      }
    }
  }

  /** Prints command-line usage to stdout. */
  public static void usage() {
    System.out.println("TableCreator usage:");
    System.out
        .println("java -jar chukwa-core.jar org.apache.hadoop.chukwa.TableCreator <date> <time window size>");
    System.out.println("     date format: YYYY-MM-DD");
    System.out.println("     time window size: 7, 30, 91, 365, 3650");
  }

  /**
   * Entry point: with two args creates tables for the window starting at
   * args[0] (YYYY-MM-DD) spanning args[1] days; with none, for "now".
   */
  public static void main(String[] args) {
    TableCreator tc = new TableCreator();
    if (args.length == 2) {
      try {
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        long start = sdf.parse(args[0]).getTime();
        long end = start + (Long.parseLong(args[1]) * 1440 * 60 * 1000L);
        tc.createTables(start, end);
      } catch (Exception e) {
        System.out.println("Invalid date format or time window size.");
        e.printStackTrace();
        usage();
      }
    } else {
      try {
        tc.createTables();
      } catch (Exception e) {
        log.error(ExceptionUtil.getStackTrace(e));
      }
    }
  }
}
| 8,156 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/database/Macro.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
public class Macro {
  private static Log log = LogFactory.getLog(Macro.class);
  private boolean forCharting = false;
  private long current = 0;
  private long start = 0;
  private long end = 0;
  private static DatabaseConfig dbc = new DatabaseConfig();
  private DatabaseWriter db = null;
  private String query = null;
  private HttpServletRequest request = null;

  /** Macro expansion pinned to a single instant. */
  public Macro(long timestamp, String query) {
    this.current = timestamp;
    this.start = timestamp;
    this.end = timestamp;
    this.query = query;
  }

  /** Macro expansion over a time range; enables charting-mode lookups. */
  public Macro(long startTime, long endTime, String query) {
    this.current = endTime;
    this.start = startTime;
    this.end = endTime;
    forCharting = true;
    this.query = query;
  }

  /**
   * Macro expansion over a time range with access to HTTP session
   * attributes for the session(...) macro.
   */
  public Macro(long startTime, long endTime, String query, HttpServletRequest request) {
    this.request = request;
    this.current = endTime;
    this.start = startTime;
    this.end = endTime;
    forCharting = true;
    this.query = query;
  }

  /**
   * Scans the query for [macro] placeholders and computes each distinct
   * macro's replacement text.
   *
   * @param query SQL template containing [macro] placeholders
   * @return map from macro name (without brackets) to its expansion
   * @throws SQLException if a macro requires database metadata that is
   *         unavailable
   */
  public HashMap<String, String> findMacros(String query) throws SQLException {
    boolean add = false;
    HashMap<String, String> macroList = new HashMap<String, String>();
    String macro = "";
    for (int i = 0; i < query.length(); i++) {
      if (query.charAt(i) == ']') {
        add = false;
        if (!macroList.containsKey(macro)) {
          String subString = computeMacro(macro);
          macroList.put(macro, subString);
        }
        macro = "";
      }
      if (add) {
        macro = macro + query.charAt(i);
      }
      if (query.charAt(i) == '[') {
        add = true;
      }
    }
    return macroList;
  }

  /**
   * Computes the replacement text for a single macro: column aggregates
   * (avg/group_avg/sum), timestamp markers (now/start/end/past_*), or
   * partitioned table names (*_week, *_month, ...).
   *
   * @param macro macro name without the surrounding brackets
   * @return the expansion text
   * @throws SQLException if the referenced table does not exist
   */
  public String computeMacro(String macro) throws SQLException {
    Pattern p = Pattern.compile("past_(.*)_minutes");
    Matcher matcher = p.matcher(macro);
    if (macro.indexOf("avg(") == 0 || macro.indexOf("group_avg(") == 0
        || macro.indexOf("sum(") == 0) {
      String meta = "";
      String[] table = null;
      if (forCharting) {
        table = dbc.findTableNameForCharts(
            macro.substring(macro.indexOf("(") + 1, macro.indexOf(")")), start, end);
      } else {
        table = dbc.findTableName(
            macro.substring(macro.indexOf("(") + 1, macro.indexOf(")")), start, end);
      }
      try {
        String cluster = System.getProperty("CLUSTER");
        if (cluster == null) {
          cluster = "unknown";
        }
        db = new DatabaseWriter(cluster);
        boolean first = true;
        try {
          DatabaseMetaData dbMetaData = db.getConnection().getMetaData();
          ResultSet rs = dbMetaData.getColumns(null, null, table[0], null);
          // Build a select list appropriate for each column type.
          while (rs.next()) {
            if (!first) {
              meta = meta + ",";
            }
            String name = rs.getString(4);
            int type = rs.getInt(5);
            if (type == java.sql.Types.VARCHAR) {
              if (macro.indexOf("group_avg(") < 0) {
                meta = meta + "count(" + name + ") as " + name;
              } else {
                meta = meta + name;
              }
              first = false;
            } else if (type == java.sql.Types.DOUBLE
                || type == java.sql.Types.FLOAT
                || type == java.sql.Types.INTEGER) {
              if (macro.indexOf("sum(") == 0) {
                meta = meta + "sum(" + name + ")";
              } else {
                meta = meta + "avg(" + name + ")";
              }
              first = false;
            } else if (type == java.sql.Types.TIMESTAMP) {
              meta = meta + name;
              first = false;
            } else {
              if (macro.indexOf("sum(") == 0) {
                meta = meta + "SUM(" + name + ")";
              } else {
                meta = meta + "AVG(" + name + ")";
              }
              first = false;
            }
          }
        } finally {
          // Fixed: close moved into a finally so the connection is not
          // leaked when metadata access throws.
          db.close();
        }
        if (first) {
          // No columns found means the table does not exist.
          throw new SQLException("Table is undefined.");
        }
      } catch (SQLException ex) {
        // Rewrap with the table name for context; keep the original
        // failure as the cause (previously dropped).
        SQLException wrapped = new SQLException("Table does not exist:" + table[0]);
        wrapped.initCause(ex);
        throw wrapped;
      }
      return meta;
    } else if (macro.indexOf("now") == 0) {
      return DatabaseWriter.formatTimeStamp(current);
    } else if ("start".equals(macro)) {
      // Fixed idiom: was macro.intern()=="start".intern().
      return DatabaseWriter.formatTimeStamp(start);
    } else if ("end".equals(macro)) {
      return DatabaseWriter.formatTimeStamp(end);
    } else if (matcher.find()) {
      // past_<N>_minutes: align current down to the period, then go back one.
      int period = Integer.parseInt(matcher.group(1));
      long timestamp = current - (current % (period * 60 * 1000L)) - (period * 60 * 1000L);
      return DatabaseWriter.formatTimeStamp(timestamp);
    } else if (macro.indexOf("past_hour") == 0) {
      return DatabaseWriter.formatTimeStamp(current - 3600 * 1000L);
    } else if (macro.endsWith("_week")) {
      return partitionedTableName(macro, DatabaseConfig.WEEK);
    } else if (macro.endsWith("_month")) {
      return partitionedTableName(macro, DatabaseConfig.MONTH);
    } else if (macro.endsWith("_quarter")) {
      return partitionedTableName(macro, DatabaseConfig.QUARTER);
    } else if (macro.endsWith("_year")) {
      return partitionedTableName(macro, DatabaseConfig.YEAR);
    } else if (macro.endsWith("_decade")) {
      return partitionedTableName(macro, DatabaseConfig.DECADE);
    }
    if (forCharting) {
      if (macro.startsWith("session(") && request != null) {
        // Expand to an OR-list built from a comma-separated session
        // attribute, e.g. host='a' or host='b'.
        String keyword = macro.substring(macro.indexOf("(") + 1, macro.indexOf(")"));
        String[] objects = null;
        if (request.getSession().getAttribute(keyword) != null) {
          objects = ((String) request.getSession().getAttribute(keyword)).split(",");
        }
        StringBuilder buf = new StringBuilder();
        boolean first = true;
        if (objects != null) {
          for (String object : objects) {
            if (!first) {
              buf.append(" or ");
            }
            first = false;
            buf.append(keyword + "='" + object + "'");
          }
          return buf.toString();
        }
        return "";
      } else {
        // Multiple matching tables are joined with '|'; toString() turns
        // them into a union query.
        String[] tableList = dbc.findTableNameForCharts(macro, start, end);
        StringBuilder buf = new StringBuilder();
        boolean first = true;
        for (String table : tableList) {
          if (!first) {
            buf.append("|");
          }
          first = false;
          buf.append(table);
        }
        return buf.toString();
      }
    }
    String[] tableList = dbc.findTableName(macro, current, current);
    return tableList[0];
  }

  /**
   * Builds a partitioned table name for macros ending in a period suffix
   * (e.g. foo_week -> foo_<partition>_week). The partition index is the
   * current time divided by the period length, floored at 1.
   */
  private String partitionedTableName(String macro, long interval) {
    long partition = current / interval;
    if (partition <= 0) {
      partition = 1;
    }
    String[] parts = macro.split("_");
    StringBuilder tableName = new StringBuilder();
    for (int i = 0; i < parts.length - 1; i++) {
      tableName.append(parts[i]);
      tableName.append("_");
    }
    tableName.append(partition);
    tableName.append("_");
    tableName.append(parts[parts.length - 1]);
    return tableName.toString();
  }

  /**
   * Expands every [macro] in the query and returns the resulting SQL.
   * A macro that expanded to multiple tables ('|'-separated) rewrites the
   * query as a union over all of them.
   */
  public String toString() {
    try {
      HashMap<String, String> macroList = findMacros(query);
      for (Entry<String, String> entry : macroList.entrySet()) {
        String mkey = entry.getKey();
        String value = entry.getValue();
        if (value.contains("|")) {
          StringBuilder buf = new StringBuilder();
          String[] tableList = value.split("\\|");
          boolean first = true;
          for (String table : tableList) {
            String newQuery = query.replace("[" + mkey + "]", table);
            if (!first) {
              buf.append(" union ");
            }
            buf.append("(");
            buf.append(newQuery);
            buf.append(")");
            first = false;
          }
          query = buf.toString();
        } else {
          log.debug("replacing:" + mkey + " with " + value);
          query = query.replace("[" + mkey + "]", value);
        }
      }
    } catch (SQLException ex) {
      log.error(query);
      log.error(ex.getMessage());
    }
    return query;
  }
}
| 8,157 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/OffsetStatsManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection;
import org.apache.log4j.Logger;
import java.util.Map;
import java.util.LinkedList;
import java.util.Date;
import java.util.concurrent.ConcurrentHashMap;
/**
* Manages stats for multiple objects of type T. T can be any class that is used
* as a key for offset statistics (i.e. Agent, Collector, etc.). A client would
* create an instance of this class and call <code>addOffsetDataPoint</code>
* repeatedly over time. Then <code>calcAverageRate</code> can be called to
* retrieve the average offset-unit per second over a given time interval.
* <P>
* For a given object T that is actively adding data points, stats are kept for
* up to 20 minutes.</p>
* <P>
* Care should be taken to always call <code>remove()</code> when old T objects
* should no longer be tracked.</p>
* @param <T> is object type
*/
public class OffsetStatsManager<T> {
  protected Logger log = Logger.getLogger(getClass());

  /*
   * This value is how far back we keep data for. Old data is purged when new
   * data is added.
   */
  private static final long DEFAULT_STATS_DATA_TTL = 20L * 60L * 1000L; // 20 minutes

  /**
   * How far back can our data be to be considered fresh enough, relative to the
   * interval requested. For example if this value is 0.25 and interval requested
   * is 60 seconds, our most recent data point must be no more than 15 seconds old.
   */
  private static final double DEFAULT_STALE_THRESHOLD = 0.25;

  /**
   * How far back do we need to have historical data for, relative to the
   * interval requested. For example if this value is 0.25 and the interval
   * requested is 60 seconds, our oldest data point must be within 15
   * seconds of the most recent data point - 60.
   */
  private static final double DEFAULT_AGE_THRESHOLD = 0.25;

  // These can be made configurable if someone needs to do so
  private long statsDataTTL = DEFAULT_STATS_DATA_TTL;
  private double staleThresholdPercent = DEFAULT_STALE_THRESHOLD;
  private double ageThresholdPercent = DEFAULT_AGE_THRESHOLD;

  // Concrete type so putIfAbsent is available without a Java 8 dependency.
  private ConcurrentHashMap<T, OffsetDataStats> offsetStatsMap =
      new ConcurrentHashMap<T, OffsetDataStats>();

  public OffsetStatsManager() {
    this(DEFAULT_STATS_DATA_TTL);
  }

  public OffsetStatsManager(long statsDataTTL) {
    this.statsDataTTL = statsDataTTL;
  }

  /**
   * Record that at a given point in time an object key had a given offset.
   * @param key Object to key this data point to
   * @param offset How much of an offset to record
   * @param timestamp The time the offset occured
   */
  public void addOffsetDataPoint(T key, long offset, long timestamp) {
    OffsetDataStats stats = offsetStatsMap.get(key);
    if (stats == null) {
      // Fixed check-then-act race: putIfAbsent guarantees concurrent
      // callers all end up sharing a single stats object per key.
      OffsetDataStats created = new OffsetDataStats();
      OffsetDataStats existing = offsetStatsMap.putIfAbsent(key, created);
      stats = (existing == null) ? created : existing;
    }
    stats.add(new OffsetData(offset, timestamp));
    stats.prune(statsDataTTL);

    if (log.isDebugEnabled())
      log.debug("Added offset - key=" + key + ", offset=" + offset +
                ", time=" + new Date(timestamp) + ", dataCount=" +
                stats.getOffsetDataList().size());
  }

  /**
   * Computes the average offset-units per second for key over roughly the
   * past timeIntervalSecs seconds.
   *
   * @param key object to compute the rate for
   * @param timeIntervalSecs interval to average over, in seconds
   * @return the average rate, or -1 if no data exists, the newest data point
   *         is too stale, or history does not reach back far enough
   */
  public double calcAverageRate(T key, long timeIntervalSecs) {
    OffsetDataStats stats = get(key);
    if (stats == null) {
      if (log.isDebugEnabled())
        log.debug("No stats data found key=" + key);
      return -1;
    }

    // first get the most recent data point to see if we're stale
    long now = System.currentTimeMillis();
    long mostRecentThreshold = now -
        timeIntervalSecs * (long)(staleThresholdPercent * 1000);
    OffsetData newestOffsetData = stats.mostRecentDataPoint();
    if (newestOffsetData == null || newestOffsetData.olderThan(mostRecentThreshold)) {
      if (log.isDebugEnabled())
        log.debug("Stats data too stale for key=" + key);
      return -1; // data is too stale
    }

    // then get the oldest data point to see if we have enough coverage
    long then = newestOffsetData.getTimestamp() - timeIntervalSecs * 1000L;
    long thenDelta = timeIntervalSecs * (long)(ageThresholdPercent * 1000);

    OffsetData oldestOffsetData = null;
    long minDiff = -1;
    long lastDiff = -1;
    for (OffsetData offsetData : stats.getOffsetDataList()) {
      long diff = offsetData.within(then, thenDelta);
      if (diff < 0) continue;

      // Fixed: the comparison was inverted (minDiff < diff), which kept the
      // point *farthest* from the target time instead of the closest one.
      if (minDiff == -1 || diff < minDiff) {
        minDiff = diff;
        oldestOffsetData = offsetData;
      }

      // once we have a match and the diffs start growing again, the closest
      // point has already been seen, so stop scanning
      if (minDiff != -1 && lastDiff != -1 && diff > lastDiff) {
        break;
      }

      lastDiff = diff;
    }

    if (oldestOffsetData == null) {
      if (log.isDebugEnabled())
        log.debug("Stats data history too short for key=" + key);
      return -1;
    }

    return newestOffsetData.averageRate(oldestOffsetData);
  }

  /** @return the oldest data point for key, or null if key is unknown. */
  public OffsetData oldestDataPoint(T key) {
    OffsetDataStats stats = get(key);
    // Fixed: previously threw NullPointerException for unknown keys.
    return stats != null ? stats.oldestDataPoint() : null;
  }

  /** @return the newest data point for key, or null if key is unknown. */
  public OffsetData mostRecentDataPoint(T key) {
    OffsetDataStats stats = get(key);
    // Fixed: previously threw NullPointerException for unknown keys.
    return stats != null ? stats.mostRecentDataPoint() : null;
  }

  /**
   * Remove key from the set of objects that we're tracking stats for.
   * @param key key of stats to be removed
   */
  public void remove(T key) {
    offsetStatsMap.remove(key);
  }

  /**
   * Remove all objects that we're tracking stats for.
   */
  public void clear() {
    offsetStatsMap.clear();
  }

  /**
   * Fetch OffsetDataStats for key.
   * @param key key that stats are to be returned for
   */
  private OffsetDataStats get(T key) {
    return offsetStatsMap.get(key);
  }

  /** A single (offset, timestamp) observation. */
  public class OffsetData {
    private final long offset;
    private final long timestamp;

    private OffsetData(long offset, long timestamp) {
      this.offset = offset;
      this.timestamp = timestamp;
    }

    public long getOffset() { return offset; }
    public long getTimestamp() { return timestamp; }

    /** Offset-units per second between previous and this point; -1 if null. */
    public double averageRate(OffsetData previous) {
      if (previous == null) return -1;
      double elapseOffset = offset - previous.getOffset();
      double elapseTime = (timestamp - previous.getTimestamp()) / 1000d;
      return elapseOffset / elapseTime;
    }

    public boolean olderThan(long timestamp) {
      return this.timestamp < timestamp;
    }

    /** Distance from timestamp if within delta, otherwise -1. */
    public long within(long timestamp, long delta) {
      long diff = Math.abs(this.timestamp - timestamp);
      if (diff < delta) return diff;
      return -1;
    }
  }

  /** Time-ordered list of observations for a single key. */
  private class OffsetDataStats {
    private volatile LinkedList<OffsetData> offsetDataList = new LinkedList<OffsetData>();

    // NOTE(review): callers iterate this list without holding the lock used
    // by add()/prune(); concurrent mutation during iteration could throw
    // ConcurrentModificationException — confirm expected usage.
    public LinkedList<OffsetData> getOffsetDataList() {
      return offsetDataList;
    }

    public void add(OffsetData offsetData) {
      synchronized(offsetDataList) {
        offsetDataList.add(offsetData);
      }
    }

    public OffsetData oldestDataPoint() {
      synchronized(offsetDataList) {
        return offsetDataList.peekFirst();
      }
    }

    public OffsetData mostRecentDataPoint() {
      synchronized(offsetDataList) {
        return offsetDataList.peekLast();
      }
    }

    /** Drops data points older than now - ttl from the head of the list. */
    public void prune(long ttl) {
      long cutoff = System.currentTimeMillis() - ttl;
      OffsetData data;
      synchronized(offsetDataList) {
        while ((data = offsetDataList.peekFirst()) != null) {
          if (data.getTimestamp() > cutoff) break;
          offsetDataList.removeFirst();
        }
      }
    }
  }
}
| 8,158 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/ChunkReceiver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection;
import org.apache.hadoop.chukwa.Chunk;
/**
 * Anything that accepts Chunks — typically a queue feeding the rest of the
 * data-collection pipeline. Implementations may block the caller in add()
 * (e.g. a bounded queue that is full).
 */
public interface ChunkReceiver {
  /**
   * Add a chunk to the queue, potentially blocking.
   * 
   * @param event is a Chukwa Chunk
   * @throws InterruptedException if thread is interrupted while blocking
   */
  public void add(Chunk event) throws java.lang.InterruptedException;
}
| 8,159 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/ChunkQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
/**
* A generic interface for queues of Chunks.
*
* Differs from a normal queue interface primarily by having collect().
*/
public interface ChunkQueue extends ChunkReceiver {
  /**
   * Add a chunk to the queue, blocking if queue is full.
   * 
   * @param chunk A binary blob
   * @throws InterruptedException if thread is interrupted while blocking
   */
  public void add(Chunk chunk) throws InterruptedException;

  /**
   * Return at least one, and no more than count, Chunks into chunks. Blocks if
   * queue is empty.
   *
   * @param chunks List of binary blobs; collected chunks are appended to it
   * @param count maximum number of chunks to return
   * @throws InterruptedException if thread is interrupted while collecting
   */
  public void collect(List<Chunk> chunks, int count)
      throws InterruptedException;

  /**
   * Return an approximation of the number of chunks in the queue currently. No
   * guarantees are made about the accuracy of this number.
   *
   * @return number of chunks in the queue currently
   */
  public int size();
}
| 8,160 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/DataFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.Iterator;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.agent.MemLimitQueue;
import org.apache.hadoop.chukwa.datacollection.sender.RetryListOfCollectors;
import org.apache.log4j.Logger;
/**
 * Singleton factory for the agent-wide chunk queue and for the list of
 * collector URLs. The queue implementation is chosen via the
 * "chukwaAgent.chunk.queue" configuration key, defaulting to MemLimitQueue.
 */
public class DataFactory {
  static Logger log = Logger.getLogger(DataFactory.class);
  static final String COLLECTORS_FILENAME = "collectors";
  static final String CHUNK_QUEUE = "chukwaAgent.chunk.queue";
  protected static final DataFactory dataFactory = new DataFactory();
  private ChunkQueue chunkQueue = null;
  // space-separated tags prepended to outgoing data; starts empty
  private String defaultTags = "";

  private DataFactory() {
  }

  public static DataFactory getInstance() {
    return dataFactory;
  }

  /**
   * Returns the shared chunk queue, lazily creating it on first use.
   * @return the singleton ChunkQueue
   */
  public synchronized ChunkQueue getEventQueue() {
    if (chunkQueue == null) {
      chunkQueue = createEventQueue();
    }
    return chunkQueue;
  }

  /**
   * Enqueue a chunk onto the shared queue, blocking if it is full.
   *
   * Goes through getEventQueue() so the queue is lazily initialized;
   * previously this dereferenced the field directly and threw a
   * NullPointerException if put() was called before getEventQueue().
   *
   * @param c chunk to enqueue
   * @throws InterruptedException if interrupted while blocking
   */
  public void put(Chunk c) throws InterruptedException {
    getEventQueue().add(c);
  }

  /**
   * Instantiate the configured ChunkQueue implementation, falling back to
   * MemLimitQueue on any misconfiguration.
   * @return a freshly constructed ChunkQueue (never null)
   */
  public synchronized ChunkQueue createEventQueue() {
    Configuration conf = ChukwaAgent.getStaticConfiguration();
    if (conf == null) {
      // Must be a unit test, use default queue with default configuration
      return new MemLimitQueue(null);
    }
    String receiver = conf.get(CHUNK_QUEUE);
    ChunkQueue queue = null;
    if (receiver == null) {
      log.warn("Empty configuration for " + CHUNK_QUEUE + ". Defaulting to MemLimitQueue");
      queue = new MemLimitQueue(conf);
      return queue;
    }
    try {
      Class<?> clazz = Class.forName(receiver);
      log.info(clazz);
      if (!ChunkQueue.class.isAssignableFrom(clazz)) {
        throw new Exception(receiver + " is not an instance of ChunkQueue");
      }
      try {
        Constructor<?> ctor = clazz.getConstructor(new Class[] { Configuration.class });
        queue = (ChunkQueue) ctor.newInstance(conf);
      } catch (NoSuchMethodException nsme) {
        // Queue implementations which take no configuration parameter;
        // getDeclaredConstructor().newInstance() replaces the deprecated
        // Class.newInstance(), which silently rethrew checked exceptions.
        queue = (ChunkQueue) clazz.getDeclaredConstructor().newInstance();
      }
    } catch (Exception e) {
      log.error("Could not instantiate configured ChunkQueue due to: " + e);
      log.error("Defaulting to MemLimitQueue");
      queue = new MemLimitQueue(conf);
    }
    return queue;
  }

  public String getDefaultTags() {
    return defaultTags;
  }

  public void setDefaultTags(String tags) {
    defaultTags = tags;
  }

  /** Append a single tag (trimmed) to the default tag string. */
  public void addDefaultTag(String tag) {
    this.defaultTags += " " + tag.trim();
  }

  /**
   * Build the retrying collector-URL iterator from a collectors file under
   * CHUKWA_CONF_DIR (or CHUKWA_HOME/conf, or ./conf as fallbacks).
   *
   * @param conf is Chukwa configuration
   * @param filename is collector list
   * @return empty list if file does not exist
   * @throws IOException on other error
   */
  public Iterator<String> getCollectorURLs(Configuration conf, String filename) throws IOException {
    String chukwaHome = System.getenv("CHUKWA_HOME");
    if (chukwaHome == null) {
      chukwaHome = ".";
    }
    if (!chukwaHome.endsWith("/")) {
      chukwaHome = chukwaHome + File.separator;
    }
    log.info("Config - System.getenv(\"CHUKWA_HOME\"): [" + chukwaHome + "]");
    String chukwaConf = System.getenv("CHUKWA_CONF_DIR");
    if (chukwaConf == null) {
      chukwaConf = chukwaHome + "conf" + File.separator;
    }
    log.info("Config - System.getenv(\"chukwaConf\"): [" + chukwaConf + "]");
    // log the file actually opened; previously this always logged the
    // default COLLECTORS_FILENAME even when a different file was requested
    log.info("setting up collectors file: " + chukwaConf + File.separator
        + filename);
    File collectors = new File(chukwaConf + File.separator + filename);
    try {
      return new RetryListOfCollectors(collectors, conf);
    } catch (java.io.IOException e) {
      log.error("failed to read collectors file: ", e);
      throw e;
    }
  }

  /** Convenience overload that reads the default collectors file. */
  public Iterator<String> getCollectorURLs(Configuration conf) throws IOException {
    return getCollectorURLs(conf, COLLECTORS_FILENAME);
  }
}
| 8,161 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/test/ConsoleOutConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.test;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.*;
import org.apache.hadoop.chukwa.datacollection.agent.*;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import java.nio.charset.Charset;
import java.util.*;
/**
* Output events to stdout. Intended for debugging use.
*
*/
public class ConsoleOutConnector extends Thread implements Connector {
  final ChukwaAgent agent;
  volatile boolean shutdown;  // set by shutdown(); polled by run()
  final boolean silent;       // if true, suppress per-chunk console output

  public ConsoleOutConnector(ChukwaAgent a) {
    this(a, false);
  }

  public ConsoleOutConnector(ChukwaAgent a, boolean silent) {
    agent = a;
    this.silent = silent;
  }

  /**
   * Drains the shared chunk queue one chunk at a time, printing each chunk
   * (unless silent) and reporting the commit back to the agent.
   */
  public void run() {
    try {
      System.out.println("console connector started");
      ChunkQueue eventQueue = DataFactory.getInstance().getEventQueue();
      if (!silent)
        System.out.println("-------------------");
      while (!shutdown) {
        List<Chunk> evts = new ArrayList<Chunk>();
        eventQueue.collect(evts, 1);
        for (Chunk e : evts) {
          if (!silent) {
            System.out.println("Console out connector got event at offset "
                + e.getSeqID());
            System.out.println("data type was " + e.getDataType());
            // avoid dumping huge payloads to the console
            if (e.getData().length > 1000)
              System.out.println("data length was " + e.getData().length
                  + ", not printing");
            else
              System.out.println(new String(e.getData(), Charset.forName("UTF-8")));
          }
          agent.reportCommit(e.getInitiator(), e.getSeqID());
          if (!silent)
            System.out.println("-------------------");
        }
      }
    } catch (InterruptedException e) {
      // thread is about to exit anyway, but restore the interrupt status
      // instead of silently swallowing it
      Thread.currentThread().interrupt();
    }
  }

  public void shutdown() {
    shutdown = true;
    this.interrupt();
  }

  @Override
  public void reloadConfiguration() {
    System.out.println("reloadConfiguration");
  }
}
| 8,162 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/test/FilePerPostWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.test;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Timer;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.writer.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
/**
* A writer that writes a file for each post. Intended ONLY for architectural
* performance comparisons. Do not use this in production.
*
*/
public class FilePerPostWriter extends SeqFileWriter {
  // prefix for every output file; a counter suffix makes each name unique
  String baseName;
  AtomicLong counter = new AtomicLong(0);

  /**
   * Writes the whole batch of chunks into a brand-new ".done" sequence file.
   *
   * @param chunks chunks to persist
   * @return COMMIT_OK once the file has been written and closed
   * @throws WriterException wrapping any underlying IOException
   */
  @Override
  public synchronized CommitStatus add(List<Chunk> chunks) throws WriterException {
    try {
      String newName = baseName + "_" + counter.incrementAndGet();
      Path newOutputPath = new Path(newName + ".done");
      FSDataOutputStream currentOutputStr = fs.create(newOutputPath);
      currentPath = newOutputPath;
      currentFileName = newName;
      try {
        // Uncompressed for now
        SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(conf, currentOutputStr,
            ChukwaArchiveKey.class, ChunkImpl.class,
            SequenceFile.CompressionType.NONE, null);
        try {
          ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
          if (System.currentTimeMillis() >= nextTimePeriodComputation) {
            computeTimePeriod();
          }
          for (Chunk chunk : chunks) {
            archiveKey.setTimePartition(timePeriod);
            archiveKey.setDataType(chunk.getDataType());
            archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource()
                + "/" + chunk.getStreamName());
            archiveKey.setSeqId(chunk.getSeqID());
            // compute size for stats
            dataSize += chunk.getData().length;
            bytesThisRotate += chunk.getData().length;
            seqFileWriter.append(archiveKey, chunk);
          }
        } finally {
          // previously only closed on the success path, leaking the writer
          // (and its file handle) if append() threw
          seqFileWriter.close();
        }
      } finally {
        currentOutputStr.close();
      }
    } catch (IOException e) {
      throw new WriterException(e);
    }
    return COMMIT_OK;
  }

  @Override
  public void close() {
    // nothing to close: each add() opens and closes its own file
  }

  /**
   * Sets up the target filesystem and the stats-reporting timer.
   *
   * @param conf writer configuration; honors writer.hdfs.filesystem and
   *             falls back to fs.defaultFS
   * @throws WriterException wrapping any setup failure
   */
  @Override
  public void init(Configuration conf) throws WriterException {
    try {
      this.conf = conf;
      outputDir = conf.get(SeqFileWriter.OUTPUT_DIR_OPT, "/chukwa");
      baseName = outputDir + "/" + System.currentTimeMillis() + "_" + localHostAddr.hashCode();
      String fsname = conf.get("writer.hdfs.filesystem");
      if (fsname == null || fsname.equals("")) {
        // otherwise try to get the filesystem from hadoop
        fsname = conf.get("fs.defaultFS");
      }
      fs = FileSystem.get(new URI(fsname), conf);
      isRunning = true;
      statTimer = new Timer();
      statTimer.schedule(new StatReportingTask(), 1000,
          STAT_INTERVAL_SECONDS * 1000);
      nextTimePeriodComputation = 0;
    } catch (Exception e) {
      throw new WriterException(e);
    }
  }

  protected String getCurrentFileName() {
    return currentFileName;
  }

  protected Path getCurrentPath() {
    return currentPath;
  }
}
| 8,163 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/test/SinkFileValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
/**
 * Command-line sanity checker for a Chukwa sink (sequence) file: prints the
 * key/value class names and up to five events, verifying both are Writable.
 */
public class SinkFileValidator {
  public static void main(String[] args) {
    String fsURL = "hdfs://localhost:9000";
    String fname;
    if (args.length < 1) {
      System.out
          .println("usage: SinkFileValidator <filename> [filesystem URI] ");
      return;
    }
    fname = args[0];
    if (args.length > 1)
      fsURL = args[1];
    Configuration conf = new Configuration();
    try {
      FileSystem fs;
      // "local" selects the local filesystem; anything else is parsed as a URI
      if (fsURL.equals("local"))
        fs = FileSystem.getLocal(conf);
      else
        fs = FileSystem.get(new URI(fsURL), conf);
      SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path(fname), conf);
      try {
        System.out.println("key class name is " + r.getKeyClassName());
        System.out.println("value class name is " + r.getValueClassName());
        ChukwaArchiveKey key = new ChukwaArchiveKey();
        ChunkImpl evt = ChunkImpl.getBlankChunk();
        int events = 0;
        while (r.next(key, evt) && (events < 5)) {
          if (!Writable.class.isAssignableFrom(key.getClass()))
            System.out.println("warning: keys aren't writable");
          if (!Writable.class.isAssignableFrom(evt.getClass()))
            System.out.println("warning: values aren't writable");
          // truncate very large payloads so the console stays readable
          if (evt.getData().length > 1000) {
            System.out.println("got event; data: "
                + new String(evt.getData(), 0, 1000, Charset.forName("UTF-8")));
            System.out.println("....[truncating]");
          } else
            System.out.println("got event; data: " + new String(evt.getData(), Charset.forName("UTF-8")));
          events++;
        }
        System.out.println("file looks OK!");
      } finally {
        // previously the reader was never closed, leaking its stream
        r.close();
      }
    } catch (IOException e) {
      e.printStackTrace();
    } catch (URISyntaxException e) {
      e.printStackTrace();
    }
  }
}
| 8,164 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/test/FileTailerStressTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.test;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector;
import org.apache.hadoop.chukwa.datacollection.connector.http.HttpConnector;
import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
import org.apache.hadoop.chukwa.datacollection.writer.ConsoleWriter;
import org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import java.io.*;
import java.util.*;
public class FileTailerStressTest {
  static final int DELAY_MIN = 10 * 1000;
  static final int DELAY_RANGE = 2 * 1000;
  static final Logger log = Logger.getLogger(FileTailerStressTest.class);

  /**
   * Appends a timestamped line to its file at random intervals until the
   * thread is interrupted.
   */
  static class OccasionalWriterThread extends Thread {
    File file;

    OccasionalWriterThread(File f) {
      file = f;
    }

    public void run() {
      PrintWriter out = null;
      try {
        out = new PrintWriter(file.getAbsolutePath(), "UTF-8");
        Random rand = new Random();
        while (true) {
          int delay = rand.nextInt(DELAY_RANGE) + DELAY_MIN;
          Thread.sleep(delay);
          Date d = new Date();
          out.println("some test data written at " + d.toString());
          out.flush();
        }
      } catch (IOException e) {
        e.printStackTrace();
      } catch (InterruptedException e) {
        // expected on shutdown; restore the interrupt status
        Thread.currentThread().interrupt();
      } finally {
        // previously the writer was only closed on the InterruptedException
        // path, leaking the handle when an IOException occurred
        if (out != null) {
          out.close();
        }
      }
    }
  }

  static int FILES_TO_USE = 100;

  /**
   * Spins up an in-process collector + agent, then tails FILES_TO_USE files
   * being written concurrently, for one minute.
   *
   * @param args is command line parameters
   */
  public static void main(String[] args) {
    try {
      Server server = new Server(9990);
      Context root = new Context(server, "/", Context.SESSIONS);
      Configuration conf = new Configuration();
      ServletCollector collector = new ServletCollector(conf);
      collector.setWriter(new ConsoleWriter(true));
      root.addServlet(new ServletHolder(collector), "/*");
      server.start();
      server.setStopAtShutdown(false);
      Thread.sleep(1000);
      ChukwaAgent agent = ChukwaAgent.getAgent();
      HttpConnector connector = new HttpConnector(agent,
          "http://localhost:9990/chukwa");
      connector.start();
      ChukwaConfiguration cc = new ChukwaConfiguration();
      int portno = cc.getInt("chukwaAgent.control.port", 9093);
      ChukwaAgentController cli = new ChukwaAgentController("localhost", portno);
      File workdir = new File("/tmp/stresstest/");
      // mkdir() also returns false when the directory already exists, so only
      // warn if the directory is genuinely absent afterwards
      if (!workdir.mkdir() && !workdir.isDirectory()) {
        log.warn("Error creating working directory:" + workdir.getAbsolutePath());
      }
      for (int i = 0; i < FILES_TO_USE; ++i) {
        File newTestF = new File("/tmp/stresstest/" + i);
        newTestF.deleteOnExit();
        (new OccasionalWriterThread(newTestF)).start();
        cli.addFile("test-lines", newTestF.getAbsolutePath());
      }
      Thread.sleep(60 * 1000);
      System.out.println("cleaning up");
      // NOTE(review): File.delete() cannot remove a non-empty directory, so
      // this warning fires whenever test files remain inside — confirm intent.
      if (!workdir.delete()) {
        log.warn("Error clean up working directory:" + workdir.getAbsolutePath());
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
| 8,165 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/AsyncAckSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender;
import java.io.IOException;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.agent.*;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.collector.servlet.CommitCheckServlet;
import org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.*;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.PostMethod;
//import com.google.common.collect.SortedSetMultimap;
//import com.google.common.collect.TreeMultimap;
import org.apache.log4j.Logger;
/**
* An enhancement to ChukwaHttpSender that handles asynchronous acknowledgment.
*
* This class will periodically poll the collectors to find out how much data
* has been committed to HDFS, and will then pass those acks on to the Agent.
*/
public class AsyncAckSender extends ChukwaHttpSender{
  protected final static Logger log = Logger.getLogger(AsyncAckSender.class);
  /*
   * Represents the state required for an asynchronous ack.
   * 
   * Supplements CommitListEntry with a filename and offset;
   * the data commits when that file reaches that length.
   */
  public static class DelayedCommit extends CommitListEntry implements Comparable<DelayedCommit> {
    final String fname;   // sink file whose growth signals the commit
    long fOffset;         // file length at which the data is committed
    final String aName;   // adaptor name

    public DelayedCommit(Adaptor a, long uuid, long len, String fname,
        long offset, String aName) {
      super(a, uuid, len);
      this.fname = fname;
      this.fOffset = offset;
      this.aName = aName;
    }

    // equals/hashCode/compareTo all key on (aName, fname, start) so they are
    // mutually consistent. Previously equals() compared only aName while
    // hashCode() mixed in fname and the *mutable* fOffset, violating the
    // equals/hashCode contract and making equals inconsistent with compareTo.
    @Override
    public int hashCode() {
      return aName.hashCode() ^ fname.hashCode()
          ^ (int) start ^ (int) (start >> 32);
    }

    //sort by adaptor name first, then by start offset
    //note that returning 1 means this is "greater" than RHS
    public int compareTo(DelayedCommit o) {
      int c = o.aName.compareTo(this.aName);
      if(c != 0)
        return c;
      c = o.fname.compareTo(this.fname);
      if(c != 0)
        return c;
      if(o.start < start)
        return 1;
      else if(o.start > start)
        return -1;
      else return 0;
    }

    @Override
    public boolean equals(Object o) {
      if(!(o instanceof DelayedCommit)) {
        return false;
      }
      DelayedCommit dc = (DelayedCommit) o;
      return this.aName.equals(dc.aName) && this.fname.equals(dc.fname)
          && this.start == dc.start;
    }

    public String toString() {
      return adaptor +" commits from" + start + " to " + uuid + " when " + fname + " hits " + fOffset;
    }
  }

  public static final String POLLPERIOD_OPT = "connector.commitpoll.period";
  public static final String POLLHOSTS_OPT = "connector.commitpoll.hostfile";
  final ChukwaAgent agent;

  /*
   * The list of commits that we're expecting.
   * This is the structure used to pass the list to the CommitPollThread.  
   * Adjacent commits to the same file will be coalesced.
   * 
   */
  final List<DelayedCommit> mergedList;

  /**
   * Periodically scans a subset of the collectors, looking for committed files.
   * This way, not every collector is pestering the namenode with periodic lses.
   */
  final class CommitPollThread extends Thread {
    private ChukwaHttpSender scanPath;
    private int pollPeriod = 1000 * 30;
    // per-sink-file queues of pending commits, ordered by compareTo
    private final Map<String, PriorityQueue<DelayedCommit>> pendingCommits;

    CommitPollThread(Configuration conf, Iterator<String> tryList) {
      pollPeriod = conf.getInt(POLLPERIOD_OPT, pollPeriod);
      scanPath = new ChukwaHttpSender(conf);
      scanPath.setCollectors(tryList);
      pendingCommits = new HashMap<String, PriorityQueue<DelayedCommit>>();
    }

    private volatile boolean running = true;

    public void shutdown() {
      running = false;
      this.interrupt();
    }

    public void run() {
      try {
        while(running) {
          Thread.sleep(pollPeriod);
          //update table using list of pending delayed commits, in this thread
          checkForCommits();
          mergePendingTable();
        }
      } catch(InterruptedException e) {}
      catch(IOException e) {
        log.error(e);
      }
    }

    /*
     * Note that this method is NOT threadsafe, and should only be called
     * from the same thread that will later check for commits
     */
    private void mergePendingTable() {
      synchronized(mergedList) {
        for(DelayedCommit dc:mergedList) {
          PriorityQueue<DelayedCommit> pendList = pendingCommits.get(dc.fname);
          if(pendList == null) {
            pendList = new PriorityQueue<DelayedCommit>();
            pendingCommits.put(dc.fname, pendList);
          }
          pendList.add(dc);
        }
        mergedList.clear();
      } //end synchronized
    }

    Pattern respLine = Pattern.compile("<li>(.*) ([0-9]+)</li>");

    /**
     * Polls a collector for the lengths of committed sink files, then fires
     * every pending commit whose target offset has been reached.
     */
    private void checkForCommits() throws IOException, InterruptedException {
      log.info("checking for commited chunks");
      GetMethod method = new GetMethod();
      List<String> parsedFStatuses = scanPath.reliablySend(method, CommitCheckServlet.DEFAULT_PATH); 
      //do an http get
      for(String stat: parsedFStatuses) {
        Matcher m = respLine.matcher(stat);
        if(!m.matches())
          continue;
        String path = m.group(1);
        // primitive long avoids pointless autoboxing
        long committedOffset = Long.parseLong(m.group(2));
        PriorityQueue<DelayedCommit> delayedOnFile = pendingCommits.get(path);
        if(delayedOnFile == null)
          continue;
        HashSet<Adaptor> committed = new HashSet<Adaptor>();
        while(!delayedOnFile.isEmpty()) {
          DelayedCommit fired = delayedOnFile.element();
          if(fired.fOffset > committedOffset)
            break;
          else {
            ChukwaAgent.Offset o = agent.offset(fired.adaptor);
            if(o != null && fired.start > o.offset()) {
              log.error("can't commit "+ o.adaptorID() + " without ordering assumption");
              break; //don't commit
            }
            delayedOnFile.remove();
            String s = agent.reportCommit(fired.adaptor, fired.uuid);
            committed.add(fired.adaptor);
            //TODO: if s == null, then the adaptor has been stopped.
            //should we stop sending acks?
            log.info("COMMIT to "+ committedOffset+ " on "+ path+ ", updating " +s);
          }
        }
        adaptorReset.reportCommits(committed);
      }
    }
  }

  CommitPollThread pollThread;

  //note that at present we don't actually run this thread; we just use its methods.
  public AdaptorResetThread adaptorReset;
  Configuration conf;

  public AsyncAckSender(Configuration conf, ChukwaAgent a) throws IOException {
    super(conf);
    log.info("delayed-commit processing enabled");
    agent = a;
    mergedList = new ArrayList<DelayedCommit>();
    this.conf = conf;
    adaptorReset = new AdaptorResetThread(conf, a);
    adaptorReset.start();
    //initialize the commitpoll later, once we have the list of collectors
  }

  @Override
  public void setCollectors(Iterator<String> collectors) {
    Iterator<String> tryList = null;
    String scanHostsFilename = conf.get(POLLHOSTS_OPT, "collectors");
    try {
      tryList = DataFactory.getInstance().getCollectorURLs(conf, scanHostsFilename);
    } catch(IOException e) {
      log.warn("couldn't read " + scanHostsFilename+ " falling back on collectors list");
    }
    if(collectors instanceof RetryListOfCollectors) {
      super.setCollectors(collectors);
      if(tryList == null)
        tryList = ((RetryListOfCollectors) collectors).clone();
    } 
    else {
      ArrayList<String> l = new ArrayList<String>();
      while(collectors.hasNext())
        l.add(collectors.next());
      super.setCollectors(l.iterator());
      if(tryList == null)
        tryList = l.iterator();
    }
    pollThread = new CommitPollThread(conf, tryList);
    pollThread.setDaemon(true);
    pollThread.start();
  }

  /*
   * This method is the interface from AsyncAckSender to the CommitPollThread --
   * it gets a lock on the merge table, and then updates it with a batch of pending acks
   *
   * This method is called from the thread doing a post; the merge table is
   * read by the CommitPollThread when it figures out what commits are expected.
   */
  private void delayCommits(List<DelayedCommit> delayed) {
    Collections.sort(delayed);
    synchronized(mergedList) {
      DelayedCommit region = null;
      for(DelayedCommit cur: delayed) {
        if(region == null)
          region = cur;
        else if((cur.adaptor == region.adaptor) &&
            cur.fname.equals(region.fname) && (cur.start <= region.uuid)) {
          //since the list is sorted, region.start < cur.start
          region.uuid = Math.max(region.uuid, cur.uuid); //merge
          region.fOffset = Math.max(region.fOffset, cur.fOffset);
        } else {
          mergedList.add(region);
          region = cur;
        }
      }
      // guard: 'delayed' may be empty, in which case region is still null;
      // previously a null was appended here and later caused an NPE in
      // mergePendingTable()
      if(region != null) {
        mergedList.add(region);
      }
    }
  }

  Pattern partialCommitPat = Pattern.compile("(.*) ([0-9]+)");

  @Override
  public List<CommitListEntry> postAndParseResponse(PostMethod method, 
      List<CommitListEntry> expectedCommitResults)
  throws IOException, InterruptedException {
    adaptorReset.reportPending(expectedCommitResults);
    List<String> resp = reliablySend(method, ServletCollector.PATH);
    //expect most of 'em to be delayed
    List<DelayedCommit> toDelay = new ArrayList<DelayedCommit>(resp.size());
    ArrayList<CommitListEntry> result =  new ArrayList<CommitListEntry>();
    for(int i = 0; i < resp.size(); ++i)  {
      if(resp.get(i).startsWith(ServletCollector.ACK_PREFIX))
        result.add(expectedCommitResults.get(i));
      else {
        CommitListEntry cle = expectedCommitResults.get(i);
        Matcher m = partialCommitPat.matcher(resp.get(i));
        if(!m.matches())
          log.warn("unexpected response: "+ resp.get(i));
        else
          log.info("waiting for " + m.group(1) + " to hit " + m.group(2) + 
              " before committing "+ agent.getAdaptorName(cle.adaptor));
        String name = agent.getAdaptorName(cle.adaptor);
        if(name != null)//null name implies adaptor no longer running
          toDelay.add(new DelayedCommit(cle.adaptor, cle.uuid, cle.start, m.group(1), 
              Long.parseLong(m.group(2)), name));
      }
    }
    delayCommits(toDelay);
    return result;
  }

  @Override
  protected boolean failedCollector(String downed) {
    log.info("collector "+ downed + " down; resetting adaptors");
    adaptorReset.resetTimedOutAdaptors(0); //reset all adaptors with outstanding data.
    return false;
  }

  @Override
  public void stop() {
    pollThread.shutdown();
  }
}
| 8,166 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/ChukwaSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender;
/**
* Encapsulates all of the communication overhead needed for chunks to be delivered
* to a collector.
*/
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.sender.ChukwaHttpSender.CommitListEntry;
public interface ChukwaSender {
  /**
   * Deliver a batch of chunks to a collector.
   *
   * @param chunksToSend a list of chunks to commit
   * @return the list of committed chunks
   * @throws InterruptedException if interrupted while trying to send
   * @throws java.io.IOException when writing fails
   */
  public List<CommitListEntry> send(List<Chunk> chunksToSend)
      throws InterruptedException, java.io.IOException;

  /**
   * Supply the iterator of collector URLs to which chunks should be sent;
   * retry policy is encoded in the iterator implementation.
   *
   * @param collectors iterator over collector URLs
   */
  public void setCollectors(Iterator<String> collectors);

  /** Stop any background activity associated with this sender. */
  public void stop();
}
| 8,167 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/ChukwaHttpSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.HttpMethodBase;
import org.apache.commons.httpclient.HttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.httpclient.methods.RequestEntity;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.sender.metrics.HttpSenderMetrics;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.log4j.Logger;
/**
* Encapsulates all of the http setup and connection details needed for chunks
* to be delivered to a collector.
*
* This class should encapsulate the details of the low level data formatting.
* The Connector is responsible for picking what to send and to whom;
* retry policy is encoded in the collectors iterator.
*
* This class is not thread safe. Synchronization is the caller's responsibility.
*
* <p>
* On error, tries the list of available collectors, pauses for a minute, and
* then repeats.
* </p>
* <p>
* Will wait forever for collectors to come up.
* </p>
*/
public class ChukwaHttpSender implements ChukwaSender {
// Number of fast, in-http-client retries per collector (see the retry handler in doRequest).
final int MAX_RETRIES_PER_COLLECTOR; // fast retries, in http client
final int SENDER_RETRIES; // outer retry budget used in reliablySend()
final int WAIT_FOR_COLLECTOR_REBOOT; // ms to sleep when no collector is reachable
final int COLLECTOR_TIMEOUT; // socket (SO_TIMEOUT) for collector requests, ms
public static final String COLLECTOR_TIMEOUT_OPT = "chukwaAgent.sender.collectorTimeout";
// FIXME: this should really correspond to the timer in RetryListOfCollectors
static final HttpSenderMetrics metrics = new HttpSenderMetrics("chukwaAgent", "httpSender");
static Logger log = Logger.getLogger(ChukwaHttpSender.class);
// Shared, process-wide HTTP client and connection manager; built once in the
// static initializer below.
static HttpClient client = null;
static MultiThreadedHttpConnectionManager connectionManager = null;
String currCollector = null; // the collector URL currently being posted to
int postID = 0; // monotonically increasing id, used only for log correlation
protected Iterator<String> collectors; // rotation of candidate collectors
boolean COMPRESS; // whether to codec-compress the POST body
String CODEC_NAME; // class name of the compression codec (set when COMPRESS)
CompressionCodec codec; // instantiated codec; only set when COMPRESS is true
static {
  connectionManager = new MultiThreadedHttpConnectionManager();
  client = new HttpClient(connectionManager);
  // drop connections that have been idle for more than one second
  connectionManager.closeIdleConnections(1000);
}
/** Bookkeeping entry used to ack a delivered chunk back to the agent. */
public static class CommitListEntry {
  public Adaptor adaptor;
  public long uuid;
  public long start; // stream offset of the chunk's first byte

  /**
   * @param adaptor the adaptor that produced the chunk
   * @param uuid the chunk's sequence id
   * @param start offset in the stream at which the chunk begins
   */
  public CommitListEntry(Adaptor adaptor, long uuid, long start) {
    this.adaptor = adaptor;
    this.uuid = uuid;
    this.start = start;
  }
}
// FIXME: probably we're better off with an EventListRequestEntity
static class BuffersRequestEntity implements RequestEntity {
  List<DataOutputBuffer> buffers;
  boolean compress;
  CompressionCodec codec;

  public BuffersRequestEntity(List<DataOutputBuffer> buf, boolean compress, CompressionCodec codec) {
    this.buffers = buf;
    this.compress = compress;
    this.codec = codec;
  }

  /** Payload size before compression: a 4-byte buffer count plus every buffer's bytes. */
  private long getUncompressedContentLenght(){
    long total = 4; // accounts for the leading writeInt(buffers.size())
    for (DataOutputBuffer buffer : buffers) {
      total += buffer.getLength();
    }
    return total;
  }

  public long getContentLength() {
    // Compressed size is unknown up front; -1 tells HttpClient to stream the body.
    return compress ? -1 : getUncompressedContentLenght();
  }

  public String getContentType() {
    return "application/octet-stream";
  }

  public boolean isRepeatable() {
    // The serialized buffers stay in memory, so the request can be replayed.
    return true;
  }

  /** Writes the chunk count followed by each serialized chunk. */
  private void doWriteRequest( DataOutputStream out ) throws IOException {
    out.writeInt(buffers.size());
    for (DataOutputBuffer buffer : buffers) {
      out.write(buffer.getData(), 0, buffer.getLength());
    }
  }

  public void writeRequest(OutputStream out) throws IOException {
    if (!compress) {
      doWriteRequest(new DataOutputStream(out));
      return;
    }
    CompressionOutputStream compressed = codec.createOutputStream(out);
    doWriteRequest(new DataOutputStream(compressed));
    compressed.finish();
  }
}
/**
 * Builds a sender whose retry, timeout, and compression behavior is read from
 * the supplied configuration. The collector list starts empty; callers are
 * expected to supply one via {@link #setCollectors(Iterator)}.
 *
 * @param c the Chukwa agent configuration
 */
public ChukwaHttpSender(Configuration c) {
  // setup default collector
  ArrayList<String> tmp = new ArrayList<String>();
  this.collectors = tmp.iterator();
  MAX_RETRIES_PER_COLLECTOR = c.getInt("chukwaAgent.sender.fastRetries", 4);
  SENDER_RETRIES = c.getInt("chukwaAgent.sender.retries", 144000);
  WAIT_FOR_COLLECTOR_REBOOT = c.getInt("chukwaAgent.sender.retryInterval",
      20 * 1000);
  COLLECTOR_TIMEOUT = c.getInt(COLLECTOR_TIMEOUT_OPT, 30*1000);
  COMPRESS = c.getBoolean("chukwaAgent.output.compress", false);
  if( COMPRESS) {
    CODEC_NAME = c.get( "chukwaAgent.output.compression.type", "org.apache.hadoop.io.compress.DefaultCodec");
    Class<?> codecClass = null;
    try {
      codecClass = Class.forName( CODEC_NAME);
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, c);
      log.info("codec " + CODEC_NAME + " loaded for network compression");
    } catch (ClassNotFoundException e) {
      // fall back to uncompressed transport rather than failing startup
      log.warn("failed to create codec " + CODEC_NAME + ". Network compression won't be enabled.", e);
      COMPRESS = false;
    }
  }
}
/**
 * Supplies the rotation of collectors this sender may deliver {@link Chunk}s
 * to. If no destination has been chosen yet, the first collector from the
 * iterator becomes the active one.
 *
 * @param collectors is a list of collectors
 */
public void setCollectors(Iterator<String> collectors) {
  this.collectors = collectors;
  if (currCollector != null) {
    return; // keep the destination that is already set up
  }
  if (collectors.hasNext()) {
    currCollector = collectors.next();
  } else {
    log.error("No collectors to try in send(), won't even try to do doPost()");
  }
}
/**
 * Serializes each chunk into its own DataOutputBuffer, wraps all the buffers
 * into a single HTTP POST, and delivers them to the current collector.
 * The input list is cleared as a side effect.
 *
 * @param toSend the chunks to deliver
 * @return the list of CommitListEntry's for the chunks ACKed by the collector
 * @throws InterruptedException if shutdown has been initiated
 * @throws IOException if no collector accepts the post
 */
@Override
public List<CommitListEntry> send(List<Chunk> toSend)
    throws InterruptedException, IOException {
  List<DataOutputBuffer> serializedEvents = new ArrayList<DataOutputBuffer>();
  List<CommitListEntry> commitResults = new ArrayList<CommitListEntry>();
  int thisPost = postID++;
  int toSendSize = toSend.size();
  log.info("collected " + toSendSize + " chunks for post_"+thisPost);
  // Serialize each chunk in turn into its own DataOutputBuffer and add that
  // buffer to serializedEvents
  for (Chunk c : toSend) {
    DataOutputBuffer b = new DataOutputBuffer(c.getSerializedSizeEstimate());
    try {
      c.write(b);
    } catch (IOException err) {
      log.error("serialization threw IOException", err);
    }
    serializedEvents.add(b);
    // store a CLE for this chunk which we will use to ack this chunk to the
    // caller of send()
    // (e.g. the agent will use the list of CLE's for checkpointing)
    log.info("chunk seqID:"+c.getSeqID());
    // start = seqID minus data length, i.e. the chunk's first byte offset
    commitResults.add(new CommitListEntry(c.getInitiator(), c.getSeqID(),
        c.getSeqID() - c.getData().length));
  }
  toSend.clear();
  // collect all serialized chunks into a single buffer to send
  RequestEntity postData = new BuffersRequestEntity(serializedEvents, COMPRESS, codec);
  PostMethod method = new PostMethod();
  method.setRequestEntity(postData);
  StringBuilder sb = new StringBuilder( ">>>>>> HTTP post_");
  sb.append( thisPost).append( " to ").append( currCollector).append( " length = ");
  if( COMPRESS) {
    // compressed length is unknown up front, so log the raw payload size
    sb.append( ((BuffersRequestEntity)postData).getUncompressedContentLenght())
      .append( " of uncompressed data");
  }
  else {
    sb.append( postData.getContentLength());
  }
  log.info( sb);
  List<CommitListEntry> results = postAndParseResponse(method, commitResults);
  log.info("post_" + thisPost + " sent " + toSendSize + " chunks, got back " + results.size() + " acks");
  return results;
}
/**
 * Pushes the given POST to a collector and reports which chunks were
 * committed. Currently every expected entry is reported as committed once the
 * post succeeds.
 *
 * @param method the data to push
 * @param expectedCommitResults the list of chunks expected to be acked
 * @return the list of committed chunks
 * @throws IOException if error writing
 * @throws InterruptedException if shutdown has been initiated
 */
public List<CommitListEntry> postAndParseResponse(PostMethod method,
    List<CommitListEntry> expectedCommitResults)
    throws IOException, InterruptedException{
  // FIXME: shouldn't need to hardcode the servlet path here
  reliablySend(method, "chukwa");
  return expectedCommitResults;
}
/**
 * Responsible for executing the supplied method on at least one collector.
 * Rolls over to the next collector on failure; when the rotation is
 * exhausted, sleeps and retries until the retry budget runs out.
 *
 * @param method is HTTP method
 * @param pathSuffix servlet path appended to the collector URL
 * @return the list of commited status
 * @throws InterruptedException if shutdown has been initiated
 * @throws IOException if no collector responds with an OK
 */
protected List<String> reliablySend(HttpMethodBase method, String pathSuffix) throws InterruptedException, IOException {
  int retries = SENDER_RETRIES;
  while (currCollector != null) {
    // need to pick a destination here
    try {
      // send it across the network
      List<String> responses = doRequest(method, currCollector+ pathSuffix);
      retries = SENDER_RETRIES; // reset count on success
      return responses;
    } catch (Throwable e) {
      log.error("Http post exception on "+ currCollector +": "+ e.toString());
      log.debug("Http post exception on "+ currCollector, e);
      ChukwaHttpSender.metrics.httpThrowable.inc();
      if (collectors.hasNext()) {
        ChukwaHttpSender.metrics.collectorRollover.inc();
        // failedCollector() decides whether this post is retried on the
        // next collector or abandoned (the default retries)
        boolean repeatPost = failedCollector(currCollector);
        currCollector = collectors.next();
        if(repeatPost)
          log.info("Found a new collector to roll over to, retrying HTTP Post to collector "
              + currCollector);
        else {
          log.info("Using " + currCollector + " in the future, but not retrying this post");
          break;
        }
      } else {
        // rotation exhausted: wait for a collector to come back, burning
        // one unit of the retry budget per sleep
        if (retries > 0) {
          log.warn("No more collectors to try rolling over to; waiting "
              + WAIT_FOR_COLLECTOR_REBOOT + " ms (" + retries
              + " retries left)");
          Thread.sleep(WAIT_FOR_COLLECTOR_REBOOT);
          retries--;
        } else {
          log.error("No more collectors to try rolling over to; aborting post");
          throw new IOException("no collectors");
        }
      }
    } finally {
      // be sure the connection is released back to the connection manager
      method.releaseConnection();
    }
  } // end retry loop
  // reached only when the post was abandoned (repeatPost == false) or no
  // collector was ever configured
  return new ArrayList<String>();
}
/**
 * A hook for taking action when a collector is declared failed.
 * Subclasses may override to record state about the down collector.
 *
 * @param downCollector the collector being marked down
 * @return whether to retry the current post on the next collector
 *         (this base implementation always retries)
 */
protected boolean failedCollector(String downCollector) {
  log.debug("declaring "+ downCollector + " down");
  return true;
}
/**
 * Responsible for performing a single HTTP operation against a specified
 * collector URL.
 *
 * @param method the HTTP method (typically a POST) to execute
 * @param dest the URL being requested. (Including hostname)
 * @return the response body, one list entry per line
 * @throws IOException if the request fails at the transport level
 * @throws HttpException if the collector responds with a non-200 status
 */
protected List<String> doRequest(HttpMethodBase method, String dest)
    throws IOException, HttpException {
  HttpMethodParams pars = method.getParams();
  // Retry quickly inside httpclient for transient failures, but treat a
  // connection refusal as final so the caller can roll over to another
  // collector.
  pars.setParameter(HttpMethodParams.RETRY_HANDLER,
      new HttpMethodRetryHandler() {
        public boolean retryMethod(HttpMethod m, IOException e, int exec) {
          return !(e instanceof java.net.ConnectException)
              && (exec < MAX_RETRIES_PER_COLLECTOR);
        }
      });
  pars.setParameter(HttpMethodParams.SO_TIMEOUT, Integer.valueOf(COLLECTOR_TIMEOUT));
  method.setParams(pars);
  method.setPath(dest);
  // Send POST request
  ChukwaHttpSender.metrics.httpPost.inc();
  int statusCode = client.executeMethod(method);
  if (statusCode != HttpStatus.SC_OK) {
    ChukwaHttpSender.metrics.httpException.inc();
    if (statusCode == HttpStatus.SC_REQUEST_TIMEOUT ) {
      ChukwaHttpSender.metrics.httpTimeOutException.inc();
    }
    log.error(">>>>>> HTTP response from " + dest + " statusLine: " + method.getStatusLine());
    // do something aggressive here
    throw new HttpException("got back a failure from server");
  }
  // implicitly "else"
  log.info(">>>>>> HTTP Got success back from "+ dest + "; response length "
      + method.getResponseContentLength());
  // FIXME: should parse acks here
  // Get the response body; try-with-resources closes the reader even if
  // readLine() throws (the original code never closed it).
  byte[] resp_buf = method.getResponseBody();
  List<String> resp = new ArrayList<String>();
  try (BufferedReader br = new BufferedReader(new InputStreamReader(
      new ByteArrayInputStream(resp_buf), Charset.forName("UTF-8")))) {
    String line;
    while ((line = br.readLine()) != null) {
      if (log.isDebugEnabled()) {
        log.debug("response: " + line);
      }
      resp.add(line);
    }
  }
  return resp;
}
@Override
public void stop() {
  // No per-sender resources to release: the HttpClient and its connection
  // manager are static and shared process-wide (see the static initializer).
}
}
| 8,168 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/RetryListOfCollectors.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender;
import java.io.*;
import java.nio.charset.Charset;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
/***
* An iterator returning a list of Collectors to try. This class is
* nondeterministic, since it puts collectors back on the list after some
* period.
*
* No node will be polled more than once per maxRetryRateMs milliseconds.
* hasNext() will continue return true if you have not called it recently.
*
*
*/
public class RetryListOfCollectors implements Iterator<String>, Cloneable {

  int maxRetryRateMs; // minimum ms between two hand-outs of the first node
  List<String> collectors; // canonicalized collector URLs, served round-robin
  long lastLookAtFirstNode; // when index 0 was last returned by next()
  int nextCollector = 0; // index of the next collector to hand out
  private String portNo; // default port appended to entries that lack one
  public static final String RETRY_RATE_OPT = "chukwaAgent.connector.retryRate";

  /**
   * Reads a newline-separated list of collectors from the given file,
   * canonicalizes each entry, and shuffles the resulting list.
   *
   * @param collectorFile file listing one collector per line
   * @param conf configuration supplying the default port and retry rate
   * @throws IOException if the file is missing or unreadable
   */
  public RetryListOfCollectors(File collectorFile, Configuration conf)
      throws IOException {
    this(conf);
    try {
      // NOTE(review): if readLine() throws, neither stream is closed —
      // br.close() only runs on the success path.
      FileInputStream fis = new FileInputStream(collectorFile);
      BufferedReader br = new BufferedReader(new InputStreamReader(fis, Charset.forName("UTF-8")));
      String line, parsedline;
      while ((line = br.readLine()) != null) {
        parsedline = canonicalizeLine(line);
        collectors.add(parsedline);
      }
      br.close();
    } catch (FileNotFoundException e) {
      System.err.println("Error in RetryListOfCollectors() opening file"
          + collectorFile.getCanonicalPath() + ", double check that you have"
          + "set the CHUKWA_CONF_DIR environment variable. Also, ensure file"
          + " exists and is in classpath");
      throw e;
    } catch (IOException e) {
      System.err
          .println("I/O error in RetryListOfcollectors instantiation in readLine() from specified collectors file");
      throw e;
    }
    shuffleList();
  }

  /**
   * Normalizes one collector entry: prepends "http://" when no protocol is
   * present, appends the default port when none is given, and makes sure the
   * URL ends with a resource path ("/").
   */
  private String canonicalizeLine(String line) {
    String parsedline;
    if (!line.contains("://")) {
      // no protocol, assume http
      if (line.matches(".*:\\d+.*")) {
        parsedline = "http://" + line+"/";
      } else {
        parsedline = "http://" + line + ":" + portNo;
      }
    } else {
      if (line.matches(".*:\\d+.*")) {
        parsedline = line;
      } else {
        parsedline = line + ":" + portNo;
      }
    }
    if(!parsedline.matches(".*\\w/.*")) //no resource name
      parsedline = parsedline+"/";
    return parsedline;
  }

  /**
   * This is only used for debugging. Possibly it should sanitize urls the same way the other
   * constructor does.
   * @param collectors is list of collector hostname
   * @param conf is Chukwa configuration
   */
  public RetryListOfCollectors(final List<String> collectors, Configuration conf) {
    this(conf);
    this.collectors.addAll(collectors);
    //we don't shuffle the list here -- this constructor is only used for test purposes
  }

  /** Base constructor: empty list plus port/retry-rate settings from conf. */
  public RetryListOfCollectors(Configuration conf) {
    collectors = new ArrayList<String>();
    portNo = conf.get("chukwaCollector.http.port", "8080");
    maxRetryRateMs = conf.getInt(RETRY_RATE_OPT, 15 * 1000);
    lastLookAtFirstNode = 0;
  }

  // for now, use a simple O(n^2) algorithm.
  // safe, because we only do this once, and on smallish lists
  public void shuffleList() {
    ArrayList<String> newList = new ArrayList<String>();
    Random r = new java.util.Random();
    while (!collectors.isEmpty()) {
      int toRemove = r.nextInt(collectors.size());
      String next = collectors.remove(toRemove);
      newList.add(next);
    }
    collectors = newList;
  }

  /**
   * True when another collector may be polled now. Mid-rotation this is
   * always true; once the rotation wraps to index 0 it stays false until
   * maxRetryRateMs has elapsed since index 0 was last handed out, which
   * throttles complete passes over the list.
   */
  public boolean hasNext() {
    return collectors.size() > 0
        && ((nextCollector != 0) || (System.currentTimeMillis()
        - lastLookAtFirstNode > maxRetryRateMs));
  }

  // NOTE(review): returns null instead of throwing NoSuchElementException
  // when exhausted, deviating from the Iterator contract — callers must
  // check hasNext() first.
  public String next() {
    if (hasNext()) {
      int currCollector = nextCollector;
      nextCollector = (nextCollector + 1) % collectors.size();
      if (currCollector == 0)
        lastLookAtFirstNode = System.currentTimeMillis();
      return collectors.get(currCollector);
    } else
      return null;
  }

  /** Appends a collector to the rotation. */
  public void add(String collector) {
    collectors.add(collector);
  }

  public void remove() {
    throw new UnsupportedOperationException();
    // FIXME: maybe just remove a collector from our list and then
    // FIXME: make sure next doesn't break (i.e. reset nextCollector if
    // necessary)
  }

  /**
   *
   * @return total number of collectors in list
   */
  int total() {
    return collectors.size();
  }

  // NOTE(review): super.clone() is shallow, so a clone shares the same
  // underlying collectors list with the original — confirm that sharing
  // is intended before relying on independent iteration state.
  public RetryListOfCollectors clone() {
    try {
      RetryListOfCollectors clone = (RetryListOfCollectors) super.clone();
      return clone;
    } catch(CloneNotSupportedException e) {
      return null;
    }
  }
}
| 8,169 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/metrics/HttpSenderActivityMBean.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
public class HttpSenderActivityMBean extends MetricsDynamicMBeanBase {

  /** JMX object name obtained at registration (see the guard in shutdown()). */
  private final ObjectName mbeanName;

  /**
   * Publishes the given metrics registry over JMX as "HttpSenderActivity".
   *
   * @param mr the registry whose metrics are exposed
   * @param serviceName the JMX service name to register under
   */
  public HttpSenderActivityMBean(final MetricsRegistry mr, final String serviceName) {
    super(mr, "Http Sender layer statistics");
    mbeanName = MBeanUtil.registerMBean(serviceName, "HttpSenderActivity", this);
  }

  /** Unregisters this bean from JMX; a no-op when no name was obtained. */
  public void shutdown() {
    if (mbeanName == null) {
      return;
    }
    MBeanUtil.unregisterMBean(mbeanName);
  }
}
| 8,170 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/sender/metrics/HttpSenderMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.sender.metrics;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
public class HttpSenderMetrics implements Updater {

  public MetricsRegistry registry = new MetricsRegistry();
  private MetricsRecord metricsRecord;
  private HttpSenderActivityMBean mbean;

  // Counters below are pushed to the metrics record by doUpdates().
  // (Fixed description typo: "rollovert" -> "rollovers".)
  public MetricsTimeVaryingInt collectorRollover =
    new MetricsTimeVaryingInt("collectorRollover", registry,"number of collector rollovers");

  public MetricsTimeVaryingInt httpPost =
    new MetricsTimeVaryingInt("httpPost", registry,"number of HTTP post");

  public MetricsTimeVaryingInt httpException =
    new MetricsTimeVaryingInt("httpException", registry,"number of HTTP Exception");

  public MetricsTimeVaryingInt httpThrowable =
    new MetricsTimeVaryingInt("httpThrowable", registry,"number of HTTP Throwable exception");

  public MetricsTimeVaryingInt httpTimeOutException =
    new MetricsTimeVaryingInt("httpTimeOutException", registry,"number of HTTP TimeOutException");

  /** Creates a new instance of HttpSenderMetrics
   * @param processName is jvm process name
   * @param recordName is Hadoop metrics data type
   * */
  public HttpSenderMetrics(String processName, String recordName) {
    MetricsContext context = MetricsUtil.getContext(processName);
    metricsRecord = MetricsUtil.createRecord(context, recordName);
    metricsRecord.setTag("process", processName);
    mbean = new HttpSenderActivityMBean(registry, recordName);
    // register to receive periodic doUpdates() callbacks from the context
    context.registerUpdater(this);
  }

  /**
   * Since this object is a registered updater, this method will be called
   * periodically, e.g. every 5 seconds.
   */
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      for (MetricsBase m : registry.getMetricsList()) {
        m.pushMetric(metricsRecord);
      }
    }
    metricsRecord.update();
  }

  /** Unregisters the JMX bean, if one was created. */
  public void shutdown() {
    if (mbean != null)
      mbean.shutdown();
  }
}
| 8,171 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/CommitCheckServlet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.collector.servlet;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import java.util.*;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter;
import org.apache.hadoop.chukwa.extraction.CHUKWA_CONSTANT;
import org.apache.hadoop.chukwa.extraction.archive.SinkArchiver;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
@Deprecated
public class CommitCheckServlet extends HttpServlet {

  private static final long serialVersionUID = -4627538252371890849L;
  protected final static Logger log = Logger.getLogger(CommitCheckServlet.class);

  transient CommitCheckThread commitCheck; // background scanner, started in init()
  transient Configuration conf;

  //interval at which to scan the filesystem, ms
  public static final String SCANPERIOD_OPT = "chukwaCollector.asyncAcks.scanperiod";
  //interval at which to discard seen files, ms
  public static final String PURGEDELAY_OPT = "chukwaCollector.asyncAcks.purgedelay";
  //list of dirs to search, separated by commas
  public static final String SCANPATHS_OPT = "chukwaCollector.asyncAcks.scanpaths";
  public static final String DEFAULT_PATH = "acks"; //path to this servlet on collector

  /** @param conf configuration supplying filesystem URI and scan settings */
  public CommitCheckServlet(Configuration conf) {
    this.conf = conf;
  }

  /** Opens the configured filesystem and starts the background scan thread. */
  public void init(ServletConfig servletConf) throws ServletException {
    log.info("initing commit check servlet");
    try {
      FileSystem fs = FileSystem.get(
          new URI(conf.get("writer.hdfs.filesystem", "file:///")), conf);
      log.info("commitcheck fs is " + fs.getUri());
      commitCheck = new CommitCheckThread(conf, fs);
      commitCheck.start();
    } catch(Exception e) {
      log.error("couldn't start CommitCheckServlet", e);
      throw new ServletException(e);
    }
  }

  @Override
  protected void doTrace(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    // TRACE is not supported; reply 405 Method Not Allowed.
    resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
  }

  /** Renders the current file-length table as a simple HTML list. */
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException {
    PrintStream out = new PrintStream(resp.getOutputStream(), true, "UTF-8");
    resp.setStatus(200);
    out.println("<html><body><h2>Commit status</h2><ul>");
    for(String s: commitCheck.getLengthList())
      out.println("<li>" + s + "</li>");
    out.println("</ul></body></html>");
  }

  @Override
  public void destroy() {
    commitCheck.shutdown();
  }

  /**
   * Ideally, we'd use zookeeper to monitor archiver/demux rotation.
   * For now, instead, we'll just do an ls in a bunch of places.
   */
  private static class CommitCheckThread extends Thread implements CHUKWA_CONSTANT {
    int checkInterval = 1000 * 30; // ms between filesystem scans
    volatile boolean running = true;
    final Collection<Path> pathsToSearch;
    final FileSystem fs;
    final Map<String, Long> lengthTable; // file name -> last observed length
    final PriorityQueue<PurgeTask> oldEntries; // entries scheduled for expiry
    long delayUntilPurge = 1000 * 60 * 60 * 12;

    /** A deferred "forget this file" action, ordered by purge time. */
    static class PurgeTask implements Comparable<PurgeTask>{
      long purgeTime;
      String toPurge;
      long len;

      public PurgeTask(String s, long time, long len) {
        this.toPurge = s;
        this.purgeTime = time;
        this.len = len;
      }

      // NOTE(review): equality is defined through hashCode, which hashes only
      // purgeTime — two tasks for different files with the same purge time
      // compare equal. Left as-is to preserve existing ordering semantics.
      @Override
      public boolean equals (Object o) {
        if(o == null || !(o instanceof PurgeTask)) {
          return false;
        }
        PurgeTask other = (PurgeTask) o;
        return this.hashCode() == other.hashCode();
      }

      @Override
      public int compareTo(PurgeTask p) {
        if(purgeTime < p.purgeTime)
          return -1;
        else if (this.equals(p))
          return 0;
        else
          return 1;
      }

      @Override
      public int hashCode() {
        return new HashCodeBuilder(3221, 4271).append(purgeTime).toHashCode();
      }
    }

    /**
     * Builds the scanner: watches the sink directory, any additional
     * configured paths, and the archives MR input directory.
     */
    public CommitCheckThread(Configuration conf, FileSystem fs) {
      this.fs = fs;
      pathsToSearch = new ArrayList<Path>();
      lengthTable = new LinkedHashMap<String, Long>();
      oldEntries = new PriorityQueue<PurgeTask>();
      checkInterval = conf.getInt(SCANPERIOD_OPT, checkInterval);
      String sinkPath = conf.get(SeqFileWriter.OUTPUT_DIR_OPT, "/chukwa/logs");
      pathsToSearch.add(new Path(sinkPath));
      String additionalSearchPaths = conf.get(SCANPATHS_OPT, "");
      String[] paths = additionalSearchPaths.split(",");
      for(String s: paths)
        if(s.length() > 1) {
          Path path = new Path(s);
          if(!pathsToSearch.contains(path))
            pathsToSearch.add(path);
        }
      delayUntilPurge = conf.getLong(PURGEDELAY_OPT, delayUntilPurge);
      String chukwaRootDir = conf.get(CHUKWA_ROOT_DIR_FIELD, DEFAULT_CHUKWA_ROOT_DIR_NAME);
      String archivesRootProcessingDir = chukwaRootDir + ARCHIVES_PROCESSING_DIR_NAME;
      String archivesMRInputDir = archivesRootProcessingDir + ARCHIVES_MR_INPUT_DIR_NAME;
      pathsToSearch.add(new Path(archivesMRInputDir));
    }

    public void shutdown() {
      running = false;
      this.interrupt();
    }

    /** Scan loop: sleep, list the watched dirs, then expire stale entries. */
    public void run() {
      while(running) {
        try {
          Thread.sleep(checkInterval);
          scanFS();
          purgeOldEntries();
        } catch(InterruptedException e) {
          // interrupted by shutdown(); the loop re-checks 'running'
        }
        catch(IOException e) {
          log.error("io problem", e);
        }
      }
    }

    /**
     * Drops expired entries from the length table. An entry is only removed
     * when the purge task's recorded length is at least the current one, so a
     * file that has grown since being scheduled is kept.
     */
    private synchronized void purgeOldEntries() {
      long now = System.currentTimeMillis();
      // Bug fix: re-peek on every iteration. The previous code peeked once
      // and never advanced, so a single expired head looped on a stale
      // reference, drained the queue, and then threw NoSuchElementException
      // from remove(), killing the scan thread.
      PurgeTask p;
      while ((p = oldEntries.peek()) != null && p.purgeTime < now) {
        oldEntries.remove();
        Long curLen = lengthTable.get(p.toPurge);
        if (curLen != null && p.len >= curLen)
          lengthTable.remove(p.toPurge);
      }
    }

    /** Lists each watched dir and records the length of every data sink file. */
    private void scanFS() throws IOException {
      long nextPurgeTime = System.currentTimeMillis() + delayUntilPurge;
      for(Path dir: pathsToSearch) {
        int filesSeen = 0;
        FileStatus[] dataSinkFiles = fs.listStatus(dir, SinkArchiver.DATA_SINK_FILTER);
        if(dataSinkFiles == null || dataSinkFiles.length == 0)
          continue;
        synchronized(this) {
          for(FileStatus fstatus: dataSinkFiles) {
            filesSeen++;
            String name = fstatus.getPath().getName();
            long len = fstatus.getLen();
            oldEntries.add(new PurgeTask(name, nextPurgeTime, len));
            lengthTable.put(name, len);
          }
        }
        log.info("scanning fs: " + dir + "; saw "+ filesSeen+ " files");
      }
    }

    /** @return a snapshot of "name length" lines for the status page. */
    public synchronized List<String> getLengthList() {
      ArrayList<String> list = new ArrayList<String>(lengthTable.size());
      for(Map.Entry<String, Long> e: lengthTable.entrySet()) {
        list.add(e.getKey() + " " + e.getValue());
      }
      return list;
    }
  }
}
| 8,172 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/ServletCollector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.collector.servlet;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.util.LinkedList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.writer.ChukwaWriter;
import org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.log4j.Logger;
@Deprecated
public class ServletCollector extends HttpServlet {
static final boolean FANCY_DIAGNOSTICS = false; // diagnostic flag; no uses visible in this chunk
public static final String PATH = "chukwa"; // servlet path this collector serves
/**
 * If a chunk is committed; then the ack will start with the following string.
 */
public static final String ACK_PREFIX = "ok: ";
transient ChukwaWriter writer = null; // destination pipeline for received chunks
private static final long serialVersionUID = 6286162898591407111L;
transient Logger log = Logger.getLogger(ServletCollector.class);
boolean COMPRESS; // whether posted bodies are codec-compressed
String CODEC_NAME; // class name of the decompression codec (set when COMPRESS)
transient CompressionCodec codec; // instantiated codec; only set when COMPRESS
/** Installs a pre-built writer, bypassing construction in init(). */
public void setWriter(ChukwaWriter w) {
  writer = w;
}
/** @return the writer chunks are handed to (may be null before init) */
public ChukwaWriter getWriter() {
  return writer;
}
long statTime = 0L; // last time the stats timer task fired
int numberHTTPConnection = 0; // posts seen since the last per-minute reset
int numberchunks = 0; // chunks accepted since the last per-minute reset
long lifetimechunks = 0; // chunks accepted since startup (never reset here)
transient Configuration conf;
/** @param c the collector configuration used to build the writer pipeline */
public ServletCollector(Configuration c) {
  conf = c;
}
/**
 * Servlet lifecycle hook: starts the per-minute stats logging timer and
 * constructs the writer pipeline (the configured ChukwaWriter class,
 * defaulting to SeqFileWriter) plus the optional network codec.
 */
public void init(ServletConfig servletConf) throws ServletException {
  log.info("initing servletCollector");
  if (servletConf == null) {
    log.fatal("no servlet config");
    return;
  }
  // Log and reset the per-minute connection/chunk counters once a minute.
  Timer statTimer = new Timer();
  statTimer.schedule(new TimerTask() {
    public void run() {
      log.info("stats:ServletCollector,numberHTTPConnection:"
          + numberHTTPConnection + ",numberchunks:" + numberchunks);
      statTime = System.currentTimeMillis();
      numberHTTPConnection = 0;
      numberchunks = 0;
    }
  }, (1000), (60 * 1000));
  if (writer != null) {
    log.info("writer set up statically, no need for Collector.init() to do it");
    return;
  }
  try {
    String writerClassName = conf.get("chukwaCollector.writerClass",
        SeqFileWriter.class.getCanonicalName());
    Class<?> writerClass = Class.forName(writerClassName);
    if (writerClass != null
        && ChukwaWriter.class.isAssignableFrom(writerClass))
      writer = (ChukwaWriter) writerClass.newInstance();
  } catch (Exception e) {
    log.warn("failed to use user-chosen writer class, defaulting to SeqFileWriter", e);
  }
  // Mirror the agent's compression settings so posted bodies can be decoded.
  COMPRESS = conf.getBoolean("chukwaAgent.output.compress", false);
  if( COMPRESS) {
    CODEC_NAME = conf.get( "chukwaAgent.output.compression.type", "org.apache.hadoop.io.compress.DefaultCodec");
    Class<?> codecClass = null;
    try {
      codecClass = Class.forName( CODEC_NAME);
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
      log.info("codec " + CODEC_NAME + " loaded for network compression");
    } catch (ClassNotFoundException e) {
      log.warn("failed to create codec " + CODEC_NAME + ". Network compression won't be enabled.", e);
      COMPRESS = false;
    }
  }
  // We default to here if the pipeline construction failed or didn't happen.
  try {
    if (writer == null) {
      writer = new SeqFileWriter();
    }
    writer.init(conf);
  } catch (Throwable e) {
    log.warn("Exception trying to initialize SeqFileWriter",e);
  }
}
// Disable HTTP TRACE (cross-site-tracing hardening): always answer 405.
@Override
protected void doTrace(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
  resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
}
/**
 * Core data path: decodes a batch of serialized chunks from the request
 * body and hands them to the writer. The response body carries per-chunk
 * ACK lines that the sending agent uses for reliable delivery.
 */
protected void accept(HttpServletRequest req, HttpServletResponse resp)
    throws ServletException {
  numberHTTPConnection++;
  final long currentTime = System.currentTimeMillis();
  try {
    log.debug("new post from " + req.getRemoteHost() + " at " + currentTime);
    java.io.InputStream in = req.getInputStream();

    ServletOutputStream l_out = resp.getOutputStream();

    DataInputStream di = null;
    // Wrap the input with the codec when network compression is enabled;
    // snapshot COMPRESS so the choice is stable for this request.
    boolean compressNetwork = COMPRESS;
    if( compressNetwork){
      InputStream cin = codec.createInputStream( in);
      di = new DataInputStream(cin);
    }
    else {
      di = new DataInputStream(in);
    }

    // Wire format: an int chunk count, then that many serialized ChunkImpls.
    final int numEvents = di.readInt();
    // log.info("saw " + numEvents+ " in request");

    List<Chunk> events = new LinkedList<Chunk>();
    StringBuilder sb = new StringBuilder();

    for (int i = 0; i < numEvents; i++) {
      ChunkImpl logEvent = ChunkImpl.read(di);
      events.add(logEvent);
    }

    int responseStatus = HttpServletResponse.SC_OK;

    // write new data to data sync file
    if (writer != null) {
      ChukwaWriter.CommitStatus result = writer.add(events);

      // this is where we ACK this connection

      if(result == ChukwaWriter.COMMIT_OK) {
        // only count the chunks if result is commit or commit pending
        numberchunks += events.size();
        lifetimechunks += events.size();
        // ACK line per chunk: length plus last byte offset of the chunk.
        for(Chunk receivedChunk: events) {
          sb.append(ACK_PREFIX);
          sb.append(receivedChunk.getData().length);
          sb.append(" bytes ending at offset ");
          sb.append(receivedChunk.getSeqID() - 1).append("\n");
        }
      } else if(result instanceof ChukwaWriter.COMMIT_PENDING) {
        // only count the chunks if result is commit or commit pending
        numberchunks += events.size();
        lifetimechunks += events.size();
        // Pending commits: echo the writer-supplied ACK entries verbatim.
        for(String s: ((ChukwaWriter.COMMIT_PENDING) result).pendingEntries)
          sb.append(s);
      } else if(result == ChukwaWriter.COMMIT_FAIL) {
        sb.append("Commit failed");
        responseStatus = HttpServletResponse.SC_SERVICE_UNAVAILABLE;
      }

      l_out.print(sb.toString());
    } else {
      l_out.println("can't write: no writer");
    }
    resp.setStatus(responseStatus);

  } catch (Throwable e) {
    // Any failure (bad wire data, writer error) becomes a 500 via
    // ServletException; the agent treats the post as unacknowledged.
    log.warn("Exception talking to " + req.getRemoteHost() + " at t="
        + currentTime, e);
    throw new ServletException(e);
  }
}
// POST is the data path: delegate to accept(), which decodes and stores chunks.
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
    throws ServletException, IOException {
  accept(req, resp);
}
/**
 * GET handler used as a health check: "?ping" returns the stats counters
 * as plain text; otherwise a minimal HTML status page is returned.
 */
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws ServletException, IOException {

  log.info("new GET from " + req.getRemoteHost() + " at " + System.currentTimeMillis());
  // autoflush PrintStream, explicit UTF-8 to avoid platform-charset output
  PrintStream out = new PrintStream(resp.getOutputStream(), true, "UTF-8");
  resp.setStatus(200);

  String pingAtt = req.getParameter("ping");
  if (pingAtt != null) {
    out.println("Date:" + statTime);
    out.println("Now:" + System.currentTimeMillis());
    out.println("numberHTTPConnection in time window:"
        + numberHTTPConnection);
    out.println("numberchunks in time window:" + numberchunks);
    out.println("lifetimechunks:" + lifetimechunks);
  } else {
    out.println("<html><body><h2>Chukwa servlet running</h2>");
    out.println("</body></html>");
  }
}
// Human-readable servlet description for the container.
@Override
public String getServletInfo() {
  return "Chukwa Servlet Collector";
}
/**
 * Servlet shutdown: flush and close the writer so buffered chunks are not
 * lost. Fixed: guard against a null writer (init() may have failed before
 * creating one), and drop the redundant printStackTrace() -- the exception
 * is already logged with its stack trace via log.warn.
 */
@Override
public void destroy() {
  try {
    if (writer != null) {
      writer.close();
    }
  } catch (WriterException e) {
    // destroy() must not throw during container shutdown; log and continue.
    log.warn("Exception during close", e);
  }
  super.destroy();
}
}
| 8,173 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/ServletDiagnostics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.collector.servlet;
import java.io.PrintStream;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.log4j.Logger;
import java.util.*;
/**
 * Per-request diagnostics for the (deprecated) servlet collector: one
 * instance is expected per HTTP post. Aggregate history is kept in a
 * static list shared by all instances and guarded by its own monitor.
 */
@Deprecated
public class ServletDiagnostics {

  static Logger log = Logger.getLogger(ServletDiagnostics.class);

  static int CHUNKS_TO_KEEP = 50;
  static int CHUNKS_TO_DISPLAY = 50;

  private static class PostStats { // statistics about a chunk
    public PostStats(String src, int count, long receivedTs) {
      this.count = count;
      this.src = src;
      this.receivedTs = receivedTs;
      types = new String[count];
      names = new String[count];
      lengths = new int[count];
      seenChunkCount = 0;
      dataSize = 0;
    }

    final int count;          // declared number of chunks in the post
    final String src;         // remote host the post came from
    final long receivedTs;    // arrival timestamp (ms)
    final String[] types, names;
    final int[] lengths;
    int seenChunkCount;       // chunks actually recorded so far
    long dataSize;            // total bytes across recorded chunks

    public void addChunk(ChunkImpl c, int position) {
      if (position != seenChunkCount)
        log.warn("servlet collector is passing chunk " + position
            + " but diagnostics has seen" + seenChunkCount);
      else if (seenChunkCount >= count) {
        log.warn("too many chunks in post declared as length " + count);
      } else {
        types[seenChunkCount] = c.getDataType();
        lengths[seenChunkCount] = c.getData().length;
        names[seenChunkCount] = c.getStreamName();
        dataSize += c.getData().length;
        ++seenChunkCount;
      }
    }
  }

  static {
    lastPosts = new LinkedList<PostStats>();
  }

  static LinkedList<PostStats> lastPosts;
  PostStats curPost;

  public void sawPost(String source, int chunks, long receivedTs) {
    if (curPost != null) {
      log.warn("should only have one HTTP post per ServletDiagnostics");
      doneWithPost();
    }
    curPost = new PostStats(source, chunks, receivedTs);
  }

  public void sawChunk(ChunkImpl c, int pos) {
    // Fixed: previously threw NullPointerException if a chunk was reported
    // before sawPost(); now logs and drops the chunk instead.
    if (curPost == null) {
      log.warn("sawChunk called before sawPost; dropping chunk at position " + pos);
      return;
    }
    curPost.addChunk(c, pos);
  }

  public static void printPage(PrintStream out) {

    HashMap<String, Long> bytesFromHost = new HashMap<String, Long>();
    long timeWindowOfSample = Long.MAX_VALUE;
    long now = System.currentTimeMillis();

    out.println("<ul>");

    synchronized (lastPosts) {
      int toSkip = lastPosts.size() - CHUNKS_TO_DISPLAY;

      if (!lastPosts.isEmpty())
        // Fixed: clamp to >= 1 ms so the rate division below cannot throw
        // ArithmeticException when the oldest post arrived this millisecond.
        timeWindowOfSample = Math.max(1L, now - lastPosts.peek().receivedTs);

      for (PostStats stats : lastPosts) {
        Long oldBytes = bytesFromHost.get(stats.src);
        long newBytes = stats.dataSize;
        if (oldBytes != null)
          newBytes += oldBytes;
        bytesFromHost.put(stats.src, newBytes);

        if (--toSkip < 0) { // done skipping
          out.print("<li>");
          out.print(stats.dataSize + " bytes from " + stats.src
              + " at timestamp " + stats.receivedTs);
          out.println(" which was " + ((now - stats.receivedTs) / 1000)
              + " seconds ago");

          out.println("<ol>");
          for (int i = 0; i < stats.count; ++i)
            out.println("<li> " + stats.lengths[i] + " bytes of type "
                + stats.types[i] + ". Adaptor name =" + stats.names[i]
                + " </li>");
          out.println("</ol></li>");
        }
      }
    }
    out.println("</ul>");
    out.println("<ul>");
    for (Map.Entry<String, Long> h : bytesFromHost.entrySet()) {
      out.print("<li>rate from " + h.getKey() + " was "
          + (1000 * h.getValue() / timeWindowOfSample));
      out.println(" bytes/second in last " + timeWindowOfSample / 1000
          + " seconds.</li>");
    }
    out.println("</ul>");

    out.println("total of " + bytesFromHost.size() + " unique hosts seen");

    out.println("<p>current time is " + System.currentTimeMillis() + " </p>");
  }

  public void doneWithPost() {
    synchronized (lastPosts) {
      if (lastPosts.size() > CHUNKS_TO_KEEP)
        lastPosts.removeFirst();
      lastPosts.add(curPost);
    }
  }
}
| 8,174 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/collector/servlet/LogDisplayServlet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.collector.servlet;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import java.io.*;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.datacollection.writer.ExtractorWriter;
import org.apache.hadoop.conf.Configuration;
/**
 * Debugging servlet that buffers recently received chunks in memory and
 * serves them over HTTP: a GET without parameters lists the buffered
 * streams; "?sid=<md5>" dumps one stream's data.
 */
@Deprecated
public class LogDisplayServlet extends HttpServlet {

  /*
  static class StreamName {
    byte[] md5;
    public StreamName(Chunk c) {
    }
    @Override
    public int hashCode() {
      int x=0;
      for(int i=0; i< md5.length; ++i) {
        x ^= (md5[i] << 4 * i);
      }
      return x;
    }
    public boolean equals(Object x) {
      if(x instanceof StreamName)
        return Arrays.equals(md5, ((StreamName)x).md5);
      else return false;
    }
  }*/

  public static final String DEFAULT_PATH = "logs";
  public static final String ENABLED_OPT = "chukwaCollector.showLogs.enabled";
  public static final String BUF_SIZE_OPT = "chukwaCollector.showLogs.buffer";
  // Upper bound, in bytes of chunk data, kept in memory across all streams.
  long BUF_SIZE = 1024* 1024;

  transient Configuration conf;
  // Buffered chunks keyed by stream ID (md5 of source + stream name + tags).
  transient Map<String, Deque<Chunk>> chunksBySID;
  // Stream IDs in arrival order, one entry per buffered chunk; drives pruning.
  Queue<String> receivedSIDs = new LinkedList<String>();
  long totalStoredSize = 0;

  private static final long serialVersionUID = -4602082382919009285L;
  protected final static Logger log = Logger.getLogger(LogDisplayServlet.class);

  public LogDisplayServlet() {
    conf = new Configuration();
    chunksBySID = new HashMap<String, Deque<Chunk>>();
    // Register so ExtractorWriter routes received chunks to this servlet.
    ExtractorWriter.setRecipient(this);
  }

  public LogDisplayServlet(Configuration c) {
    conf = c;
    chunksBySID = new HashMap<String, Deque<Chunk>>();
    ExtractorWriter.setRecipient(this);
  }

  // Test hook: supply the backing map directly.
  public LogDisplayServlet(Configuration c, Map<String, Deque<Chunk>> chunksBySID) {
    conf = c;
    this.chunksBySID = chunksBySID;
    ExtractorWriter.setRecipient(this);
  }

  public void init(ServletConfig servletConf) throws ServletException {
    BUF_SIZE = conf.getLong(BUF_SIZE_OPT, BUF_SIZE);
  }

  // Disable HTTP TRACE; always answer 405.
  @Override
  protected void doTrace(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
  }

  // Stream ID: hex md5 over source, stream name, and tags; stable across posts.
  private String getSID(Chunk c) {
    try {
      MessageDigest md;
      md = MessageDigest.getInstance("MD5");

      md.update(c.getSource().getBytes(Charset.forName("UTF-8")));
      md.update(c.getStreamName().getBytes(Charset.forName("UTF-8")));
      md.update(c.getTags().getBytes(Charset.forName("UTF-8")));
      StringBuilder sb = new StringBuilder();
      byte[] bytes = md.digest();
      for(int i=0; i < bytes.length; ++i) {
        // zero-pad so each byte renders as exactly two hex digits
        if( (bytes[i] & 0xF0) == 0)
          sb.append('0');
        sb.append( Integer.toHexString(0xFF & bytes[i]) );
      }
      return sb.toString();
    } catch(NoSuchAlgorithmException n) {
      // MD5 is mandated by the JRE spec; treated as unreachable.
      log.fatal(n);
      return null;
    }
  }

  // Evict oldest chunks (in global arrival order) until under BUF_SIZE.
  // Invariant: each receivedSIDs entry corresponds to one chunk in its deque.
  private void pruneOldEntries() {
    while(totalStoredSize > BUF_SIZE) {
      String queueToPrune = receivedSIDs.remove();
      Deque<Chunk> stream = chunksBySID.get(queueToPrune);
      assert !stream.isEmpty() : " expected a chunk in stream with ID " + queueToPrune;
      Chunk c = stream.poll();
      if(c != null)
        totalStoredSize -= c.getData().length;
      if(stream.isEmpty()) {  //remove empty deques and their names.
        chunksBySID.remove(queueToPrune);
      }
    }
  }

  // Called by ExtractorWriter with newly received chunks; synchronized
  // against the GET handler which reads the same structures.
  public synchronized void add(List<Chunk> chunks) {
    for(Chunk c : chunks) {
      String sid = getSID(c);
      Deque<Chunk> stream = chunksBySID.get(sid);
      if(stream == null) {
        stream = new LinkedList<Chunk>();
        chunksBySID.put(sid, stream);
      }
      stream.add(c);
      receivedSIDs.add(sid);
      totalStoredSize += c.getData().length;
    }
    pruneOldEntries();
  }

  @Override
  protected synchronized void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException  {

    PrintStream out = new PrintStream(new BufferedOutputStream(resp.getOutputStream()), true, "UTF-8");
    resp.setStatus(200);
    String path = req.getServletPath();
    String streamID = req.getParameter("sid");
    if (streamID != null) {
      // Detail view: dump the raw bytes of one buffered stream.
      try {
        Deque<Chunk> chunks = chunksBySID.get(streamID);
        if(chunks != null) {
          String streamName = getFriendlyName(chunks.peek());
          out.println("<html><title>Chukwa:Received Data</title><body><h2>Data from "+ streamName + "</h2>");
          out.println("<pre>");
          for(Chunk c: chunks) {
            out.write(c.getData());
          }
          out.println("</pre><hr><a href=\""+path+"\">Back to list of streams</a>");
        } else
          out.println("No data");
      } catch(Exception e) {
        out.println("<html><body>No data</body></html>");
      }
      out.println("</body></html>");
    } else {
      // Index view: link to each buffered stream by its stream ID.
      out.println("<html><title>Chukwa:Received Data</title><body><h2>Recently-seen streams</h2><ul>");
      for(Map.Entry<String, Deque<Chunk>> sid: chunksBySID.entrySet())
        out.println("<li> <a href=\"" + path + "?sid="+sid.getKey() + "\">"+ getFriendlyName(sid.getValue().peek()) + "</a></li>");
      out.println("</ul></body></html>");
    }
    out.flush();
  }

  // Display label for a stream: tags/source/streamName.
  private String getFriendlyName(Chunk chunk) {
    if(chunk != null)
      return chunk.getTags() + "/" + chunk.getSource() + "/" + chunk.getStreamName();
    else return "null";
  }
}
| 8,175 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/MemLimitQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.agent.metrics.ChunkQueueMetrics;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
 * An event queue that blocks once a fixed upper limit of data is enqueued.
 *
 * For now, uses the size of the data field. Should really use
 * estimatedSerializedSize()?
 */
public class MemLimitQueue implements ChunkQueue {

  // Fixed: log under this class -- previously registered under
  // WaitingQueue.class (copy-paste error), misfiling all log output.
  static Logger log = Logger.getLogger(MemLimitQueue.class);
  static final ChunkQueueMetrics metrics = new ChunkQueueMetrics("chukwaAgent", "chunkQueue");

  private Queue<Chunk> queue = new LinkedList<Chunk>();
  private long dataSize = 0;   // bytes of chunk data currently enqueued
  private long MAX_MEM_USAGE;  // blocking threshold in bytes

  static final String CHUNK_QUEUE_LIMIT = "chukwaAgent.chunk.queue.limit";
  static final int QUEUE_SIZE = 10 * 1024 * 1024; // default limit: 10 MB

  public MemLimitQueue(Configuration conf) {
    configure(conf);
  }

  /**
   * Enqueues a chunk, blocking while the queue holds MAX_MEM_USAGE bytes.
   * A single chunk larger than the whole limit is dropped with an error
   * (enqueueing it could never succeed and would deadlock the producer).
   *
   * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk)
   */
  public void add(Chunk chunk) throws InterruptedException {
    assert chunk != null : "can't enqueue null chunks";
    synchronized (this) {
      while (chunk.getData().length + dataSize > MAX_MEM_USAGE) {
        try {
          if (dataSize == 0) { // queue is empty, but data is still too big
            log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType()
                + " and source =" + chunk.getStreamName());
            return; // return without sending; otherwise we'd deadlock.
            // this error should probably be fatal; there's no way to recover.
          }
          metrics.fullQueue.set(1);
          this.wait();
          log.info("MemLimitQueue is full [" + dataSize + "]");
        } catch (InterruptedException e) {
          // NOTE(review): interruption of the wait is deliberately swallowed
          // and the wait retried, despite the throws clause on this method --
          // confirm intent before changing this to propagate.
        }
      }
      metrics.fullQueue.set(0);
      dataSize += chunk.getData().length;
      queue.add(chunk);
      metrics.addedChunk.inc();
      metrics.queueSize.set(queue.size());
      metrics.dataSize.set(dataSize);
      this.notifyAll();
    }
  }

  /**
   * Drains up to maxSize bytes of chunks into events, blocking while empty.
   *
   * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List,
   *      int)
   */
  public void collect(List<Chunk> events, int maxSize)
      throws InterruptedException {
    synchronized (this) {
      // we can't just say queue.take() here, since we're holding a lock.
      while (queue.isEmpty()) {
        this.wait();
      }
      int size = 0;
      while (!queue.isEmpty() && (size < maxSize)) {
        Chunk e = this.queue.remove();
        metrics.removedChunk.inc();
        int chunkSize = e.getData().length;
        size += chunkSize;
        dataSize -= chunkSize;
        metrics.dataSize.set(dataSize);
        events.add(e);
      }
      metrics.queueSize.set(queue.size());
      this.notifyAll();
    }

    if (log.isDebugEnabled()) {
      log.debug("WaitingQueue.inQueueCount:" + queue.size()
          + "\tWaitingQueue.collectCount:" + events.size());
    }
  }

  public int size() {
    return queue.size();
  }

  private void configure(Configuration conf) {
    MAX_MEM_USAGE = QUEUE_SIZE;
    if (conf == null) {
      return;
    }
    String limit = conf.get(CHUNK_QUEUE_LIMIT);
    if (limit != null) {
      try {
        // Fixed: parse as long (the field is a long) so limits above
        // Integer.MAX_VALUE bytes are accepted instead of rejected.
        MAX_MEM_USAGE = Long.parseLong(limit);
      } catch (NumberFormatException nfe) {
        log.error("Exception reading property " + CHUNK_QUEUE_LIMIT
            + ". Defaulting internal queue size to " + QUEUE_SIZE);
      }
    }
    log.info("Using MemLimitQueue limit of " + MAX_MEM_USAGE);
  }
}
| 8,176 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/AdaptorResetThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.util.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.hadoop.chukwa.datacollection.sender.AsyncAckSender;
import org.apache.hadoop.chukwa.datacollection.writer.SeqFileWriter;
import org.apache.log4j.Logger;
/**
 * Background thread that restarts adaptors whose sent data has not been
 * acknowledged within the timeout, replaying from the last committed offset.
 */
public class AdaptorResetThread extends Thread {

  static Logger log = Logger.getLogger(AdaptorResetThread.class);
  public static final String TIMEOUT_OPT = "connector.commitpoll.timeout";
  int resetCount = 0;

  // Last-known commit time and high-water mark of bytes sent, per adaptor.
  private static class AdaptorStat {
    long lastCommitTime = 0;
    long maxByteSent = 0;

    public AdaptorStat(long lastCommit, long maxByte) {
      maxByteSent = maxByte;
      lastCommitTime = lastCommit;
    }
  }

  int timeout = 15 * 60 * 1000; // default to wait fifteen minutes for an ack
  // note that this is overridden using the poll and rotate periods.

  // Guarded by 'this'.
  Map<Adaptor, AdaptorStat> status;
  ChukwaAgent agent;
  private volatile boolean running = true;

  public AdaptorResetThread(Configuration conf, ChukwaAgent a) {
    // Default timeout = rotate interval + poll period, unless overridden.
    timeout = conf.getInt(SeqFileWriter.ROTATE_INTERVAL_OPT, timeout / 2)
        + conf.getInt(AsyncAckSender.POLLPERIOD_OPT, timeout / 2);
    timeout = conf.getInt(TIMEOUT_OPT, timeout); // unless overridden
    status = new LinkedHashMap<Adaptor, AdaptorStat>();
    this.agent = a;
    this.setDaemon(true);
  }

  /**
   * Resets all adaptors with outstanding data more than timeSinceLastCommit old.
   * @param timeSinceLastCommit is millisecond since last check point
   * @return the number of reset adaptors
   */
  public int resetTimedOutAdaptors(int timeSinceLastCommit) {
    int resetThisTime = 0;
    long timeoutThresh = System.currentTimeMillis() - timeSinceLastCommit;
    List<Adaptor> toResetList = new ArrayList<Adaptor>(); // also contains
                                                          // stopped adaptors
    synchronized (this) {
      for (Map.Entry<Adaptor, AdaptorStat> ent : status.entrySet()) {
        AdaptorStat stat = ent.getValue();
        ChukwaAgent.Offset off = agent.offset(ent.getKey());
        if (off == null) {
          toResetList.add(ent.getKey());
        } else if (stat.maxByteSent > off.offset // some data outstanding
            && stat.lastCommitTime < timeoutThresh) { // but no progress made
          toResetList.add(ent.getKey());
          // Fixed: the message previously ended at "last commit was " with
          // no value appended.
          log.warn("restarting " + off.id + " at " + off.offset
              + " due to timeout; last commit was " + stat.lastCommitTime);
        }
      }
    }

    for (Adaptor a : toResetList) {
      // Fixed: 'status' is guarded by 'this' everywhere else; take the lock
      // for the removal too, instead of mutating the map unsynchronized.
      synchronized (this) {
        status.remove(a); // it'll get added again when adaptor resumes, if it does
      }
      ChukwaAgent.Offset off = agent.offset(a);
      if (off != null) {
        agent.stopAdaptor(off.id, AdaptorShutdownPolicy.RESTARTING);
        String a_status = a.getCurrentStatus();
        agent.processAddCommand("add " + off.id + "= " + a.getClass().getCanonicalName()
            + " " + a_status + " " + off.offset);
        resetThisTime++;
        // will be implicitly added to table once adaptor starts sending
      }
      // implicitly do nothing if adaptor was stopped. We already removed
      // its entry from the status table.
    }
    resetCount += resetThisTime;
    return resetThisTime;
  }

  // Record bytes sent but not yet acknowledged, per adaptor.
  public synchronized void reportPending(List<AsyncAckSender.CommitListEntry> delayedCommits) {
    long now = System.currentTimeMillis();
    for (AsyncAckSender.CommitListEntry dc : delayedCommits) {
      AdaptorStat a = status.get(dc.adaptor);
      if (a == null)
        status.put(dc.adaptor, new AdaptorStat(now, dc.uuid));
      else if (a.maxByteSent < dc.uuid)
        a.maxByteSent = dc.uuid;
    }
  }

  // Record acknowledgments; refreshes each adaptor's last-commit timestamp.
  public synchronized void reportCommits(Set<Adaptor> commits) {
    long now = System.currentTimeMillis();
    for (Adaptor a : commits) {
      if (status.containsKey(a)) {
        status.get(a).lastCommitTime = now;
      } else
        log.warn("saw commit for adaptor " + a + " before seeing sends");
    }
  }

  // Fixed: synchronized, matching every other access to 'status'.
  public synchronized void reportStop(Adaptor a) {
    status.remove(a);
  }

  public void run() {
    try {
      while (running) {
        Thread.sleep(timeout / 2);
        resetTimedOutAdaptors(timeout);
      }
    } catch (InterruptedException e) {
      // shutdown() interrupts the sleep; fall through and exit the loop.
    }
  }

  /**
   * Stops the polling loop. Backward-compatible addition: the 'running'
   * flag previously had no way to be cleared.
   */
  public void shutdown() {
    running = false;
    this.interrupt();
  }

  public int getResetCount() {
    return resetCount;
  }
}
| 8,177 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/ChukwaRestServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.mortbay.jetty.AbstractConnector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import static org.apache.hadoop.chukwa.datacollection.agent.ChukwaConstants.*;
/**
 * Embedded Jetty server exposing the agent's JAX-RS REST controllers
 * under /rest/v2. Managed as a process-wide singleton via
 * startInstance()/stopInstance().
 */
public class ChukwaRestServer {
  private Configuration conf;
  private Server jettyServer;

  private final Logger log = Logger.getLogger(ChukwaRestServer.class);

  private final String AGENT_HTTP_PORT = "chukwaAgent.http.port";
  private final String AGENT_REST_CONTROLLER_PACKAGES = "chukwaAgent.http.rest.controller.packages";
  private final int HTTP_SERVER_THREADS = 120;

  // Singleton instance; all access is through the synchronized statics below.
  private static ChukwaRestServer instance = null;

  public static synchronized void startInstance(Configuration conf) throws Exception{
    if(instance == null){
      instance = new ChukwaRestServer(conf);
      instance.start();
    }
  }

  public static synchronized void stopInstance() throws Exception {
    if(instance != null) {
      instance.stop();
      instance = null;
    }
  }

  private ChukwaRestServer(Configuration conf){
    this.conf = conf;
  }

  /** Builds the connector, thread pool, and Jersey servlet, then starts Jetty. */
  private void start() throws Exception{
    int portNum = conf.getInt(AGENT_HTTP_PORT, 9090);
    String jaxRsAddlPackages = conf.get(AGENT_REST_CONTROLLER_PACKAGES);
    StringBuilder jaxRsPackages = new StringBuilder(
            "org.apache.hadoop.chukwa.datacollection.agent.rest");

    // Allow the ability to add additional servlets to the server
    if (jaxRsAddlPackages != null)
      jaxRsPackages.append(';').append(jaxRsAddlPackages);

    // Set up jetty connector: SSL socket connector when enabled in config,
    // otherwise a plain NIO connector.
    AbstractConnector jettyConnector;
    if("true".equals(conf.get(SSL_ENABLE))){
      SslSocketConnector sslConnector = new SslSocketConnector();
      sslConnector.setKeystore(conf.get(KEYSTORE_STORE));
      sslConnector.setPassword(conf.get(KEYSTORE_PASSWORD));
      sslConnector.setKeyPassword(conf.get(KEYSTORE_KEY_PASSWORD));
      sslConnector.setKeystoreType(conf.get(KEYSTORE_TYPE, DEFAULT_STORE_TYPE));
      String trustStore = conf.get(TRUSTSTORE_STORE);
      if(trustStore != null){
        sslConnector.setTruststore(trustStore);
        sslConnector.setTrustPassword(conf.get(TRUST_PASSWORD));
        sslConnector.setTruststoreType(conf.get(TRUSTSTORE_TYPE, DEFAULT_STORE_TYPE));
        sslConnector.setNeedClientAuth(false);
      }
      jettyConnector = sslConnector;
    } else {
      jettyConnector = new SelectChannelConnector();
    }
    //jettyConnector.setLowResourcesConnections(HTTP_SERVER_THREADS - 10);
    jettyConnector.setLowResourceMaxIdleTime(1500);
    jettyConnector.setPort(portNum);
    jettyConnector.setReuseAddress(true);

    // Set up jetty server, using connector
    // NOTE(review): new Server(portNum) already creates a default connector,
    // which setConnectors() then replaces; new Server() would suffice.
    jettyServer = new Server(portNum);
    jettyServer.setConnectors(new org.mortbay.jetty.Connector[] { jettyConnector });
    QueuedThreadPool pool = new QueuedThreadPool();
    pool.setMaxThreads(HTTP_SERVER_THREADS);
    jettyServer.setThreadPool(pool);

    // Create the controller servlets: Jersey scans jaxRsPackages for
    // JAX-RS annotated resource classes.
    ServletHolder servletHolder = new ServletHolder(ServletContainer.class);
    servletHolder.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
            "com.sun.jersey.api.core.PackagesResourceConfig");
    servletHolder.setInitParameter("com.sun.jersey.config.property.packages",
            jaxRsPackages.toString());

    // Create the server context and add the servlet
    Context root = new Context(jettyServer, "/rest/v2", Context.SESSIONS);
    // Expose the agent to resource classes via the servlet context.
    root.setAttribute("ChukwaAgent", ChukwaAgent.getAgent());
    root.addServlet(servletHolder, "/*");
    root.setAllowNullPathInfo(false);

    // And finally, fire up the server
    jettyServer.start();
    jettyServer.setStopAtShutdown(true);

    log.info("started Chukwa http agent interface on port " + portNum);
  }

  private void stop() throws Exception{
    jettyServer.stop();
    log.info("Successfully stopped Chukwa http agent interface");
  }
}
| 8,178 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/ChukwaAgent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.datacollection.DataFactory;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.hadoop.chukwa.datacollection.adaptor.NotifyOnCommitAdaptor;
import org.apache.hadoop.chukwa.datacollection.OffsetStatsManager;
import org.apache.hadoop.chukwa.datacollection.agent.metrics.AgentMetrics;
import org.apache.hadoop.chukwa.datacollection.connector.Connector;
import org.apache.hadoop.chukwa.datacollection.connector.http.HttpConnector;
import org.apache.hadoop.chukwa.datacollection.test.ConsoleOutConnector;
import org.apache.hadoop.chukwa.util.AdaptorNamingUtils;
import org.apache.hadoop.chukwa.util.ChukwaUtil;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* The local agent daemon that runs on each machine. This class is designed to
* be embeddable, for use in testing.
* <P>
* The agent will start an HTTP REST interface listening on port. Configs for
* the agent are:
* <ul>
* <li><code>chukwaAgent.http.port</code> Port to listen on (default=9090).</li>
* <li><code>chukwaAgent.http.rest.controller.packages</code> Java packages to
* inspect for JAX-RS annotated classes to be added as servlets to the REST
* server.</li>
* </ul>
*
*/
public class ChukwaAgent implements AdaptorManager {
// boolean WRITE_CHECKPOINTS = true;
static AgentMetrics agentMetrics = new AgentMetrics("ChukwaAgent", "metrics");
private final static Logger log = Logger.getLogger(ChukwaAgent.class);
// Tracks per-adaptor commit offsets/statistics (presumably wired up in
// start(), which is outside this view -- confirm).
private OffsetStatsManager<Adaptor> adaptorStatsManager = null;
// Timer that samples adaptor stats periodically; initialized elsewhere.
private Timer statsCollector = null;
// Shared agent configuration; assigned once by the constructor.
private static Configuration conf = null;
// Process-wide singleton, lazily (re)created by getAgent().
private volatile static ChukwaAgent agent = null;
public Connector connector = null;
private boolean stopped = false;
/**
 * Creates an agent with the default ChukwaConfiguration.
 * Fixed: the previous version constructed and discarded a second instance
 * ({@code agent = new ChukwaAgent(new ChukwaConfiguration())}), leaving
 * THIS instance with null adaptor maps; getAgent() then published the
 * uninitialized instance. Delegating to the main constructor initializes
 * this instance properly.
 */
private ChukwaAgent() {
  this(new ChukwaConfiguration());
}

private ChukwaAgent(Configuration conf) {
  agent = this;
  ChukwaAgent.conf = conf;

  // almost always just reading this; so use a ConcurrentHM.
  // since we wrapped the offset, it's not a structural mod.
  adaptorPositions = new ConcurrentHashMap<Adaptor, Offset>();
  adaptorsByName = new HashMap<String, Adaptor>();
  checkpointNumber = 0;
  stopped = false;
}
/**
 * Returns the singleton agent, creating one with default configuration if
 * none exists or the previous one was stopped. Fixed: synchronized so two
 * concurrent callers cannot each create (and partially publish) an agent.
 */
public static synchronized ChukwaAgent getAgent() {
  if (agent == null || agent.isStopped()) {
    agent = new ChukwaAgent();
  }
  return agent;
}

/**
 * Returns the singleton agent, creating it with the supplied configuration
 * if none exists or the previous one was stopped. Note the configuration is
 * only applied when a new instance is created.
 */
public static synchronized ChukwaAgent getAgent(Configuration conf) {
  if (agent == null || agent.isStopped()) {
    agent = new ChukwaAgent(conf);
  }
  return agent;
}
public void start() throws AlreadyRunningException {
boolean checkPointRestore = conf.getBoolean(
"chukwaAgent.checkpoint.enabled", true);
checkPointBaseName = conf.get("chukwaAgent.checkpoint.name",
"chukwa_checkpoint_");
final int checkPointIntervalMs = conf.getInt(
"chukwaAgent.checkpoint.interval", 5000);
final int statsIntervalMs = conf.getInt(
"chukwaAgent.stats.collection.interval", 10000);
int statsDataTTLMs = conf.getInt(
"chukwaAgent.stats.data.ttl", 1200000);
if (conf.get("chukwaAgent.checkpoint.dir") != null)
checkpointDir = new File(conf.get("chukwaAgent.checkpoint.dir", null));
else
checkPointRestore = false;
if (checkpointDir != null && !checkpointDir.exists()) {
boolean result = checkpointDir.mkdirs();
if(!result) {
log.error("Failed to create check point directory.");
}
}
String tags = conf.get("chukwaAgent.tags", "cluster=\"unknown\"");
DataFactory.getInstance().addDefaultTag(conf.get("chukwaAgent.tags", "cluster=\"unknown_cluster\""));
log.info("Config - CHECKPOINT_BASE_NAME: [" + checkPointBaseName + "]");
log.info("Config - checkpointDir: [" + checkpointDir + "]");
log.info("Config - CHECKPOINT_INTERVAL_MS: [" + checkPointIntervalMs
+ "]");
log.info("Config - DO_CHECKPOINT_RESTORE: [" + checkPointRestore + "]");
log.info("Config - STATS_INTERVAL_MS: [" + statsIntervalMs + "]");
log.info("Config - tags: [" + tags + "]");
if (checkPointRestore) {
log.info("checkpoints are enabled, period is " + checkPointIntervalMs);
}
File initialAdaptors = null;
if (conf.get("chukwaAgent.initial_adaptors") != null)
initialAdaptors = new File(conf.get("chukwaAgent.initial_adaptors"));
try {
if (checkPointRestore) {
restoreFromCheckpoint();
}
} catch (IOException e) {
log.warn("failed to restart from checkpoint: ", e);
}
try {
if (initialAdaptors != null && initialAdaptors.exists())
readAdaptorsFile(initialAdaptors);
} catch (IOException e) {
log.warn("couldn't read user-specified file "
+ initialAdaptors.getAbsolutePath());
}
controlSock = new AgentControlSocketListener(this);
try {
controlSock.tryToBind(); // do this synchronously; if it fails, we know
// another agent is running.
controlSock.start(); // this sets us up as a daemon
log.info("control socket started on port " + controlSock.portno);
} catch (IOException e) {
log.info("failed to bind to socket; aborting agent launch", e);
throw new AlreadyRunningException();
}
// start the HTTP server with stats collection
try {
adaptorStatsManager = new OffsetStatsManager<Adaptor>(statsDataTTLMs);
statsCollector = new Timer("ChukwaAgent Stats Collector");
startHttpServer(conf);
statsCollector.scheduleAtFixedRate(new StatsCollectorTask(),
statsIntervalMs, statsIntervalMs);
} catch (Exception e) {
log.error("Couldn't start HTTP server", e);
throw new RuntimeException(e);
}
// shouldn't start check pointing until we're finishing launching
// adaptors on boot
if (checkPointIntervalMs > 0 && checkpointDir != null) {
checkpointer = new Timer();
checkpointer.schedule(new CheckpointTask(), 0, checkPointIntervalMs);
}
}
// doesn't need an equals(), comparator, etc
public static class Offset {
public Offset(long l, String id) {
offset = l;
this.id = id;
}
final String id;
volatile long offset;
public long offset() {
return this.offset;
}
public String adaptorID() {
return id;
}
}
public static class AlreadyRunningException extends Exception {
private static final long serialVersionUID = 1L;
public AlreadyRunningException() {
super("Agent already running; aborting");
}
}
private static Map<Adaptor, Offset> adaptorPositions;
// basically only used by the control socket thread.
//must be locked before access
private static Map<String, Adaptor> adaptorsByName;
private File checkpointDir; // lock this object to indicate checkpoint in
// progress
private String checkPointBaseName; // base filename for checkpoint files
// checkpoints
private Timer checkpointer;
private volatile boolean needNewCheckpoint = false; // set to true if any
// event has happened
// that should cause a new checkpoint to be written
private int checkpointNumber; // id number of next checkpoint.
// should be protected by grabbing lock on checkpointDir
private AgentControlSocketListener controlSock;
public int getControllerPort() {
return controlSock.getPort();
}
public OffsetStatsManager<Adaptor> getAdaptorStatsManager() {
return adaptorStatsManager;
}
/**
* @param args is command line arguements
* @throws AdaptorException if error registering adaptors
*/
public static void main(String[] args) throws AdaptorException {
try {
if (args.length > 0 && args[0].equals("-help")) {
System.out.println("usage: LocalAgent [-noCheckPoint]"
+ "[default collector URL]");
return;
}
Configuration conf = ChukwaUtil.readConfiguration();
agent = ChukwaAgent.getAgent(conf);
if (agent.anotherAgentIsRunning()) {
log.error("another agent is running (or port has been usurped). "
+ "Bailing out now");
throw new AlreadyRunningException();
}
int uriArgNumber = 0;
if (args.length > 0) {
if (args[uriArgNumber].equals("local")) {
agent.connector = new ConsoleOutConnector(agent);
} else {
if (!args[uriArgNumber].contains("://")) {
args[uriArgNumber] = "http://" + args[uriArgNumber];
}
agent.connector = new HttpConnector(agent, args[uriArgNumber]);
}
} else {
String connectorType = conf.get("chukwa.agent.connector",
"org.apache.hadoop.chukwa.datacollection.connector.PipelineConnector");
agent.connector = (Connector) Class.forName(connectorType).newInstance();
}
agent.start();
agent.connector.start();
log.info("local agent started on port " + agent.getControlSock().portno);
System.out.close();
System.err.close();
} catch (AlreadyRunningException e) {
log.error("agent started already on this machine with same portno;"
+ " bailing out");
System.out
.println("agent started already on this machine with same portno;"
+ " bailing out");
return;
} catch (Exception e) {
e.printStackTrace();
}
}
private boolean anotherAgentIsRunning() {
boolean result = false;
if(controlSock!=null) {
result = !controlSock.isBound();
}
return result;
}
/**
* @return the number of running adaptors inside this local agent
*/
@Override
public int adaptorCount() {
synchronized(adaptorsByName) {
return adaptorsByName.size();
}
}
private void startHttpServer(Configuration conf) throws Exception {
ChukwaRestServer.startInstance(conf);
}
private void stopHttpServer() throws Exception {
ChukwaRestServer.stopInstance();
}
/**
* Take snapshots of offset data so we can report flow rate stats.
*/
private class StatsCollectorTask extends TimerTask {
public void run() {
long now = System.currentTimeMillis();
for(String adaptorId : getAdaptorList().keySet()) {
Adaptor adaptor = getAdaptor(adaptorId);
if(adaptor == null) continue;
Offset offset = adaptorPositions.get(adaptor);
if(offset == null) continue;
adaptorStatsManager.addOffsetDataPoint(adaptor, offset.offset, now);
}
}
}
// words should contain (space delimited):
// 0) command ("add")
// 1) Optional adaptor name, followed by =
// 2) AdaptorClassname
// 3) dataType (e.g. "hadoop_log")
// 4) params <optional>
// (e.g. for files, this is filename,
// but can be arbitrarily many space
// delimited agent specific params )
// 5) offset
private Pattern addCmdPattern = Pattern.compile("[aA][dD][dD]\\s+" // command "add",
// any case, plus
// at least one
// space
+ "(?:" //noncapturing group
+ "([^\\s=]+)" //containing a string (captured)
+ "\\s*=\\s*" //then an equals sign, potentially set off with whitespace
+ ")?" //end optional noncapturing group
+ "([^\\s=]+)\\s+" // the adaptor classname, plus at least one space. No '=' in name
+ "(\\S+)\\s+" // datatype, plus at least one space
+ "(?:" // start a non-capturing group, for the parameters
+ "(.*?)\\s+" // capture the actual parameters reluctantly, followed by
// whitespace
+ ")?" // end non-matching group for params; group is optional
+ "(\\d+)\\s*"); // finally, an offset and some trailing whitespace
/**
* Most of the Chukwa wire protocol is implemented in @link{AgentControlSocketListener}
*
* Unlike the rest of the chukwa wire protocol, add commands can appear in
* initial_adaptors and checkpoint files. So it makes sense to handle them here.
*
*/
public String processAddCommand(String cmd) {
try {
return processAddCommandE(cmd);
} catch(AdaptorException e) {
return null;
}
}
public String processAddCommandE(String cmd) throws AdaptorException {
Matcher m = addCmdPattern.matcher(cmd);
if (m.matches()) {
long offset; // check for obvious errors first
try {
offset = Long.parseLong(m.group(5));
} catch (NumberFormatException e) {
log.warn("malformed line " + cmd);
throw new AdaptorException("bad input syntax");
}
String adaptorID = m.group(1);
String adaptorClassName = m.group(2);
String dataType = m.group(3);
String params = m.group(4);
if (params == null)
params = "";
Adaptor adaptor = AdaptorFactory.createAdaptor(adaptorClassName);
if (adaptor == null) {
log.warn("Error creating adaptor of class " + adaptorClassName);
throw new AdaptorException("Can't load class " + adaptorClassName);
}
String coreParams = adaptor.parseArgs(dataType,params,this);
if(coreParams == null) {
log.warn("invalid params for adaptor: " + params);
throw new AdaptorException("invalid params for adaptor: " + params);
}
if(adaptorID == null) { //user didn't specify, so synthesize
try {
adaptorID = AdaptorNamingUtils.synthesizeAdaptorID(adaptorClassName, dataType, coreParams);
} catch(NoSuchAlgorithmException e) {
log.fatal("MD5 apparently doesn't work on your machine; bailing", e);
shutdown(true);
}
} else if(!adaptorID.startsWith("adaptor_"))
adaptorID = "adaptor_"+adaptorID;
synchronized (adaptorsByName) {
if(adaptorsByName.containsKey(adaptorID))
return adaptorID;
adaptorsByName.put(adaptorID, adaptor);
adaptorPositions.put(adaptor, new Offset(offset, adaptorID));
needNewCheckpoint = true;
try {
adaptor.start(adaptorID, dataType, offset, DataFactory
.getInstance().getEventQueue());
log.info("started a new adaptor, id = " + adaptorID + " function=["+adaptor.toString()+"]");
ChukwaAgent.agentMetrics.adaptorCount.set(adaptorsByName.size());
ChukwaAgent.agentMetrics.addedAdaptor.inc();
return adaptorID;
} catch (Exception e) {
Adaptor failed = adaptorsByName.remove(adaptorID);
adaptorPositions.remove(failed);
adaptorStatsManager.remove(failed);
log.warn("failed to start adaptor", e);
if(e instanceof AdaptorException)
throw (AdaptorException)e;
}
}
} else if (cmd.length() > 0)
log.warn("only 'add' command supported in config files; cmd was: " + cmd);
// no warning for blank line
return null;
}
/**
* Tries to restore from a checkpoint file in checkpointDir. There should
* usually only be one checkpoint present -- two checkpoints present implies a
* crash during writing the higher-numbered one. As a result, this method
* chooses the lowest-numbered file present.
*
* Lines in the checkpoint file are processed one at a time with
* processCommand();
*
* @return true if the restore succeeded
* @throws IOException
*/
private boolean restoreFromCheckpoint() throws IOException {
synchronized (checkpointDir) {
String[] checkpointNames = checkpointDir.list(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith(checkPointBaseName);
}
});
if (checkpointNames == null) {
log.error("Unable to list files in checkpoint dir");
return false;
} else if (checkpointNames.length == 0) {
log.info("No checkpoints found in " + checkpointDir);
return false;
} else if (checkpointNames.length > 2) {
log.warn("expected at most two checkpoint files in " + checkpointDir
+ "; saw " + checkpointNames.length);
}
String lowestName = null;
int lowestIndex = Integer.MAX_VALUE;
for (String n : checkpointNames) {
int index = Integer
.parseInt(n.substring(checkPointBaseName.length()));
if (index < lowestIndex) {
lowestName = n;
lowestIndex = index;
}
}
checkpointNumber = lowestIndex + 1;
File checkpoint = new File(checkpointDir, lowestName);
readAdaptorsFile(checkpoint);
}
return true;
}
private void readAdaptorsFile(File checkpoint) throws FileNotFoundException,
IOException {
log.info("starting adaptors listed in " + checkpoint.getAbsolutePath());
BufferedReader br = new BufferedReader(new InputStreamReader(
new FileInputStream(checkpoint), Charset.forName("UTF-8")));
String cmd = null;
while ((cmd = br.readLine()) != null)
processAddCommand(cmd);
br.close();
}
/**
* Called periodically to write checkpoints
*
* @throws IOException
*/
private void writeCheckpoint() throws IOException {
needNewCheckpoint = false;
synchronized (checkpointDir) {
log.info("writing checkpoint " + checkpointNumber);
FileOutputStream fos = new FileOutputStream(new File(checkpointDir,
checkPointBaseName + checkpointNumber));
PrintWriter out = new PrintWriter(new BufferedWriter(
new OutputStreamWriter(fos, Charset.forName("UTF-8"))));
for (Map.Entry<String, String> stat : getAdaptorList().entrySet()) {
out.println("ADD "+ stat.getKey()+ " = " + stat.getValue());
}
out.close();
File lastCheckpoint = new File(checkpointDir, checkPointBaseName
+ (checkpointNumber - 1));
log.debug("hopefully removing old checkpoint file "
+ lastCheckpoint.getAbsolutePath());
boolean result = lastCheckpoint.delete();
if(!result) {
log.warn("Unable to delete lastCheckpoint file: "+lastCheckpoint.getAbsolutePath());
}
checkpointNumber++;
}
}
public String reportCommit(Adaptor src, long uuid) {
needNewCheckpoint = true;
Offset o = adaptorPositions.get(src);
if (o != null) {
synchronized (o) { // order writes to offset, in case commits are
// processed out of order
if (uuid > o.offset)
o.offset = uuid;
}
log.debug("got commit up to " + uuid + " on " + src + " = " + o.id);
if(src instanceof NotifyOnCommitAdaptor) {
((NotifyOnCommitAdaptor) src).committed(uuid);
}
return o.id;
} else {
log.warn("got commit up to " + uuid + " for adaptor " + src
+ " that doesn't appear to be running: " + adaptorCount()
+ " total");
return null;
}
}
private class CheckpointTask extends TimerTask {
public void run() {
try {
if (needNewCheckpoint) {
writeCheckpoint();
}
} catch (IOException e) {
log.warn("failed to write checkpoint", e);
}
}
}
private String formatAdaptorStatus(Adaptor a) {
return a.getClass().getCanonicalName() + " " + a.getCurrentStatus() +
" " + adaptorPositions.get(a).offset;
}
/**
* Expose the adaptor list. Keys are adaptor ID numbers, values are the
* adaptor status strings.
* @return adaptor list
*/
public Map<String, String> getAdaptorList() {
Map<String, String> adaptors = new HashMap<String, String>(adaptorsByName.size());
synchronized (adaptorsByName) {
for (Map.Entry<String, Adaptor> a : adaptorsByName.entrySet()) {
adaptors.put(a.getKey(), formatAdaptorStatus(a.getValue()));
}
}
return adaptors;
}
public long stopAdaptor(String name, boolean gracefully) {
if (gracefully)
return stopAdaptor(name, AdaptorShutdownPolicy.GRACEFULLY);
else
return stopAdaptor(name, AdaptorShutdownPolicy.HARD_STOP);
}
/**
* Stop the adaptor with given ID number. Takes a parameter to indicate
* whether the adaptor should force out all remaining data, or just exit
* abruptly.
*
* If the adaptor is written correctly, its offset won't change after
* returning from shutdown.
*
* @param name the adaptor to stop
* @param shutdownMode if true, shutdown, if false, hardStop
* @return the number of bytes synched at stop. -1 on error
*/
public long stopAdaptor(String name, AdaptorShutdownPolicy shutdownMode) {
Adaptor toStop;
long offset = -1;
// at most one thread can get past this critical section with toStop != null
// so if multiple callers try to stop the same adaptor, all but one will
// fail
synchronized (adaptorsByName) {
toStop = adaptorsByName.remove(name);
}
if (toStop == null) {
log.warn("trying to stop " + name + " that isn't running");
return offset;
} else {
adaptorPositions.remove(toStop);
adaptorStatsManager.remove(toStop);
}
ChukwaAgent.agentMetrics.adaptorCount.set(adaptorsByName.size());
ChukwaAgent.agentMetrics.removedAdaptor.inc();
try {
offset = toStop.shutdown(shutdownMode);
log.info("shutdown ["+ shutdownMode + "] on " + name + ", "
+ toStop.getCurrentStatus());
} catch (AdaptorException e) {
log.error("adaptor failed to stop cleanly", e);
} finally {
needNewCheckpoint = true;
}
return offset;
}
@Override
public Configuration getConfiguration() {
return conf;
}
public static Configuration getStaticConfiguration() {
return conf;
}
@Override
public Adaptor getAdaptor(String name) {
synchronized(adaptorsByName) {
return adaptorsByName.get(name);
}
}
public Offset offset(Adaptor a) {
Offset o = adaptorPositions.get(a);
return o;
}
public Connector getConnector() {
return connector;
}
public void shutdown() {
shutdown(false);
}
/**
* Triggers agent shutdown. For now, this method doesn't shut down adaptors
* explicitly. It probably should.
* @param force sets flag to exit forcefully
*/
public void shutdown(boolean force) {
controlSock.shutdown(); // make sure we don't get new requests
if (statsCollector != null) {
statsCollector.cancel();
}
try {
stopHttpServer();
} catch (Exception e) {
log.error("Couldn't stop jetty server.", e);
}
// adaptors
synchronized (adaptorsByName) {
// shut down each adaptor
for (Adaptor a : adaptorsByName.values()) {
try {
a.shutdown(AdaptorShutdownPolicy.HARD_STOP);
} catch (AdaptorException e) {
log.warn("failed to cleanly stop " + a, e);
}
}
}
if (checkpointer != null) {
checkpointer.cancel();
try {
if (needNewCheckpoint)
writeCheckpoint(); // write a last checkpoint here, before stopping
} catch (IOException e) {
log.debug(ExceptionUtil.getStackTrace(e));
}
}
adaptorsByName.clear();
adaptorPositions.clear();
adaptorStatsManager.clear();
agent.stop();
if (force)
return;
}
/**
* Set agent into stop state.
*/
private void stop() {
stopped = true;
}
/**
* Check if agent is in stop state.
* @return true if agent is in stop state.
*/
private boolean isStopped() {
return stopped;
}
/**
* Returns the control socket for this agent.
*/
private AgentControlSocketListener getControlSock() {
return controlSock;
}
public String getAdaptorName(Adaptor initiator) {
Offset o = adaptorPositions.get(initiator);
if(o != null)
return o.id;
else return null;
}
}
| 8,179 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/ChukwaConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
/**
 * Configuration property names (and a few default values) for the agent's
 * SSL/TLS settings. This is a pure constants holder, so it is final and
 * cannot be instantiated.
 */
public final class ChukwaConstants {
  public static final String SSL_ENABLE = "chukwa.ssl.enable";
  public static final String KEYSTORE_STORE = "chukwa.ssl.keystore.store";
  public static final String KEYSTORE_PASSWORD = "chukwa.ssl.keystore.password";
  public static final String KEYSTORE_KEY_PASSWORD = "chukwa.ssl.keystore.key.password";
  public static final String KEYSTORE_TYPE = "chukwa.ssl.keystore.type";
  public static final String TRUSTSTORE_STORE = "chukwa.ssl.truststore.store";
  public static final String TRUST_PASSWORD = "chukwa.ssl.trust.password";
  public static final String TRUSTSTORE_TYPE = "chukwa.ssl.truststore.type";
  public static final String SSL_PROTOCOL = "chukwa.ssl.protocol";
  /** Default protocol used when {@link #SSL_PROTOCOL} is unset. */
  public static final String DEFAULT_SSL_PROTOCOL = "TLS";
  /** Default keystore/truststore type used when the type properties are unset. */
  public static final String DEFAULT_STORE_TYPE = "JKS";

  // Utility class: prevent instantiation.
  private ChukwaConstants() {
  }
}
| 8,180 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/NonBlockingMemLimitQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.hadoop.chukwa.datacollection.agent.metrics.ChunkQueueMetrics;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
/**
* An event queue that discards incoming chunks once a fixed upper limit of data
* is enqueued. The method calling add will not block.
*
* For now, uses the size of the data field. Should really use
* estimatedSerializedSize()?
*
*/
public class NonBlockingMemLimitQueue implements ChunkQueue {
  static Logger log = Logger.getLogger(NonBlockingMemLimitQueue.class);
  static final ChunkQueueMetrics metrics = new ChunkQueueMetrics("chukwaAgent",
      "chunkQueue");
  // Config key that overrides the default in-memory byte limit.
  static final String CHUNK_QUEUE_LIMIT = "chukwaAgent.chunk.queue.limit";
  // Default cap on total enqueued chunk data: 10 MB.
  static final int QUEUE_SIZE = 10 * 1024 * 1024;
  private Queue<Chunk> queue = new LinkedList<Chunk>();
  // Total bytes of chunk data currently enqueued; guarded by this monitor.
  private long dataSize = 0;
  // Effective byte limit; set once in configure(), defaults to QUEUE_SIZE.
  private long MAX_MEM_USAGE;

  /**
   * Builds a queue whose memory limit is taken from conf (key
   * chukwaAgent.chunk.queue.limit), falling back to the 10 MB default.
   */
  public NonBlockingMemLimitQueue(Configuration conf) {
    configure(conf);
  }

  /**
   * Enqueues a chunk unless doing so would exceed the memory limit, in which
   * case the chunk is DISCARDED (this queue never blocks the producer).
   *
   * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk)
   */
  public void add(Chunk chunk) throws InterruptedException {
    assert chunk != null : "can't enqueue null chunks";
    int chunkSize = chunk.getData().length;
    synchronized (this) {
      if (chunkSize + dataSize > MAX_MEM_USAGE) {
        if (dataSize == 0) { // queue is empty, but data is still too big
          log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType()
              + " and source =" + chunk.getStreamName());
          return; // return without sending; otherwise we'd deadlock.
          // this error should probably be fatal; there's no way to
          // recover.
        } else {
          metrics.fullQueue.set(1);
          log.warn("Discarding chunk due to NonBlockingMemLimitQueue full [" + dataSize
              + "]");
          return;
        }
      }
      metrics.fullQueue.set(0);
      dataSize += chunk.getData().length;
      queue.add(chunk);
      metrics.addedChunk.inc();
      metrics.queueSize.set(queue.size());
      metrics.dataSize.set(dataSize);
      // Wake any consumer blocked in collect() waiting for data.
      this.notifyAll();
    }
  }

  /**
   * Drains chunks into events, blocking while the queue is empty, and
   * stopping once at least maxSize bytes have been collected.
   *
   * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List,
   *      int)
   */
  public void collect(List<Chunk> events, int maxSize)
      throws InterruptedException {
    synchronized (this) {
      // we can't just say queue.take() here, since we're holding a lock.
      while (queue.isEmpty()) {
        this.wait();
      }
      int size = 0;
      while (!queue.isEmpty() && (size < maxSize)) {
        Chunk e = this.queue.remove();
        metrics.removedChunk.inc();
        int chunkSize = e.getData().length;
        size += chunkSize;
        dataSize -= chunkSize;
        metrics.dataSize.set(dataSize);
        events.add(e);
      }
      metrics.queueSize.set(queue.size());
      this.notifyAll();
    }
    if (log.isDebugEnabled()) {
      log.debug("WaitingQueue.inQueueCount:" + queue.size()
          + "\tWaitingQueue.collectCount:" + events.size());
    }
  }

  /** @return the number of chunks currently enqueued */
  public int size() {
    return queue.size();
  }

  /**
   * Reads the byte limit from conf; any parse failure or missing key leaves
   * the default QUEUE_SIZE in effect.
   */
  private void configure(Configuration conf) {
    MAX_MEM_USAGE = QUEUE_SIZE;
    if(conf == null){
      return;
    }
    String limit = conf.get(CHUNK_QUEUE_LIMIT);
    if(limit != null){
      try{
        MAX_MEM_USAGE = Integer.parseInt(limit);
      } catch(NumberFormatException nfe) {
        log.error("Exception reading property " + CHUNK_QUEUE_LIMIT
            + ". Defaulting internal queue size to " + QUEUE_SIZE);
      }
    }
    log.info("Using NonBlockingMemLimitQueue limit of " + MAX_MEM_USAGE);
  }
}
| 8,181 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/AdaptorManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
/**
* The interface to the agent that is exposed to adaptors.
*
*/
public interface AdaptorManager {

  /** @return the configuration backing this manager */
  Configuration getConfiguration();

  /** @return the number of currently running adaptors */
  int adaptorCount();

  /**
   * @deprecated use {@link #stopAdaptor(String, AdaptorShutdownPolicy)}
   * @param id the adaptor ID to stop
   * @param gracefully true for a graceful shutdown, false for an abrupt stop
   * @return the number of bytes synched at stop
   */
  @Deprecated
  long stopAdaptor(String id, boolean gracefully);

  /**
   * Stops the adaptor with the given ID under the given shutdown policy.
   * @return the number of bytes synched at stop
   */
  long stopAdaptor(String id, AdaptorShutdownPolicy mode);

  /** @return the running adaptor with the given ID, or null if none */
  Adaptor getAdaptor(String id);

  /**
   * Processes an "add" command line, starting the described adaptor.
   * @return the new adaptor's ID, or an implementation-defined value on failure
   */
  String processAddCommand(String cmd);

  /** @return a map of adaptor IDs to adaptor status strings */
  Map<String, String> getAdaptorList();

  /**
   * Called to update the Agent status table.
   *
   * Most adaptors should not call this. It is designed for adaptors that do
   * some sort of local operation that needs checkpointing, but that doesn't
   * emit chunks. For instance, DirTailingAdaptor uses it to track sweeps.
   *
   * @param src the adaptor in question
   * @param uuid the number to record as checkpoint. Must be monotonically increasing.
   * @return the adaptor ID of the associated adaptor, or null if not running.
   */
  public String reportCommit(Adaptor src, long uuid);

  // Null-object implementation: reports no adaptors and ignores all commands.
  // Useful for tests and for adaptors running outside a real agent.
  static AdaptorManager NULL = new AdaptorManager() {
    @Override
    public int adaptorCount() {
      return 0;
    }
    @Override
    public Adaptor getAdaptor(String id) {
      return null;
    }
    @Override
    public Map<String, String> getAdaptorList() {
      return Collections.emptyMap();
    }
    @Override
    public Configuration getConfiguration() {
      return new Configuration();
    }
    @Override
    public String processAddCommand(String cmd) {
      return "";
    }
    public long stopAdaptor(String id, boolean gracefully) {
      return 0;
    }
    @Override
    public long stopAdaptor(String id, AdaptorShutdownPolicy mode) {
      return 0;
    }
    @Override
    public String reportCommit(Adaptor a, long l) {
      return null;
    }
  };
}
| 8,182 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/AdaptorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.log4j.Logger;
/**
* Produces new unconfigured adaptors, given the class name of the appender type.
* Will try the name both in the default package, and then with
* 'org.apache.hadoop.chukwa.datacollection.adaptor' prepended.
*
*/
/**
 * Produces new unconfigured adaptors, given the class name of the appender
 * type. The name is tried verbatim first, then with
 * {@link #PREPENDED_PACKAGE} ('org.apache.hadoop.chukwa.datacollection.adaptor.')
 * prepended.
 */
public class AdaptorFactory {
  public static final String PREPENDED_PACKAGE = "org.apache.hadoop.chukwa.datacollection.adaptor.";
  static Logger log = Logger.getLogger(ChukwaAgent.class);

  /**
   * Instantiate an adaptor that can be added by the {@link ChukwaAgent}.
   *
   * @param className the name of the {@link Adaptor} class to instantiate,
   *        fully qualified or relative to the default adaptor package
   * @return an Adaptor of the specified type, or null if the class could not
   *         be loaded under either name or does not implement Adaptor
   */
  static public Adaptor createAdaptor(String className) {
    Object obj = null;
    try {
      // the following reflection business for type checking is probably
      // unnecessary
      // since it will just throw a ClassCastException on error anyway.
      obj = Class.forName(className).newInstance();
      if (Adaptor.class.isInstance(obj)) {
        return (Adaptor) obj;
      } else
        return null;
    } catch (Exception e1) {
      log.debug("Error instantiating new adaptor by class name, "
          + "attempting again, but with default chukwa package prepended, i.e. "
          + PREPENDED_PACKAGE + className
          + ". " + e1);
      try {
        // if failed, try adding default class prefix
        Object obj2 = Class.forName(
            PREPENDED_PACKAGE + className)
            .newInstance();
        if (Adaptor.class.isInstance(obj2)) {
          // fixed typo: "profided" -> "provided"
          log.debug("Succeeded in finding class by adding default adaptor "
              + "namespace prefix to class name provided");
          return (Adaptor) obj2;
        } else
          return null;
      } catch (Exception e2) {
        // Neither lookup worked; report once at warn level and give up.
        log.warn("Error instantiating new adaptor "+ className + " by classname"
            + " and also with \"o.a.h.c.datacollection.adaptor\" prefix added", e2);
        return null;
      }
    }
  }
}
| 8,183 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/AgentControlSocketListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.*;
import java.nio.charset.Charset;
import java.util.Map;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorShutdownPolicy;
import org.apache.log4j.Logger;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
/**
* Class to handle the agent control protocol. This is a simple line-oriented
* ASCII protocol, that is designed to be easy to work with both
* programmatically and via telnet.
*
* The port to bind to can be specified by setting option
* chukwaAgent.agent.control.port. A port of 0 creates a socket on any free
* port.
*/
/**
 * Class to handle the agent control protocol. This is a simple line-oriented
 * ASCII protocol, that is designed to be easy to work with both
 * programmatically and via telnet.
 *
 * The port to bind to can be specified by setting option
 * chukwaAgent.agent.control.port. A port of 0 creates a socket on any free
 * port.
 */
public class AgentControlSocketListener extends Thread {

  static Logger log = Logger.getLogger(AgentControlSocketListener.class);

  /** The agent instance this listener controls. */
  protected ChukwaAgent agent;
  /** Requested control port; replaced by the actual port once bound. */
  protected int portno;
  /** Listening socket; null until bound and after shutdown(). */
  protected ServerSocket s = null;
  volatile boolean closing = false;
  static final String VERSION = "0.4.0-dev";

  /** When false, the control socket binds to 127.0.0.1 only. */
  public boolean ALLOW_REMOTE = true;
  public static final String REMOTE_ACCESS_OPT = "chukwaAgent.control.remote";

  /** Services a single accepted control connection. */
  private class ListenThread extends Thread {
    Socket connection;

    ListenThread(Socket conn) {
      connection = conn;
      try {
        // Time out idle control connections after one minute so a stalled
        // client cannot pin this thread indefinitely.
        connection.setSoTimeout(60000);
      } catch (SocketException e) {
        // FIX: corrected "settin" typo and routed the stack trace through the
        // logger instead of printStackTrace().
        log.warn("Error while setting soTimeout to 60000", e);
      }
      this.setName("listen thread for " + connection.getRemoteSocketAddress());
    }

    public void run() {
      try {
        InputStream in = connection.getInputStream();
        BufferedReader br = new BufferedReader(new InputStreamReader(in,
            Charset.forName("UTF-8")));
        PrintStream out = new PrintStream(new BufferedOutputStream(
            connection.getOutputStream()), true, "UTF-8");
        String cmd = null;
        // Process one command per line until the peer disconnects.
        while ((cmd = br.readLine()) != null) {
          processCommand(cmd, out);
        }
        connection.close();
        if (log.isDebugEnabled()) {
          log.debug("control connection closed");
        }
      } catch (SocketException e) {
        // FIX: SocketException.getMessage() may be null; compare null-safely
        // to avoid an NPE inside this error path.
        if ("Socket Closed".equals(e.getMessage()))
          log.info("control socket closed");
      } catch (IOException e) {
        log.warn("a control connection broke", e);
        try {
          connection.close();
        } catch (Exception ex) {
          log.debug(ExceptionUtil.getStackTrace(ex));
        }
      }
    }

    /**
     * process a protocol command
     *
     * @param cmd the command given by the user
     * @param out a PrintStream writing to the socket
     * @throws IOException if the connection cannot be written or closed
     */
    public void processCommand(String cmd, PrintStream out) throws IOException {
      String[] words = cmd.split("\\s+");
      if (log.isDebugEnabled()) {
        log.debug("command from " + connection.getRemoteSocketAddress() + ":"
            + cmd);
      }

      if (words[0].equalsIgnoreCase("help")) {
        out.println("you're talking to the Chukwa agent. Commands available: ");
        out.println("add [adaptorname] [args] [offset] -- start an adaptor");
        out.println("shutdown [adaptornumber] -- graceful stop");
        out.println("stop [adaptornumber] -- abrupt stop");
        out.println("list -- list running adaptors");
        out.println("close -- close this connection");
        out.println("stopagent -- stop the whole agent process");
        out.println("stopall -- stop all adaptors");
        out.println("reloadCollectors -- reload the list of collectors");
        out.println("help -- print this message");
        out.println("\t Command names are case-blind.");
      } else if (words[0].equalsIgnoreCase("close")) {
        connection.close();
      } else if (words[0].equalsIgnoreCase("add")) {
        try {
          String newID = agent.processAddCommandE(cmd);
          if (newID != null)
            out.println("OK add completed; new ID is " + newID);
          else
            out.println("failed to start adaptor...check logs for details");
        } catch (AdaptorException e) {
          out.println(e);
        }
      } else if (words[0].equalsIgnoreCase("shutdown")) {
        if (words.length < 2) {
          out.println("need to specify an adaptor to shut down, by number");
        } else {
          sanitizeAdaptorName(out, words);
          // Graceful stop returns the adaptor's final offset, or -1 if the
          // adaptor id is unknown.
          long offset = agent.stopAdaptor(words[1], AdaptorShutdownPolicy.GRACEFULLY);
          if (offset != -1)
            out.println("OK adaptor " + words[1] + " stopping gracefully at "
                + offset);
          else
            out.println("FAIL: perhaps adaptor " + words[1] + " does not exist");
        }
      } else if (words[0].equalsIgnoreCase("stop")) {
        if (words.length < 2) {
          out.println("need to specify an adaptor to shut down, by number");
        } else {
          sanitizeAdaptorName(out, words);
          agent.stopAdaptor(words[1], AdaptorShutdownPolicy.HARD_STOP);
          out.println("OK adaptor " + words[1] + " stopped");
        }
      } else if (words[0].equalsIgnoreCase("reloadCollectors")) {
        agent.getConnector().reloadConfiguration();
        out.println("OK reloadCollectors done");
      } else if (words[0].equalsIgnoreCase("list")) {
        java.util.Map<String, String> adaptorList = agent.getAdaptorList();
        if (log.isDebugEnabled()) {
          log.debug("number of adaptors: " + adaptorList.size());
        }
        for (Map.Entry<String, String> a : adaptorList.entrySet()) {
          out.print(a.getKey());
          out.print(") ");
          out.print(" ");
          out.println(a.getValue());
        }
        out.println("");
      } else if (words[0].equalsIgnoreCase("stopagent")) {
        out.println("stopping agent process.");
        connection.close();
        agent.shutdown(true);
      } else if (words[0].equalsIgnoreCase("stopall")) {
        int stopped = 0;
        for (String id : agent.getAdaptorList().keySet()) {
          agent.stopAdaptor(id, false);
          stopped++;
        }
        out.println("stopped " + stopped + " adaptors");
      } else if (words[0].equals("")) {
        // A blank line elicits a one-line status summary.
        out.println(getStatusLine());
      } else {
        log.warn("unknown command " + words[0]);
        out.println("unknown command " + words[0]);
        out.println("say 'help' for a list of legal commands");
      }
      out.flush();
    }

    /**
     * Prefixes "adaptor_" onto bare numeric adaptor ids, warning the user.
     * Mutates words[1] in place.
     */
    private void sanitizeAdaptorName(PrintStream out, String[] words) {
      if (!words[1].startsWith("adaptor_")) {
        words[1] = "adaptor_" + words[1];
        out.println("adaptor names should start with adaptor_; "
            + "assuming you meant" + words[1]);
      }
    }
  }

  /**
   * Initializes listener, but does not bind to socket.
   * @param agent the agent to control
   */
  public AgentControlSocketListener(ChukwaAgent agent) {
    this.setDaemon(false); // to keep the local agent alive
    this.agent = agent;
    this.portno = agent.getConfiguration().getInt("chukwaAgent.control.port",
        9093);
    this.ALLOW_REMOTE = agent.getConfiguration().getBoolean(REMOTE_ACCESS_OPT, ALLOW_REMOTE);
    // FIX: corrected "Listerner" typo in the log message.
    log.info("AgentControlSocketListener ask for port: " + portno);
    this.setName("control socket listener");
  }

  /**
   * Binds to socket, starts looping listening for commands.
   * Each accepted connection is handed to a daemon ListenThread.
   */
  public void run() {
    try {
      if (!isBound())
        tryToBind();
    } catch (IOException e) {
      return;
    }

    while (!closing) {
      try {
        Socket connection = s.accept();
        if (log.isDebugEnabled()) {
          log.debug("new connection from " + connection.getInetAddress());
        }
        ListenThread l = new ListenThread(connection);
        l.setDaemon(true);
        l.start();
      } catch (IOException e) {
        if (!closing)
          log.warn("control socket error: ", e);
        else {
          log.warn("shutting down listen thread due to shutdown() call");
          break;
        }
      }
    } // end while
  }

  /**
   * Close the control socket, and exit. Triggers graceful thread shutdown.
   */
  public void shutdown() {
    closing = true;
    try {
      if (s != null)
        s.close();
      s = null;
    } catch (IOException e) {
      log.debug(ExceptionUtil.getStackTrace(e));
    } // ignore exception on close
  }

  public boolean isBound() {
    return s != null && s.isBound();
  }

  /**
   * Creates and binds the server socket. SO_REUSEADDR is enabled before
   * binding in both branches; setting it after bind has no effect.
   * @throws IOException if the socket cannot be bound
   */
  public void tryToBind() throws IOException {
    if (ALLOW_REMOTE) {
      // FIX: create the socket unbound so SO_REUSEADDR can be enabled
      // *before* binding; new ServerSocket(port) binds immediately, which
      // made the original post-bind setReuseAddress(true) a no-op.
      s = new ServerSocket();
      s.setReuseAddress(true);
      s.bind(new InetSocketAddress(portno));
    } else { // FIXME: is there a way to allow all local addresses? (including IPv6 local)
      s = new ServerSocket();
      s.setReuseAddress(true);
      s.bind(new InetSocketAddress(InetAddress.getByAddress(new byte[] {127, 0, 0, 1}), portno));
    }
    portno = s.getLocalPort();
    if (s.isBound())
      log.info("socket bound to " + s.getLocalPort());
    else
      log.info("socket isn't bound");
  }

  /**
   * @return the bound local port, or -1 when the socket is absent or unbound.
   */
  public int getPort() {
    // FIX: guard against s being null (e.g. after shutdown()) to avoid an NPE.
    if (s == null || !s.isBound()) {
      return -1;
    } else {
      return portno;
    }
  }

  // FIXME: we also do this in ChunkImpl; should really do it only once
  // and make it visible everywhere?
  private static String localHostAddr;
  static {
    try {
      localHostAddr = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
      localHostAddr = "localhost";
    }
  }

  /** @return a one-line human-readable agent status summary. */
  public String getStatusLine() {
    int adaptorCount = agent.adaptorCount();
    return localHostAddr + ": Chukwa Agent running, version " + VERSION + ", with " + adaptorCount + " adaptors";
  }
}
| 8,184 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/WaitingQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
import org.apache.log4j.Logger;
/**
 * Bounded blocking chunk queue: producers block once 5 chunks are waiting,
 * and collect() blocks until at least one chunk is available.
 */
public class WaitingQueue implements ChunkQueue {

  static Logger log = Logger.getLogger(WaitingQueue.class);

  // Small fixed capacity (5) provides back-pressure on producers.
  private BlockingQueue<Chunk> queue = new LinkedBlockingQueue<Chunk>(5);

  /**
   * Enqueues one chunk, blocking while the queue is full.
   * @param event chunk to enqueue
   */
  public void add(Chunk event) {
    try {
      this.queue.put(event);
    } catch (InterruptedException e) {
      // FIX: restore the interrupt flag instead of silently swallowing it,
      // then return upwards as before.
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Enqueues a batch of chunks without blocking.
   * NOTE(review): unlike add(Chunk), addAll() throws IllegalStateException
   * when capacity is exceeded -- confirm callers expect that.
   * @param events chunks to enqueue
   */
  public void add(List<Chunk> events) {
    this.queue.addAll(events);
  }

  /**
   * Blocks until a chunk is available, then drains up to maxCount chunks
   * (total) into events.
   */
  public void collect(List<Chunk> events, int maxCount) {
    // Workaround to block on the queue: take() one element, then drain
    // whatever else is immediately available.
    try {
      events.add(this.queue.take());
    } catch (InterruptedException e) {
      // FIX: restore the interrupt flag rather than ignoring it.
      Thread.currentThread().interrupt();
    }
    this.queue.drainTo(events, maxCount - 1);

    // FIX: replaced leftover System.out.println() debug output with the
    // logger, guarded like the existing debug message.
    if (log.isDebugEnabled()) {
      log.debug("collect [" + Thread.currentThread().getName() + "] ["
          + events.size() + "]");
      log.debug("WaitingQueue.inQueueCount:" + queue.size()
          + "\tWaitingQueue.collectCount:" + events.size());
    }
  }

  /** @return number of chunks currently waiting. */
  public int size() {
    return queue.size();
  }
}
| 8,185 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/metrics/ChunkQueueActivityMBean.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
/**
 * JMX MBean exposing chunk-queue statistics via the Hadoop metrics framework.
 */
public class ChunkQueueActivityMBean extends MetricsDynamicMBeanBase {

  /** JMX object name returned by registration; may be null on failure. */
  final private ObjectName mbeanName;

  /**
   * Registers this bean as "QueueActivity" under the given service name.
   *
   * @param mr          registry holding the metrics to expose
   * @param serviceName JMX service name to register under
   */
  public ChunkQueueActivityMBean(final MetricsRegistry mr, final String serviceName) {
    super(mr, "ChunkQueue layer statistics");
    mbeanName = MBeanUtil.registerMBean(serviceName, "QueueActivity", this);
  }

  /** Unregisters the MBean if registration succeeded. */
  public void shutdown() {
    if (mbeanName != null) {
      MBeanUtil.unregisterMBean(mbeanName);
    }
  }
}
| 8,186 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/metrics/ChunkQueueMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.metrics;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsLongValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
/**
 * Publishes chunk-queue statistics (queue size, data volume, add/remove
 * throughput, full-queue events) through the Hadoop metrics framework and a
 * companion JMX MBean.
 */
public class ChunkQueueMetrics implements Updater {

  public MetricsRegistry registry = new MetricsRegistry();
  private MetricsRecord outRecord;
  private ChunkQueueActivityMBean queueBean;

  public MetricsIntValue queueSize =
      new MetricsIntValue("queueSize", registry, "Queue size");

  public MetricsLongValue dataSize =
      new MetricsLongValue("dataSize", registry, "Data size");

  public MetricsTimeVaryingInt addedChunk =
      new MetricsTimeVaryingInt("addedChunk", registry, "number of added chunk");

  public MetricsTimeVaryingInt removedChunk =
      new MetricsTimeVaryingInt("removedChunk", registry, "number of removed chunk");

  public MetricsIntValue fullQueue =
      new MetricsIntValue("fullQueue", registry, "Queue is full");

  /**
   * Creates a new instance of QueueMetrics and registers it with the
   * metrics context for periodic updates.
   *
   * @param processName jvm process name of the Agent process
   * @param recordName  mbean record name
   */
  public ChunkQueueMetrics(String processName, String recordName) {
    MetricsContext ctx = MetricsUtil.getContext(processName);
    outRecord = MetricsUtil.createRecord(ctx, recordName);
    queueBean = new ChunkQueueActivityMBean(registry, recordName);
    ctx.registerUpdater(this);
  }

  /**
   * Since this object is a registered updater, this method is called
   * periodically (e.g. every 5 seconds): push every registered metric into
   * the record, then flush it.
   */
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      for (MetricsBase metric : registry.getMetricsList()) {
        metric.pushMetric(outRecord);
      }
    }
    outRecord.update();
  }

  /** Unregisters the companion JMX bean, if one was created. */
  public void shutdown() {
    if (queueBean != null) {
      queueBean.shutdown();
    }
  }
}
| 8,187 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/metrics/AgentMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.metrics;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
/**
 * Publishes agent-level adaptor statistics (current count, adds, removals)
 * through the Hadoop metrics framework and a companion JMX MBean.
 */
public class AgentMetrics implements Updater {

  /** Process-wide singleton used by the agent. */
  public static final AgentMetrics agentMetrics = new AgentMetrics("chukwaAgent", "metrics");

  public MetricsRegistry registry = new MetricsRegistry();
  private MetricsRecord outRecord;
  private AgentActivityMBean activityBean;

  public MetricsIntValue adaptorCount =
      new MetricsIntValue("adaptorCount", registry, "number of new adaptor");

  public MetricsTimeVaryingInt addedAdaptor =
      new MetricsTimeVaryingInt("addedAdaptor", registry, "number of added adaptor");

  public MetricsTimeVaryingInt removedAdaptor =
      new MetricsTimeVaryingInt("removedAdaptor", registry, "number of removed adaptor");

  /**
   * Creates a new instance of AgentMetrics and registers it with the
   * metrics context for periodic updates.
   *
   * @param processName jvm name of agent process
   * @param recordName  mbean record name
   */
  public AgentMetrics(String processName, String recordName) {
    MetricsContext ctx = MetricsUtil.getContext(processName);
    outRecord = MetricsUtil.createRecord(ctx, recordName);
    outRecord.setTag("process", processName);
    activityBean = new AgentActivityMBean(registry, recordName);
    ctx.registerUpdater(this);
  }

  /**
   * Since this object is a registered updater, this method is called
   * periodically (e.g. every 5 seconds): push every registered metric into
   * the record, then flush it.
   */
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      for (MetricsBase metric : registry.getMetricsList()) {
        metric.pushMetric(outRecord);
      }
    }
    outRecord.update();
  }

  /** Unregisters the companion JMX bean, if one was created. */
  public void shutdown() {
    if (activityBean != null) {
      activityBean.shutdown();
    }
  }
}
| 8,188 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/metrics/AgentActivityMBean.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
/**
 * JMX MBean exposing agent-layer statistics via the Hadoop metrics framework.
 */
public class AgentActivityMBean extends MetricsDynamicMBeanBase {

  /** JMX object name returned by registration; may be null on failure. */
  final private ObjectName mbeanName;

  /**
   * Registers this bean as "AgentActivity" under the given service name.
   *
   * @param mr          registry holding the metrics to expose
   * @param serviceName JMX service name to register under
   */
  public AgentActivityMBean(final MetricsRegistry mr, final String serviceName) {
    super(mr, "Agent layer statistics");
    mbeanName = MBeanUtil.registerMBean(serviceName, "AgentActivity", this);
  }

  /** Unregisters the MBean if registration succeeded. */
  public void shutdown() {
    if (mbeanName != null) {
      MBeanUtil.unregisterMBean(mbeanName);
    }
  }
}
| 8,189 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/ContextProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
@Provider
public class ContextProvider implements ContextResolver<JAXBContext> {

  /** Shared JSON-aware JAXB context built over {@link #types}. */
  private final JAXBContext context;

  // Types served by this resolver. Currently empty, so getContext() always
  // returns null; AgentController.class was previously listed here.
  private Class<?>[] types = { };

  /**
   * Builds the shared context using natural JSON notation.
   *
   * @throws JAXBException if the context cannot be created
   */
  public ContextProvider() throws JAXBException {
    this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
        types);
  }

  /**
   * Returns the shared context when objectType is one of the configured
   * types; otherwise null so the default resolver is used.
   */
  public JAXBContext getContext(Class<?> objectType) {
    for (Class<?> candidate : types) {
      if (candidate.equals(objectType)) {
        return context;
      }
    }
    return null;
  }
}
| 8,190 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/AdaptorController.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.datacollection.adaptor.AdaptorException;
import org.apache.hadoop.chukwa.datacollection.adaptor.Adaptor;
import org.apache.hadoop.chukwa.datacollection.OffsetStatsManager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import javax.ws.rs.Path;
import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.PathParam;
import javax.ws.rs.DELETE;
import javax.ws.rs.POST;
import javax.ws.rs.Consumes;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.MediaType;
import javax.servlet.http.HttpServletResponse;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * JAX-RS controller to handle all HTTP request to the Agent that deal with adaptors.
 */
@Path("/adaptor")
public class AdaptorController {

  // NOTE(review): DECIMAL_FORMAT is configured in the static block below but
  // never read in this class; left in place to avoid behavioral churn.
  private static final DecimalFormat DECIMAL_FORMAT = new DecimalFormat();
  private static final Log LOG = LogFactory.getLog(AdaptorController.class);

  static {
    DECIMAL_FORMAT.setMinimumFractionDigits(2);
    DECIMAL_FORMAT.setMaximumFractionDigits(2);
    DECIMAL_FORMAT.setGroupingUsed(false);
  }

  /**
   * Adds an adaptor to the agent and returns the adaptor info
   * @param ac is adaptor configuration
   * @return web status
   *
   * @request.representation.example {@link Examples#CREATE_ADAPTOR_SAMPLE}
   * @response.representation.200.doc Adaptor has been registered
   * @response.representation.200.mediaType application/json
   * @response.representation.200.example {@link Examples#ADAPTOR_STATUS_SAMPLE}
   * @response.representation.400.doc Error in register adaptor
   * @response.representation.400.mediaType text/plain
   * @response.representation.400.example Bad Request
   */
  @POST
  @Consumes({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_PLAIN})
  public Response addAdaptor(AdaptorConfig ac) {
    ChukwaAgent agent = ChukwaAgent.getAgent();
    // Class and data type are mandatory; params and offset are optional.
    if (ac.getAdaptorClass() == null ||
        ac.getDataType() == null) {
      return badRequestResponse("Bad adaptor config.");
    }

    // Build the same line-oriented "add" command the control socket accepts.
    StringBuilder addCommand = new StringBuilder("add ");
    addCommand.append(ac.getAdaptorClass()).append(' ');
    addCommand.append(ac.getDataType());
    if (ac.getAdaptorParams() != null)
      addCommand.append(' ').append(ac.getAdaptorParams());
    addCommand.append(' ').append(ac.getOffset());

    // add the adaptor
    try {
      String adaptorId = agent.processAddCommandE(addCommand.toString());
      return doGetAdaptor(adaptorId);
    } catch (AdaptorException e) {
      LOG.warn("Could not add adaptor for data type: '" + ac.getDataType() +
          "', error: " + e.getMessage());
      return badRequestResponse("Could not add adaptor for data type: '" + ac.getDataType() +
          "', error: " + e.getMessage());
    }
  }

  /**
   * Remove an adaptor from the agent
   *
   * @param adaptorId id of adaptor to remove.
   * @return web status
   * @response.representation.200.doc Delete adaptor by id
   * @response.representation.200.mediaType text/plain
   */
  @DELETE
  @Path("/{adaptorId}")
  @Produces({MediaType.TEXT_PLAIN})
  public Response removeAdaptor(@PathParam("adaptorId") String adaptorId) {
    ChukwaAgent agent = ChukwaAgent.getAgent();

    // validate that we have an adaptorId
    if (adaptorId == null) {
      return badRequestResponse("Missing adaptorId.");
    }

    // validate that we have a valid adaptorId
    if (agent.getAdaptor(adaptorId) == null) {
      return badRequestResponse("Invalid adaptorId: " + adaptorId);
    }

    // stop the adaptor (true = graceful shutdown)
    agent.stopAdaptor(adaptorId, true);
    return Response.ok().build();
  }

  /**
   * Get all adaptors
   * @return web status
   *
   * @response.representation.200.doc List all configured adaptors
   * @response.representation.200.mediaType application/json
   * @response.representation.200.example {@link Examples#ADAPTOR_LIST_SAMPLE}
   */
  @GET
  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
  public Response getAdaptors() {
    return doGetAdaptors();
  }

  /**
   * Get a single adaptor
   *
   * @param adaptorId id of the adaptor to return
   * @return web status
   * @response.representation.200.doc Adaptor status and data transfer rate in 1, 5, 10 minutes averages
   * @response.representation.200.mediaType application/json
   * @response.representation.200.example {@link Examples#ADAPTOR_STATUS_SAMPLE}
   */
  @GET
  @Path("/{adaptorId}")
  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
  public Response getAdaptor(@PathParam("adaptorId") String adaptorId) {
    return doGetAdaptor(adaptorId);
  }

  /**
   * Handles a single adaptor request for rendering data model output.
   *
   * @return Response object
   */
  private Response doGetAdaptor(String adaptorId) {
    return Response.ok(buildAdaptor(adaptorId)).build();
  }

  /**
   * Rendering data model output for all adaptors
   *
   * @return Response object
   */
  private Response doGetAdaptors() {
    return Response.ok(buildAdaptors()).build();
  }

  /**
   * Renders info for one adaptor: id, type, class, params, and averaged
   * transfer rates over 1, 5, and 10 minutes.
   */
  protected AdaptorInfo buildAdaptor(String adaptorId) {
    ChukwaAgent agent = ChukwaAgent.getAgent();
    Adaptor adaptor = agent.getAdaptor(adaptorId);
    OffsetStatsManager<Adaptor> adaptorStats = agent.getAdaptorStatsManager();

    AdaptorInfo info = new AdaptorInfo();
    info.setId(adaptorId);
    info.setDataType(adaptor.getType());
    info.setAdaptorClass(adaptor.getClass().getName());
    // Status is "<class> <params>"; keep everything after the first space.
    // NOTE(review): assumes getCurrentStatus() always contains a space --
    // confirm against Adaptor implementations.
    String[] status = adaptor.getCurrentStatus().split(" ", 2);
    info.setAdaptorParams(status[1]);

    List<AdaptorAveragedRate> rates = new ArrayList<AdaptorAveragedRate>();
    rates.add(new AdaptorAveragedRate(60, adaptorStats.calcAverageRate(adaptor, 60)));
    rates.add(new AdaptorAveragedRate(300, adaptorStats.calcAverageRate(adaptor, 300)));
    rates.add(new AdaptorAveragedRate(600, adaptorStats.calcAverageRate(adaptor, 600)));
    info.setAdaptorRates(rates);

    return info;
  }

  /**
   * Renders info for all adaptors.
   * FIX: the original duplicated the whole body of buildAdaptor() here;
   * delegate to it instead so the two views cannot drift apart.
   */
  protected AdaptorList buildAdaptors() {
    ChukwaAgent agent = ChukwaAgent.getAgent();
    AdaptorList list = new AdaptorList();
    for (String name : agent.getAdaptorList().keySet()) {
      list.add(buildAdaptor(name));
    }
    return list;
  }

  /**
   * Renders bad request response.
   */
  private static Response badRequestResponse(String content) {
    return Response.status(HttpServletResponse.SC_BAD_REQUEST)
        .entity(content).build();
  }
}
| 8,191 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/AdaptorConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAccessType;
/**
 * JAXB bean describing the configuration used to create an adaptor:
 * adaptor class, data type, optional parameter string, and starting offset.
 */
@XmlRootElement
@XmlAccessorType(XmlAccessType.PUBLIC_MEMBER)
public class AdaptorConfig {

  private String id;
  private String dataType;
  private String adaptorClass;
  private String adaptorParams;
  private long offset;

  /** No-arg constructor required by JAXB. */
  public AdaptorConfig() {
  }

  /** @return the adaptor's assigned id, if any. */
  @XmlElement
  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  /** @return the data type this adaptor emits. */
  @XmlElement
  public String getDataType() {
    return dataType;
  }

  public void setDataType(String dataType) {
    this.dataType = dataType;
  }

  /** @return fully-qualified adaptor class name. */
  @XmlElement
  public String getAdaptorClass() {
    return adaptorClass;
  }

  public void setAdaptorClass(String adaptorClass) {
    this.adaptorClass = adaptorClass;
  }

  /** @return adaptor-specific parameter string, or null. */
  @XmlElement
  public String getAdaptorParams() {
    return adaptorParams;
  }

  public void setAdaptorParams(String adaptorParams) {
    this.adaptorParams = adaptorParams;
  }

  /** @return offset at which the adaptor should start. */
  @XmlElement
  public long getOffset() {
    return offset;
  }

  public void setOffset(long offset) {
    this.offset = offset;
  }
}
| 8,192 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/AdaptorList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
/**
 * JAXB bean wrapping a list of {@link AdaptorInfo} entries together with a
 * derived count. The backing list is created lazily on first add().
 */
@XmlRootElement
@XmlAccessorType(XmlAccessType.PUBLIC_MEMBER)
public class AdaptorList {

  private List<AdaptorInfo> adaptorInfo;

  /** @return the backing list; null when nothing has been added or set. */
  @XmlElement
  public List<AdaptorInfo> getAdaptorInfo() {
    return adaptorInfo;
  }

  public void setAdaptorInfo(List<AdaptorInfo> adaptorInfo) {
    this.adaptorInfo = adaptorInfo;
  }

  /** Appends one entry, creating the backing list if needed. */
  public void add(AdaptorInfo info) {
    if (adaptorInfo == null) {
      adaptorInfo = new ArrayList<AdaptorInfo>();
    }
    adaptorInfo.add(info);
  }

  /** @return number of entries; 0 when the list is absent. */
  @XmlElement
  public int getAdaptorCount() {
    return (adaptorInfo == null) ? 0 : adaptorInfo.size();
  }
}
| 8,193 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/Examples.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import java.util.ArrayList;
import java.util.List;
/**
 * Canned sample objects illustrating the agent REST API payloads
 * (adaptor creation request, adaptor status, and adaptor list responses).
 * Pure constants holder — not instantiable.
 */
public class Examples {

  /** Sample request body for registering a SocketAdaptor. */
  public static final AdaptorConfig CREATE_ADAPTOR_SAMPLE = new AdaptorConfig();

  /** Sample status response for a JobSummary SocketAdaptor. */
  public static final AdaptorInfo ADAPTOR_STATUS_SAMPLE = new AdaptorInfo();
  final static List<AdaptorAveragedRate> ADAPTOR_RATES = new ArrayList<AdaptorAveragedRate>();
  // Rates use the (intervalInSeconds, rate) constructor directly.
  public static final AdaptorAveragedRate ADAPTOR_RATE_SAMPLE_PER_MINUTE =
      new AdaptorAveragedRate(60, 100.123);
  public static final AdaptorAveragedRate ADAPTOR_RATE_SAMPLE_PER_FIVE_MINUTE =
      new AdaptorAveragedRate(300, 100.123);
  public static final AdaptorAveragedRate ADAPTOR_RATE_SAMPLE_PER_TEN_MINUTE =
      new AdaptorAveragedRate(600, 100.123);

  /** Sample status response for a SystemMetrics adaptor. */
  public static final AdaptorInfo SYS_ADAPTOR_STATUS_SAMPLE = new AdaptorInfo();
  final static List<AdaptorAveragedRate> SYS_ADAPTOR_RATES = new ArrayList<AdaptorAveragedRate>();
  public static final AdaptorAveragedRate SYS_ADAPTOR_RATE_SAMPLE_PER_MINUTE =
      new AdaptorAveragedRate(60, 9.09);
  public static final AdaptorAveragedRate SYS_ADAPTOR_RATE_SAMPLE_PER_FIVE_MINUTE =
      new AdaptorAveragedRate(300, 7.55);
  public static final AdaptorAveragedRate SYS_ADAPTOR_RATE_SAMPLE_PER_TEN_MINUTE =
      new AdaptorAveragedRate(600, 6.44);

  /** Sample list response containing both adaptor samples above. */
  public static final AdaptorList ADAPTOR_LIST_SAMPLE = new AdaptorList();

  static {
    // Create adaptor Sample
    CREATE_ADAPTOR_SAMPLE.setDataType("JobSummary");
    CREATE_ADAPTOR_SAMPLE.setAdaptorClass("org.apache.hadoop.chukwa.datacollection.adaptor.SocketAdaptor");
    CREATE_ADAPTOR_SAMPLE.setAdaptorParams("9098");
    CREATE_ADAPTOR_SAMPLE.setOffset(0);

    // Adaptor Status Sample
    ADAPTOR_RATES.add(ADAPTOR_RATE_SAMPLE_PER_MINUTE);
    ADAPTOR_RATES.add(ADAPTOR_RATE_SAMPLE_PER_FIVE_MINUTE);
    ADAPTOR_RATES.add(ADAPTOR_RATE_SAMPLE_PER_TEN_MINUTE);
    ADAPTOR_STATUS_SAMPLE.setId("adaptor_93df4746476c9a4b624f6755b122f9dc");
    ADAPTOR_STATUS_SAMPLE.setDataType("JobSummary");
    ADAPTOR_STATUS_SAMPLE.setAdaptorClass("org.apache.hadoop.chukwa.datacollection.adaptor.SocketAdaptor");
    ADAPTOR_STATUS_SAMPLE.setAdaptorParams("9098");
    ADAPTOR_STATUS_SAMPLE.setOffset(1680);
    ADAPTOR_STATUS_SAMPLE.setAdaptorRates(ADAPTOR_RATES);

    // System Adaptor Sample
    SYS_ADAPTOR_RATES.add(SYS_ADAPTOR_RATE_SAMPLE_PER_MINUTE);
    SYS_ADAPTOR_RATES.add(SYS_ADAPTOR_RATE_SAMPLE_PER_FIVE_MINUTE);
    SYS_ADAPTOR_RATES.add(SYS_ADAPTOR_RATE_SAMPLE_PER_TEN_MINUTE);
    SYS_ADAPTOR_STATUS_SAMPLE.setId("adaptor_c79bf882974a14286cffe29d3d4cf0d6");
    SYS_ADAPTOR_STATUS_SAMPLE.setDataType("SystemMetrics");
    SYS_ADAPTOR_STATUS_SAMPLE.setAdaptorClass("org.apache.hadoop.chukwa.datacollection.adaptor.sigar.SystemMetrics");
    SYS_ADAPTOR_STATUS_SAMPLE.setAdaptorParams("5");
    SYS_ADAPTOR_STATUS_SAMPLE.setOffset(5678);
    SYS_ADAPTOR_STATUS_SAMPLE.setAdaptorRates(SYS_ADAPTOR_RATES);

    // List of Adaptors Sample
    List<AdaptorInfo> list = new ArrayList<AdaptorInfo>();
    list.add(ADAPTOR_STATUS_SAMPLE);
    list.add(SYS_ADAPTOR_STATUS_SAMPLE);
    ADAPTOR_LIST_SAMPLE.setAdaptorInfo(list);
  }

  // Constants holder: prevent instantiation.
  private Examples() {
  }
}
| 8,194 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/AdaptorInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
/**
 * JAXB bean extending AdaptorConfig with the adaptor's averaged throughput
 * statistics, as reported by the agent REST interface.
 */
@XmlRootElement
@XmlAccessorType(XmlAccessType.PUBLIC_MEMBER)
public class AdaptorInfo extends AdaptorConfig {

  private List<AdaptorAveragedRate> adaptorRates;

  /** Returns the averaged rate samples for this adaptor (may be null). */
  @XmlElement
  public List<AdaptorAveragedRate> getAdaptorRates() {
    return adaptorRates;
  }

  /** Replaces the averaged rate samples for this adaptor. */
  public void setAdaptorRates(List<AdaptorAveragedRate> rates) {
    this.adaptorRates = rates;
  }
}
| 8,195 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/agent/rest/AdaptorAveragedRate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.agent.rest;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
 * JAXB bean carrying one averaged throughput sample: a rate value together
 * with the averaging interval in seconds, both serialized as XML attributes.
 */
@XmlRootElement
@XmlAccessorType(XmlAccessType.PUBLIC_MEMBER)
public class AdaptorAveragedRate {

  private double rate = 0;
  private long intervalSeconds = 0;

  /** No-arg constructor required by JAXB; both fields start at zero. */
  public AdaptorAveragedRate() {
  }

  /**
   * @param unit averaging interval in seconds
   * @param rate averaged rate over that interval
   */
  public AdaptorAveragedRate(long unit, double rate) {
    this.intervalSeconds = unit;
    this.rate = rate;
  }

  /** Returns the averaged rate value. */
  @XmlAttribute
  public double getRate() {
    return rate;
  }

  public void setRate(double rate) {
    this.rate = rate;
  }

  /** Returns the averaging interval, in seconds. */
  @XmlAttribute
  public long getIntervalInSeconds() {
    return intervalSeconds;
  }

  public void setIntervalInSeconds(long unit) {
    this.intervalSeconds = unit;
  }
}
| 8,196 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/AbstractWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.util.regex.*;
import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.ChunkReceiver;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorFactory;
import org.apache.hadoop.chukwa.datacollection.agent.AdaptorManager;
/**
 * Adaptor wrapper base class: instantiates an inner adaptor, delegates the
 * adaptor lifecycle to it, and interposes itself as the inner adaptor's
 * ChunkReceiver so that subclasses can observe or transform chunks before
 * forwarding them downstream.
 */
public class AbstractWrapper implements NotifyOnCommitAdaptor,ChunkReceiver {
// The wrapped adaptor; created in parseArgs().
Adaptor inner;
// Class name used to instantiate the wrapped adaptor.
String innerClassName;
// First token of the wrapper's params; handed to the inner adaptor as its
// type when parsing its arguments.
String innerType;
// Ultimate downstream receiver; add() forwards chunks here.
ChunkReceiver dest;
AdaptorManager manager;
String adaptorID;
@Override
public String getCurrentStatus() {
// Status is the wrapped class name followed by the inner adaptor's status.
return innerClassName + " " + inner.getCurrentStatus();
}
// Splits params into "first-token rest-of-string": group(1) becomes the
// inner type, group(2) the inner adaptor's own parameter string.
static Pattern p = Pattern.compile("([^ ]+) +([^ ].*)");
/**
 * Note that the name of the inner class will get parsed out as a type.
 * Creates the inner adaptor from {@code innerClassName} and lets it parse
 * the remainder of {@code params}.
 * NOTE(review): despite its name, the first parameter arrives through the
 * generic parseArgs(type, params, manager) contract — confirm against the
 * Adaptor interface that callers really pass a class name here.
 * @return the canonicalized argument string, or null when {@code params}
 *         does not contain at least two space-separated tokens
 */
@Override
public String parseArgs(String innerClassName, String params, AdaptorManager a) {
manager = a;
Matcher m = p.matcher(params);
this.innerClassName = innerClassName;
String innerCoreParams;
if(m.matches()) {
innerType = m.group(1);
inner = AdaptorFactory.createAdaptor(innerClassName);
innerCoreParams = inner.parseArgs(innerType,m.group(2),a);
return innerClassName + innerCoreParams;
}
else return null;
}
@Override
public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
throws AdaptorException {
// Lifecycle is fully delegated to the wrapped adaptor.
return inner.shutdown(shutdownPolicy);
}
@Override
public String getType() {
return innerType;
}
/**
 * Note that the name of the inner class will get parsed out as a type.
 * Starts the inner adaptor, registering this wrapper (not {@code dest}) as
 * the inner adaptor's chunk destination so chunks pass through add().
 */
@Override
public void start(String adaptorID, String type, long offset,
ChunkReceiver dest) throws AdaptorException {
// dummyAdaptorID currently equals adaptorID; kept as a separate variable,
// presumably so subclasses could substitute a different id.
String dummyAdaptorID = adaptorID;
this.dest = dest;
this.adaptorID = adaptorID;
inner.start(dummyAdaptorID, type, offset, this);
}
@Override
public void add(Chunk event) throws InterruptedException {
// Pass-through; subclasses can override to inspect or rewrite chunks.
dest.add(event);
}
@Override
public void committed(long commitedByte) { }
}
| 8,197 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/SocketAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.ObjectInputStream;
import java.net.*;
import java.nio.charset.Charset;
import org.apache.hadoop.chukwa.*;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.spi.LoggingEvent;
/**
* SocketAdaptor reads TCP message from a port and convert the message to Chukwa
* Chunk for transport from Chukwa Agent to Chukwa Collector. Usage:
*
* add SocketAdaptor [DataType] [Port] [SequenceNumber]
*
*/
public class SocketAdaptor extends AbstractAdaptor {
// Layout used to render each received log4j LoggingEvent to text.
PatternLayout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
private final static Logger log = Logger.getLogger(SocketAdaptor.class);
// Polled by the dispatcher and worker loops; cleared by shutdown().
volatile boolean running = true;
// Size of the most recently formatted event; passed to ChunkImpl as the
// sequence id. NOTE(review): this is assigned (=) rather than accumulated
// (+=) in Worker.run(), so it is not a cumulative byte offset — confirm
// whether seqID is meant to advance monotonically.
volatile long bytesReceived = 0;
// TCP port to listen on; overridden by parseArgs().
private int port = 9095;
/**
 * Accept loop: binds a ServerSocket to the configured port and hands each
 * accepted connection to a new Worker thread.
 */
class Dispatcher extends Thread {
private int port;
private ServerSocket listener;
public Dispatcher(int port) {
this.port = port;
}
public void run() {
try{
listener = new ServerSocket();
listener.setReuseAddress(true);
// Retry the bind with doubling delays for up to ~12 seconds.
bindWithExponentialBackoff(listener, port, 12000);
log.info("SocketAdaptor bound successfully to port:" + port);
Socket server;
while(running){
server = listener.accept();
// One worker thread per client connection. NOTE(review): workers are
// non-daemon and untracked; shutdown relies on closing their sockets.
Worker connection = new Worker(server);
Thread t = new Thread(connection);
t.start();
}
} catch (IOException ioe) {
// Also the normal exit path: shutdown() closes the listener, making
// the blocked accept() throw.
log.error("SocketAdaptor Dispatcher problem:", ioe);
} finally {
try {
listener.close();
} catch (IOException e) {
log.warn("IOException closing socket on port:" + port);
}
}
}
public void shutdown() {
// Closing the listener unblocks accept() and ends the dispatch loop.
try {
listener.close();
} catch (IOException e) {
log.debug(ExceptionUtil.getStackTrace(e));
}
}
/**
 * Attempts to bind {@code ss} to port {@code p}, doubling the wait after
 * each failure until bound or the accumulated wait exceeds
 * {@code maxDelay} milliseconds.
 * @throws IOException if the wait budget is exhausted or the sleep is
 *         interrupted (NOTE(review): interrupt status is not restored
 *         before the InterruptedException is converted to IOException)
 */
protected void bindWithExponentialBackoff(ServerSocket ss, int p,
int maxDelay) throws IOException {
int backoff = 1000;
int waitedTime = 0;
while (!ss.isBound()) {
try {
ss.bind(new InetSocketAddress(p));
} catch (IOException bindEx) {
backoff *= 2;
log.warn("IOException in bind:" + bindEx);
log.warn("Retrying bind to port " + p + " in milliseconds:" + backoff);
try {
Thread.sleep(backoff);
} catch (InterruptedException e) {
throw new IOException(
"Interrupted while trying to connect to port:" + p);
}
}
waitedTime += backoff;
if (waitedTime > maxDelay) {
throw new IOException("Could not bind to port:" + p
+ " after waiting " + waitedTime
+ " milliseconds. Abandoning this SocketAdaptor.");
}
}
}
}
/**
 * Per-connection reader: deserializes log4j LoggingEvents from the socket,
 * formats each with the adaptor's layout, and forwards it downstream as a
 * Chunk.
 */
class Worker implements Runnable {
private ObjectInputStream ois;
private Socket server;
public Worker(Socket server) {
this.server = server;
}
public void run() {
LoggingEvent event;
try {
// SECURITY(review): native Java deserialization of data read straight
// off the network; a malicious peer could send crafted objects.
// Consider an ObjectInputFilter or a non-serialized wire format.
ois = new ObjectInputStream(
new BufferedInputStream(server.getInputStream()));
if (ois != null) {
while(running) {
// read an event from the wire
event = (LoggingEvent) ois.readObject();
byte[] bytes = layout.format(event).getBytes(Charset.forName("UTF-8"));
bytesReceived=bytes.length;
Chunk c = new ChunkImpl(type, java.net.InetAddress.getLocalHost().getHostName(), bytesReceived, bytes, SocketAdaptor.this);
dest.add(c);
}
}
} catch(java.io.EOFException e) {
// Normal end of stream: the peer closed its connection.
log.debug("Caught java.io.EOFException:", e);
} catch(java.net.SocketException e) {
log.debug("Caught java.net.SocketException:", e);
} catch(InterruptedIOException e) {
Thread.currentThread().interrupt();
log.debug("Caught java.io.InterruptedIOException: ", e);
} catch(IOException e) {
log.debug("Caught java.io.IOException: "+e);
} catch(Exception e) {
log.error("Unexpected exception. Closing conneciton.", e);
} finally {
// Best-effort cleanup of the stream and socket.
if (ois != null) {
try {
ois.close();
} catch(Exception e) {
log.info("Could not close connection.", e);
}
}
if (server != null) {
try {
server.close();
} catch(InterruptedIOException e) {
Thread.currentThread().interrupt();
} catch(IOException ex) {
log.debug(ExceptionUtil.getStackTrace(ex));
}
}
}
}
public void shutdown() {
// Force-close the stream and socket to break a blocked readObject().
try {
ois.close();
server.close();
} catch (IOException e) {
log.debug(ExceptionUtil.getStackTrace(e));
}
}
}
Dispatcher disp;
@Override
public String parseArgs(String s) {
// Single argument: the TCP port to listen on.
port = Integer.parseInt(s);
return s;
}
@Override
public void start(long offset) throws AdaptorException {
// Launch the accept loop on a daemon thread; offset is unused here.
try {
disp = new Dispatcher(port);
disp.setDaemon(true);
disp.start();
} catch (Exception e) {
throw new AdaptorException(ExceptionUtil.getStackTrace(e));
}
}
@Override
public String getCurrentStatus() {
return type + " " + port;
}
@Override
public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
throws AdaptorException {
// Stop the loops and close the listener; always reports offset 0.
try {
running = false;
disp.shutdown();
} catch(Exception e) {
log.debug(ExceptionUtil.getStackTrace(e));
}
return 0;
}
}
| 8,198 |
0 | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection | Create_ds/chukwa/core/src/main/java/org/apache/hadoop/chukwa/datacollection/adaptor/HeartbeatAdaptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.datacollection.adaptor;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.chukwa.ChunkImpl;
import org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat.StatusChecker;
import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
/**
 * Periodically runs a configured set of StatusCheckers, packages their
 * results into a JSON heartbeat, and delivers it either through the normal
 * chunk queue (when a PipelineConnector is configured) or directly over a
 * TCP socket to the configured host/port.
 */
public class HeartbeatAdaptor extends AbstractAdaptor {
private final Logger log = Logger.getLogger(HeartbeatAdaptor.class);
// Schedules the recurring heartbeat task.
private Timer timer = new Timer();
// Reused JSON document; its fields are overwritten on every beat.
JSONObject status = new JSONObject();
// Heartbeat interval in seconds (last numeric token of the args).
private int period = 3;
// Checkers instantiated from the parenthesized list in the args.
private List<StatusChecker> allCheckers = new ArrayList<StatusChecker>();
// Fallback package tried when a checker name is not fully qualified.
private final String DEFAULT_PACKAGE = "org.apache.hadoop.chukwa.datacollection.adaptor.heartbeat";
// Raw argument string, echoed back by getCurrentStatus().
private String arguments;
// Sequence number of emitted heartbeats; doubles as the resume offset.
long seqId = 0;
private final String STREAM_NAME = "STATUS";
// True when the agent uses a PipelineConnector; heartbeats then flow
// through the chunk queue instead of a direct socket.
private boolean _shouldUseConnector = false;
// Direct-delivery target, read from chukwa.http.writer.host/port config.
private String _host;
private int _port;
/**
 * Timer task that assembles and ships one heartbeat per run.
 */
class Task extends TimerTask{
@Override
public void run() {
try {
heartbeat();
} catch (InterruptedException e) {
log.error(ExceptionUtil.getStackTrace(e));
}
}
@SuppressWarnings("unchecked")
private void heartbeat() throws InterruptedException {
// Build {"time": <now ms>, "components": [<checker statuses>...]}.
status.put("time", System.currentTimeMillis());
JSONArray array = new JSONArray();
for (StatusChecker checker : allCheckers) {
array.add(checker.getStatus());
}
status.put("components", array);
if(_shouldUseConnector){
ChunkImpl chunk = new ChunkImpl(type, STREAM_NAME, seqId, status.toString()
.getBytes(Charset.forName("UTF-8")), HeartbeatAdaptor.this);
dest.add(chunk);
} else {
sendDirectly(status.toString());
}
seqId++;
}
// Sends a 4-byte length prefix followed by the UTF-8 payload over a
// short-lived TCP connection. Failures are logged at debug and otherwise
// ignored (best-effort delivery).
private void sendDirectly(String data) {
DataOutputStream dos = null;
Socket sock = null;
byte[] bdata = data.getBytes(Charset.forName("UTF-8"));
try {
sock = new Socket(_host, _port);
dos = new DataOutputStream(sock.getOutputStream());
dos.writeInt(bdata.length);
dos.write(bdata);
dos.flush();
} catch (Exception e) {
log.debug(ExceptionUtil.getStackTrace(e));
} finally {
if (dos != null) {
try {
dos.close();
} catch (IOException e) {
log.debug("Error closing dataoutput stream:" + e);
}
}
if (sock != null) {
try {
sock.close();
} catch (IOException e) {
log.debug("Error closing socket: " + e);
}
}
}
}
}
@Override
public String getCurrentStatus() {
return type + " " + arguments;
}
@Override
public long shutdown(AdaptorShutdownPolicy shutdownPolicy)
throws AdaptorException {
// Stops future heartbeats; returns the sequence number as resume offset.
timer.cancel();
return seqId;
}
@Override
public void start(long offset) throws AdaptorException {
// Resume numbering from the given offset; first beat fires immediately,
// then every `period` seconds.
seqId = offset;
timer.scheduleAtFixedRate(new Task(), 0, period * 1000);
}
/**
 * Parses "(checker1 args..., checker2 args...) period".
 * NOTE(review): the usage strings below mention host/port arguments, but
 * the regex only accepts the parenthesized checker list plus a period;
 * host and port actually come from chukwa.http.writer.* configuration.
 * @return the original argument string on success, null on any parse or
 *         checker-instantiation failure
 */
@Override
public String parseArgs(String s) {
// match patterns like localhost 1234 (aa host1 port1, bb, cc host2 port2) 60
Pattern p1 = Pattern.compile("(\\(.*\\),?)+\\s(\\d+)");
Matcher m1 = p1.matcher(s);
if(!m1.matches()){
log.error("Invalid adaptor parameters. Usage: HeartbeatAdaptor DefaultProcessor <host> <port> <list-of-status-checkers> <period> <offset>");
return null;
}
try{
String providers = m1.group(1);
period = Integer.parseInt(m1.group(2));
// match pattern like (aa host1 port1, bb, cc host2, port2) and capture the string without braces
Pattern p2 = Pattern.compile("\\(((?:(?:[\\w/:\\.]+\\s*)+,?\\s*)+)\\)");
Matcher m2 = p2.matcher(providers);
if(!m2.matches()){
log.error("Invalid adaptor parameters. Usage: PingAdaptor DefaultProcessor <host> <port> <list-of-status-providers> <period> <offset>");
log.error("Specify list of status-providers as (provider1 args1, provider2 args2...). Pattern used for matching:"+p2.pattern());
return null;
}
String[] checkerList = m2.group(1).split(",");
for(String checker: checkerList){
// Each entry is "CheckerClassName arg1 arg2 ...".
String args[] = checker.trim().split(" ");
String checkerName = args[0];
try {
// NOTE(review): Class.newInstance() is deprecated since Java 9;
// failures here fall through to the default-package retry below.
Object c = Class.forName(checkerName).newInstance();
if(StatusChecker.class.isInstance(c)){
StatusChecker sp = (StatusChecker)c;
sp.init(Arrays.copyOfRange(args, 1, args.length));
allCheckers.add(sp);
} else {
throw new Exception("Unsupported checker:"+checkerName);
}
} catch (Exception e) {
log.debug("Error instantiating StatusChecker:" + checkerName + " due to " + e);
// Retry with the heartbeat package prefixed to the checker name.
String newProvider = DEFAULT_PACKAGE + "." + checkerName;
log.debug("Trying with default package name " + DEFAULT_PACKAGE);
try {
Object c = Class.forName(newProvider).newInstance();
if(StatusChecker.class.isInstance(c)){
StatusChecker sp = (StatusChecker)c;
sp.init(Arrays.copyOfRange(args, 1, args.length));
allCheckers.add(sp);
} else {
log.error("Unsupported StatusChecker:"+newProvider);
return null;
}
} catch (Exception e1) {
log.error("Error instantiating StatusChecker:" + checker + " due to " + e1);
log.error(ExceptionUtil.getStackTrace(e1));
return null;
}
}
}
} catch(NumberFormatException nfe){
log.error(ExceptionUtil.getStackTrace(nfe));
return null;
}
arguments = s;
// Delivery target and mode come from agent configuration, not the args.
Configuration chukwaConf = ChukwaAgent.getStaticConfiguration();
_host = chukwaConf.get("chukwa.http.writer.host", "localhost");
_port = Integer.parseInt(chukwaConf.get("chukwa.http.writer.port", "8802"));
String connector = chukwaConf.get("chukwa.agent.connector");
if(connector != null && connector.contains("PipelineConnector")){
_shouldUseConnector = true;
}
return s;
}
}
| 8,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.