index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/HDFSBadDataStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import org.apache.flume.Event;
/**
 * Test-only data stream that lets a test inject faults or latency into
 * {@code append()} via event headers:
 * <ul>
 *   <li>{@code fault} — throw an IOException immediately.</li>
 *   <li>{@code slow} — sleep for the header value (millis) before appending.</li>
 * </ul>
 */
public class HDFSBadDataStream extends HDFSDataStream {
  public class HDFSBadSeqWriter extends HDFSSequenceFile {
    @Override
    public void append(Event e) throws IOException {
      if (e.getHeaders().containsKey("fault")) {
        throw new IOException("Injected fault");
      } else if (e.getHeaders().containsKey("slow")) {
        long waitTime = Long.parseLong(e.getHeaders().get("slow"));
        try {
          Thread.sleep(waitTime);
        } catch (InterruptedException eT) {
          // Restore the interrupt flag before translating to IOException so
          // callers higher up can still observe the interruption.
          Thread.currentThread().interrupt();
          throw new IOException("append interrupted", eT);
        }
      }
      super.append(e);
    }
  }
}
| 9,800 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/HDFSTestWriterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Test writer factory that hands out fault-injecting writers and counts how
 * many times a writer was opened (so tests can assert on reopen behavior).
 */
public class HDFSTestWriterFactory extends HDFSWriterFactory {
  static final String TestSequenceFileType = "SequenceFile";
  static final String BadDataStreamType = "DataStream";
  // so we can get a handle to this one in our test.
  final AtomicInteger openCount = new AtomicInteger(0);

  @Override
  public HDFSWriter getWriter(String fileType) throws IOException {
    // Compare with equals(): the original reference comparison (==) only
    // worked by accident of compile-time String interning.
    if (TestSequenceFileType.equals(fileType)) {
      return new HDFSTestSeqWriter(openCount.incrementAndGet());
    } else if (BadDataStreamType.equals(fileType)) {
      return new HDFSBadDataStream();
    } else {
      throw new IOException("File type " + fileType + " not supported");
    }
  }
}
| 9,801 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/MockFsDataOutputStream.java | /**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flume.sink.hdfs;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
 * Mock output stream whose {@code close()} either succeeds (delegating to the
 * wrapped stream) or throws an IOException, depending on the flag passed at
 * construction. Used to exercise sink close/retry paths.
 */
public class MockFsDataOutputStream extends FSDataOutputStream {

  private static final Logger logger =
      LoggerFactory.getLogger(MockFsDataOutputStream.class);

  // Whether close() should succeed; fixed at construction time.
  final boolean closeSucceed;

  public MockFsDataOutputStream(FSDataOutputStream wrapMe, boolean closeSucceed)
      throws IOException {
    // NOTE(review): the stats argument is null — presumably acceptable for
    // the test scenarios this mock serves.
    super(wrapMe.getWrappedStream(), null);
    this.closeSucceed = closeSucceed;
  }

  @Override
  public void close() throws IOException {
    // Parameterized logging instead of string concatenation (SLF4J idiom).
    logger.info("Close Succeeded - {}", closeSucceed);
    if (closeSucceed) {
      logger.info("closing file");
      super.close();
    } else {
      throw new IOException("MockIOException");
    }
  }
}
| 9,802 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestHDFSEventSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.nio.charset.CharsetDecoder;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Clock;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink.Status;
import org.apache.flume.SystemClock;
import org.apache.flume.Transaction;
import org.apache.flume.channel.BasicTransactionSemantics;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.lifecycle.LifecycleException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
public class TestHDFSEventSink {
private HDFSEventSink sink;
private String testPath;
private static final Logger LOG = LoggerFactory
.getLogger(HDFSEventSink.class);
static {
System.setProperty("java.security.krb5.realm", "flume");
System.setProperty("java.security.krb5.kdc", "blah");
}
/** Best-effort removal of the per-test output directory; failures are logged only. */
private void dirCleanup() {
  try {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path(testPath);
    if (fs.exists(dir)) {
      fs.delete(dir, true);
    }
  } catch (IOException eIO) {
    LOG.warn("IO Error in test cleanup", eIO);
  }
}
// TODO: use System.getProperty("file.separator") instead of hardcoded '/'
@Before
public void setUp() {
  LOG.debug("Starting...");
  /*
   * FIXME: Use a dynamic path to support concurrent test execution. Also,
   * beware of the case where this path is used for something or when the
   * Hadoop config points at file:/// rather than hdfs://. We need to find a
   * better way of testing HDFS related functionality.
   */
  long nowMillis = Calendar.getInstance().getTimeInMillis();
  long threadId = Thread.currentThread().getId();
  // Unique-ish per run + per thread, under the local filesystem.
  testPath = "file:///tmp/flume-test." + nowMillis + "." + threadId;
  sink = new HDFSEventSink();
  sink.setName("HDFSEventSink-" + UUID.randomUUID().toString());
  dirCleanup();
}
@After
public void tearDown() {
  // Keep the generated files around only when explicitly requested via env var.
  if (System.getenv("hdfs_keepFiles") == null) {
    dirCleanup();
  }
}
/** Batched text append against the default (checksummed) local filesystem. */
@Test
public void testTextBatchAppend() throws Exception {
  doTestTextBatchAppend(false);
}
/** Same scenario as {@code testTextBatchAppend} but with hdfs.useRawLocalFileSystem=true. */
@Test
public void testTextBatchAppendRawFS() throws Exception {
  doTestTextBatchAppend(true);
}
/**
 * Drives the sink with enough batches to force multiple count-based rolls
 * (rollCount=10, size/interval rolling disabled), then verifies the number of
 * rolled files and that every event body landed in exactly one text file.
 *
 * @param useRawLocalFileSystem whether the sink should bypass the checksummed
 *        local filesystem wrapper
 */
public void doTestTextBatchAppend(boolean useRawLocalFileSystem)
    throws Exception {
  LOG.debug("Starting...");
  final long rollCount = 10;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  // Only count-based rolling is active: interval and size rolling are zeroed.
  Context context = new Context();
  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.rollInterval", "0");
  context.put("hdfs.rollSize", "0");
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.useRawLocalFileSystem",
      Boolean.toString(useRawLocalFileSystem));
  context.put("hdfs.fileType", "DataStream");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel to roll twice
  for (i = 1; i <= (rollCount * 10) / batchSize; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contains
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data:
  // one file per full rollCount of events, plus one for any remainder.
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  // check the contents of the all files
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
@Test
public void testLifecycle() throws InterruptedException, LifecycleException {
  LOG.debug("Starting...");
  Context ctx = new Context();
  ctx.put("hdfs.path", testPath);
  Configurables.configure(sink, ctx);
  sink.setChannel(new MemoryChannel());
  // A configured sink must start and stop cleanly with no traffic.
  sink.start();
  sink.stop();
}
@Test
public void testEmptyChannelResultsInStatusBackoff()
    throws InterruptedException, LifecycleException, EventDeliveryException {
  LOG.debug("Starting...");
  Context ctx = new Context();
  ctx.put("hdfs.path", testPath);
  // keep-alive=0 so the drain attempt returns immediately instead of blocking.
  ctx.put("keep-alive", "0");
  Channel channel = new MemoryChannel();
  Configurables.configure(sink, ctx);
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  // Processing an empty channel must signal BACKOFF to the sink runner.
  Assert.assertEquals(Status.BACKOFF, sink.process());
  sink.stop();
}
/**
 * With Hadoop security switched to kerberos, configuring the sink with a
 * keytab path that does not exist must fail fast with a descriptive
 * IllegalArgumentException. Security is restored to "simple" in the finally
 * block because UserGroupInformation is global, static state.
 */
@Test
public void testKerbFileAccess() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {
  LOG.debug("Starting testKerbFileAccess() ...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  String kerbConfPrincipal = "user1/localhost@EXAMPLE.COM";
  String kerbKeytab = "/usr/lib/flume/nonexistkeytabfile"; // deliberately missing

  //turn security on
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.kerberosPrincipal", kerbConfPrincipal);
  context.put("hdfs.kerberosKeytab", kerbKeytab);

  try {
    Configurables.configure(sink, context);
    Assert.fail("no exception thrown");
  } catch (IllegalArgumentException expected) {
    Assert.assertTrue(expected.getMessage().contains(
        "Keytab is not a readable file"));
  } finally {
    //turn security off — must run even on assertion failure, since the
    // kerberos setting would otherwise leak into subsequent tests.
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "simple");
    UserGroupInformation.setConfiguration(conf);
  }
}
/**
 * Appends three small batches as plain text (DataStream + Text format),
 * then verifies count-based rolling (rollCount=3) and that every event body
 * is present in the output files.
 */
@Test
public void testTextAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contains
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/**
 * Same shape as {@code testTextAppend} but with serializer=AVRO_EVENT, so
 * output files are Avro containers; contents are checked by decoding each
 * record's {@code body} field.
 */
@Test
public void testAvroAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");
  context.put("serializer", "AVRO_EVENT");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contains
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/**
 * Appends batches with the default (sequence file) writer and verifies
 * count-based rolling plus the presence of every event body in the output.
 */
@Test
public void testSimpleAppend() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 4;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contains
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/**
 * Like {@code testSimpleAppend} but with hdfs.useLocalTimeStamp=true and a
 * fixed Clock injected into the bucket path resolver, so the %s escape in the
 * path expands deterministically to {@code currentTime / 1000}. The real
 * SystemClock is restored at the end because the bucket-path clock is static.
 */
@Test
public void testSimpleAppendLocalTime()
    throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
  final long currentTime = System.currentTimeMillis();
  // Frozen clock: every call returns the same instant captured above.
  Clock clk = new Clock() {
    @Override
    public long currentTimeMillis() {
      return currentTime;
    }
  };

  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 4;
  String newPath = testPath + "/singleBucket/%s" ;
  String expectedPath = testPath + "/singleBucket/" +
      String.valueOf(currentTime / 1000);
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(expectedPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.useLocalTimeStamp", String.valueOf(true));

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.setBucketClock(clk);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contains
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
  // The clock in bucketpath is static, so restore the real clock
  sink.setBucketClock(new SystemClock());
}
/**
 * Appends events into a date-escaped bucket path (%Y-%m-%d/%H, UTC) using
 * the per-event "timestamp" header, then verifies all bodies across the
 * resulting bucket directories. No file-count assertion here — events land
 * in different buckets per batch.
 */
@Test
public void testAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(testPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.timeZone", "UTC");
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (int i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);

      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }
  sink.stop();
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/**
 * Injects a one-shot write fault ("fault-once" header, roughly every 30th
 * event) through HDFSTestWriterFactory and verifies that the transaction is
 * rolled back and retried: no events are lost, and the sink counter records
 * exactly one write failure. Extra process() calls at the end drain the
 * events re-queued by the rollback.
 */
@Test
public void testBadSimpleAppend() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 4;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // Replace the @Before-created sink with one backed by the fault-injecting
  // writer factory.
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();

  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      // inject fault
      if ((totalEvents % 30) == 1) {
        event.getHeaders().put("fault-once", "");
      }
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    LOG.info("Process events: " + sink.process());
  }
  LOG.info("Process events to end of transaction max: " + sink.process());
  LOG.info("Process events to injected fault: " + sink.process());
  LOG.info("Process events remaining events: " + sink.process());
  sink.stop();
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);

  SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
  Assert.assertEquals(1, sc.getEventWriteFail());
}
/**
 * Recursively collects the absolute paths of all regular files under
 * {@code input}; if {@code input} is itself a file, returns just that path.
 *
 * @param input file or directory path to scan
 * @return absolute paths of all files found (possibly empty)
 */
private List<String> getAllFiles(String input) {
  List<String> output = Lists.newArrayList();
  File dir = new File(input);
  if (dir.isFile()) {
    output.add(dir.getAbsolutePath());
  } else if (dir.isDirectory()) {
    String[] children = dir.list();
    // File.list() returns null on an I/O error or if the directory vanishes
    // between the isDirectory() check and the listing — guard against NPE.
    if (children != null) {
      for (String file : children) {
        File subDir = new File(dir, file);
        output.addAll(getAllFiles(subDir.getAbsolutePath()));
      }
    }
  }
  return output;
}
/**
 * Asserts that every expected event body appears in some sequence file under
 * {@code dir} whose name starts with {@code prefix}. Matched bodies are
 * removed from {@code bodies}; the list must be empty at the end.
 *
 * @throws IOException on any read failure
 */
private void verifyOutputSequenceFiles(FileSystem fs, Configuration conf, String dir,
    String prefix, List<String> bodies) throws IOException {
  int found = 0;
  int expected = bodies.size();
  for (String outputFile : getAllFiles(dir)) {
    String name = (new File(outputFile)).getName();
    if (name.startsWith(prefix)) {
      // try-with-resources: the reader was previously leaked if next() threw.
      try (SequenceFile.Reader reader =
          new SequenceFile.Reader(fs, new Path(outputFile), conf)) {
        LongWritable key = new LongWritable();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value)) {
          // NOTE(review): platform-default charset, symmetric with the
          // body.getBytes() calls used when the events were written.
          String body = new String(value.getBytes(), 0, value.getLength());
          if (bodies.contains(body)) {
            LOG.debug("Found event body: {}", body);
            bodies.remove(body);
            found++;
          }
        }
      }
    }
  }
  if (!bodies.isEmpty()) {
    for (String body : bodies) {
      LOG.error("Never found event body: {}", body);
    }
  }
  Assert.assertTrue("Found = " + found + ", Expected = " +
      expected + ", Left = " + bodies.size() + " " + bodies,
      bodies.size() == 0);
}
/**
 * Asserts that every expected event body appears as a line in some text file
 * under {@code dir} whose name starts with {@code prefix}. Matched bodies are
 * removed from {@code bodies}; the list must be empty at the end.
 *
 * @throws IOException on any read failure
 */
private void verifyOutputTextFiles(FileSystem fs, Configuration conf, String dir, String prefix,
    List<String> bodies) throws IOException {
  int found = 0;
  int expected = bodies.size();
  for (String outputFile : getAllFiles(dir)) {
    String name = (new File(outputFile)).getName();
    if (name.startsWith(prefix)) {
      // try-with-resources: reader (and the wrapped input stream) was
      // previously leaked if readLine() threw.
      try (BufferedReader reader = new BufferedReader(
          new InputStreamReader(fs.open(new Path(outputFile))))) {
        String body = null;
        while ((body = reader.readLine()) != null) {
          bodies.remove(body);
          found++;
        }
      }
    }
  }
  Assert.assertTrue("Found = " + found + ", Expected = " +
      expected + ", Left = " + bodies.size() + " " + bodies,
      bodies.size() == 0);
}
/**
 * Asserts that every expected event body appears as the {@code body} field of
 * some record in an Avro container file under {@code dir} whose name starts
 * with {@code prefix}. Matched bodies are removed from {@code bodies}; the
 * list must be empty at the end.
 *
 * @throws IOException on any read/decode failure
 */
private void verifyOutputAvroFiles(FileSystem fs, Configuration conf, String dir, String prefix,
    List<String> bodies) throws IOException {
  int found = 0;
  int expected = bodies.size();
  for (String outputFile : getAllFiles(dir)) {
    String name = (new File(outputFile)).getName();
    if (name.startsWith(prefix)) {
      DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
      // try-with-resources: both streams were previously leaked if decoding
      // threw mid-file; declaration order guarantees avroStream closes first.
      try (FSDataInputStream input = fs.open(new Path(outputFile));
           DataFileStream<GenericRecord> avroStream =
               new DataFileStream<GenericRecord>(input, reader)) {
        GenericRecord record = new GenericData.Record(avroStream.getSchema());
        while (avroStream.hasNext()) {
          avroStream.next(record);
          ByteBuffer body = (ByteBuffer) record.get("body");
          CharsetDecoder decoder = Charsets.UTF_8.newDecoder();
          String bodyStr = decoder.decode(body).toString();
          LOG.debug("Removing event: {}", bodyStr);
          bodies.remove(bodyStr);
          found++;
        }
      }
    }
  }
  Assert.assertTrue("Found = " + found + ", Expected = " +
      expected + ", Left = " + bodies.size() + " " + bodies,
      bodies.size() == 0);
}
/**
 * Ensure that when a write throws an IOException we are
 * able to continue to progress in the next process() call.
 * This relies on Transactional rollback semantics for durability and
 * the behavior of the BucketWriter class of close()ing upon IOException.
 */
@Test
public void testCloseReopen()
    throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final int numBatches = 4;
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  int i = 1, j = 1;

  // Sink backed by the fault-injecting writer factory.
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);

  Configurables.configure(sink, context);

  MemoryChannel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();

  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    // NOTE(review): repeated channel.getTransaction() calls here appear to
    // rely on the channel returning this thread's current transaction —
    // confirm against BasicChannelSemantics before restructuring.
    channel.getTransaction().begin();
    try {
      for (j = 1; j <= batchSize; j++) {
        Event event = new SimpleEvent();
        eventDate.clear();
        eventDate.set(2011, i, i, i, 0); // year, month, day, hourOfDay, minute
        event.getHeaders().put("timestamp",
            String.valueOf(eventDate.getTimeInMillis()));
        event.getHeaders().put("hostname", "Host" + i);

        String body = "Test." + i + "." + j;
        event.setBody(body.getBytes());
        bodies.add(body);
        // inject fault: writer fails every append until it is reopened
        event.getHeaders().put("fault-until-reopen", "");
        channel.put(event);
      }
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    LOG.info("execute sink to process the events: " + sink.process());
  }
  LOG.info("clear any events pending due to errors: " + sink.process());
  sink.stop();

  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
  SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
  Assert.assertEquals(1, sc.getEventWriteFail());
}
  /**
   * Test that the old bucket writer is closed at the end of rollInterval and
   * a new one is used for the next set of events.
   */
  @Test
  public void testCloseReopenOnRollTime()
      throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
    LOG.debug("Starting...");
    final int numBatches = 4;
    final String fileName = "FlumeData";
    final long batchSize = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    // Test writer factory exposes openCount, asserted on below.
    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    // Count/size based rolling disabled: only the 2s rollInterval can roll files.
    Context context = new Context();
    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(0));
    context.put("hdfs.rollSize", String.valueOf(0));
    context.put("hdfs.rollInterval", String.valueOf(2));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
    Configurables.configure(sink, context);

    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    for (i = 1; i < numBatches; i++) {
      channel.getTransaction().begin();
      try {
        for (j = 1; j <= batchSize; j++) {
          Event event = new SimpleEvent();
          eventDate.clear();
          eventDate.set(2011, i, i, i, 0); // yy mm dd
          event.getHeaders().put("timestamp",
              String.valueOf(eventDate.getTimeInMillis()));
          event.getHeaders().put("hostname", "Host" + i);
          String body = "Test." + i + "." + j;
          event.setBody(body.getBytes());
          bodies.add(body);
          // inject fault: header interpreted by the test writer
          // (see HDFSTestWriterFactory for the exact semantics)
          event.getHeaders().put("count-check", "");
          channel.put(event);
        }
        channel.getTransaction().commit();
      } finally {
        channel.getTransaction().close();
      }
      LOG.info("execute sink to process the events: " + sink.process());
      // Make sure the first file gets rolled due to rollTimeout.
      if (i == 1) {
        Thread.sleep(2001);
      }
    }
    LOG.info("clear any events pending due to errors: " + sink.process());
    sink.stop();

    // at least two writers must have been opened: one before the roll, one after
    Assert.assertTrue(badWriterFactory.openCount.get() >= 2);
    LOG.info("Total number of bucket writers opened: {}",
        badWriterFactory.openCount.get());
    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName,
        bodies);
  }
  /**
   * Test that a close due to roll interval removes the bucketwriter from
   * sfWriters map.
   */
  @Test
  public void testCloseRemovesFromSFWriters()
      throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
    LOG.debug("Starting...");
    final String fileName = "FlumeData";
    final long batchSize = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    // Count/size rolling disabled: only the 1s rollInterval can close files.
    Context context = new Context();
    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(0));
    context.put("hdfs.rollSize", String.valueOf(0));
    context.put("hdfs.rollInterval", String.valueOf(1));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
    // key under which the sink caches the BucketWriter for this bucket
    String expectedLookupPath = newPath + "/FlumeData";
    Configurables.configure(sink, context);

    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    channel.getTransaction().begin();
    try {
      for (j = 1; j <= 2 * batchSize; j++) {
        Event event = new SimpleEvent();
        eventDate.clear();
        eventDate.set(2011, i, i, i, 0); // yy mm dd
        event.getHeaders().put("timestamp",
            String.valueOf(eventDate.getTimeInMillis()));
        event.getHeaders().put("hostname", "Host" + i);
        String body = "Test." + i + "." + j;
        event.setBody(body.getBytes());
        bodies.add(body);
        // inject fault: header interpreted by the test writer
        event.getHeaders().put("count-check", "");
        channel.put(event);
      }
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    LOG.info("execute sink to process the events: " + sink.process());
    // writer must be cached immediately after the first batch
    Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
    // Make sure the first file gets rolled due to rollTimeout.
    Thread.sleep(2001);
    // the close triggered by rollInterval must evict the writer from the map
    Assert.assertFalse(sink.getSfWriters().containsKey(expectedLookupPath));
    LOG.info("execute sink to process the events: " + sink.process());
    // A new bucket writer should have been created for this bucket, so the
    // same key must be present in the sfWriters map again.
    Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
    sink.stop();
    LOG.info("Total number of bucket writers opened: {}",
        badWriterFactory.openCount.get());
    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName,
        bodies);
  }
/*
* append using slow sink writer.
* verify that the process returns backoff due to timeout
*/
@Test
public void testSlowAppendFailure() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 2;
String newPath = testPath + "/singleBucket";
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
// create HDFS sink with slow writer
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
context.put("hdfs.callTimeout", Long.toString(1000));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
// push the event batches into channel
for (i = 0; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
event.getHeaders().put("slow", "1500");
event.setBody(("Test." + i + "." + j).getBytes());
channel.put(event);
}
txn.commit();
txn.close();
// execute sink to process the events
Status satus = sink.process();
// verify that the append returned backoff due to timeotu
Assert.assertEquals(satus, Status.BACKOFF);
}
sink.stop();
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(2, sc.getEventWriteFail());
}
/*
* append using slow sink writer with specified append timeout
* verify that the data is written correctly to files
*/
private void slowAppendTestHelper(long appendTimeout)
throws InterruptedException, IOException, LifecycleException, EventDeliveryException,
IOException {
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 2;
String newPath = testPath + "/singleBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
// create HDFS sink with slow writer
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
context.put("hdfs.appendTimeout", String.valueOf(appendTimeout));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 0; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
event.getHeaders().put("slow", "1500");
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contains
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
// Note that we'll end up with two files with only a head
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/*
* append using slow sink writer with long append timeout
* verify that the data is written correctly to files
*/
@Test
public void testSlowAppendWithLongTimeout() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
slowAppendTestHelper(3000);
}
/*
* append using slow sink writer with no timeout to make append
* synchronous. Verify that the data is written correctly to files
*/
@Test
public void testSlowAppendWithoutTimeout() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
slowAppendTestHelper(0);
}
  /**
   * Verifies that files are rolled solely by the idle timeout: all other
   * rolling mechanisms are disabled, a 1s idle timeout is configured, and the
   * test expects exactly two finalized (non-.tmp) output files at the end.
   */
  @Test
  public void testCloseOnIdle() throws IOException, EventDeliveryException, InterruptedException {
    String hdfsPath = testPath + "/idleClose";

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(hdfsPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);
    Context context = new Context();
    context.put("hdfs.path", hdfsPath);
    /*
     * All three rolling methods are disabled so the only
     * way a file can roll is through the idle timeout.
     */
    context.put("hdfs.rollCount", "0");
    context.put("hdfs.rollSize", "0");
    context.put("hdfs.rollInterval", "0");
    context.put("hdfs.batchSize", "2");
    context.put("hdfs.idleTimeout", "1");
    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.start();

    // ten events, consumed two at a time (batchSize = 2)
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int i = 0; i < 10; i++) {
      Event event = new SimpleEvent();
      event.setBody(("test event " + i).getBytes());
      channel.put(event);
    }
    txn.commit();
    txn.close();

    sink.process();
    sink.process();
    Thread.sleep(1101);
    // previous file should have timed out now
    // this can throw BucketClosedException (from the bucketWriter having
    // closed); this is not an issue as the sink will retry and get a fresh
    // bucketWriter so long as the onClose handler properly removes
    // bucket writers that were closed.
    sink.process();
    sink.process();
    Thread.sleep(500); // shouldn't be enough for a timeout to occur
    sink.process();
    sink.process();
    sink.stop();
    // expect exactly two files, both finalized (renamed away from .tmp)
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);
    Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","),
        2, fList.length);
    Assert.assertTrue(!fList[0].getName().endsWith(".tmp") &&
        !fList[1].getName().endsWith(".tmp"));
    fs.close();
  }
/**
* This test simulates what happens when a batch of events is written to a compressed sequence
* file (and thus hsync'd to hdfs) but the file is not yet closed.
*
* When this happens, the data that we wrote should still be readable.
*/
@Test
public void testBlockCompressSequenceFileWriterSync() throws IOException, EventDeliveryException {
String hdfsPath = testPath + "/sequenceFileWriterSync";
FileSystem fs = FileSystem.get(new Configuration());
// Since we are reading a partial file we don't want to use checksums
fs.setVerifyChecksum(false);
fs.setWriteChecksum(false);
// Compression codecs that don't require native hadoop libraries
String [] codecs = {"BZip2Codec", "DeflateCodec"};
for (String codec : codecs) {
sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Collections.singletonList(
"single-event"
));
sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Arrays.asList(
"multiple-events-1",
"multiple-events-2",
"multiple-events-3",
"multiple-events-4",
"multiple-events-5"
));
}
fs.close();
}
private void sequenceFileWriteAndVerifyEvents(FileSystem fs, String hdfsPath, String codec,
Collection<String> eventBodies)
throws IOException, EventDeliveryException {
Path dirPath = new Path(hdfsPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", hdfsPath);
// Ensure the file isn't closed and rolled
context.put("hdfs.rollCount", String.valueOf(eventBodies.size() + 1));
context.put("hdfs.rollSize", "0");
context.put("hdfs.rollInterval", "0");
context.put("hdfs.batchSize", "1");
context.put("hdfs.fileType", "SequenceFile");
context.put("hdfs.codeC", codec);
context.put("hdfs.writeFormat", "Writable");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
for (String eventBody : eventBodies) {
Transaction txn = channel.getTransaction();
txn.begin();
Event event = new SimpleEvent();
event.setBody(eventBody.getBytes());
channel.put(event);
txn.commit();
txn.close();
sink.process();
}
// Sink is _not_ closed. The file should remain open but
// the data written should be visible to readers via sync + hflush
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] paths = FileUtil.stat2Paths(dirStat);
Assert.assertEquals(1, paths.length);
SequenceFile.Reader reader =
new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.stream(fs.open(paths[0])));
LongWritable key = new LongWritable();
BytesWritable value = new BytesWritable();
for (String eventBody : eventBodies) {
Assert.assertTrue(reader.next(key, value));
Assert.assertArrayEquals(eventBody.getBytes(), value.copyBytes());
}
Assert.assertFalse(reader.next(key, value));
}
private Context getContextForRetryTests() {
Context context = new Context();
context.put("hdfs.path", testPath + "/%{retryHeader}");
context.put("hdfs.filePrefix", "test");
context.put("hdfs.batchSize", String.valueOf(100));
context.put("hdfs.fileType", "DataStream");
context.put("hdfs.serializer", "text");
context.put("hdfs.closeTries","3");
context.put("hdfs.rollCount", "1");
context.put("hdfs.retryInterval", "1");
return context;
}
@Test
public void testBadConfigurationForRetryIntervalZero() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.retryInterval", "0");
Configurables.configure(sink, context);
Assert.assertEquals(1, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryIntervalNegative() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.retryInterval", "-1");
Configurables.configure(sink, context);
Assert.assertEquals(1, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryCountZero() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.closeTries" ,"0");
Configurables.configure(sink, context);
Assert.assertEquals(Integer.MAX_VALUE, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryCountNegative() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.closeTries" ,"-4");
Configurables.configure(sink, context);
Assert.assertEquals(Integer.MAX_VALUE, sink.getTryCount());
}
@Test
public void testRetryRename()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
testRetryRename(true);
testRetryRename(false);
}
  /**
   * Pushes events through a sink backed by a MockFileSystem that fails rename
   * attempts, then asserts the BucketWriters retried the rename the expected
   * total number of times.
   *
   * @param closeSucceed whether the mock filesystem's close() calls succeed
   */
  private void testRetryRename(boolean closeSucceed)
      throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
    LOG.debug("Starting...");
    String newPath = testPath + "/retryBucket";

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    // NOTE(review): 6 presumably configures the injected failure count (it
    // matches the rename-attempt assertion below) -- confirm in MockFileSystem
    MockFileSystem mockFs = new MockFileSystem(fs, 6, closeSucceed);

    Context context = getContextForRetryTests();
    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.setMockFs(mockFs);
    HDFSWriter hdfsWriter = new MockDataStream(mockFs);
    hdfsWriter.configure(context);
    sink.setMockWriter(hdfsWriter);
    sink.start();

    // push the event batches into channel (bucket "v1")
    for (int i = 0; i < 2; i++) {
      Transaction txn = channel.getTransaction();
      txn.begin();
      Map<String, String> hdr = Maps.newHashMap();
      hdr.put("retryHeader", "v1");

      channel.put(EventBuilder.withBody("random".getBytes(), hdr));
      txn.commit();
      txn.close();

      // execute sink to process the events
      sink.process();
    }

    // push the event batches into channel (bucket "v2")
    for (int i = 0; i < 2; i++) {
      Transaction txn = channel.getTransaction();
      txn.begin();
      Map<String, String> hdr = Maps.newHashMap();
      hdr.put("retryHeader", "v2");
      channel.put(EventBuilder.withBody("random".getBytes(), hdr));
      txn.commit();
      txn.close();

      // execute sink to process the events
      sink.process();
    }
    TimeUnit.SECONDS.sleep(5); // Sleep till all retries are done.

    Collection<BucketWriter> writers = sink.getSfWriters().values();

    // sum rename attempts across all cached bucket writers
    int totalRenameAttempts = 0;
    for (BucketWriter writer : writers) {
      LOG.info("Rename tries = " + writer.renameTries.get());
      totalRenameAttempts += writer.renameTries.get();
    }
    // stop clears the sfWriters map, so we need to compute the
    // close tries count before stopping the sink.
    sink.stop();
    Assert.assertEquals(6, totalRenameAttempts);
  }
  /**
   * BucketWriter.append() can throw a BucketClosedException when called from
   * HDFSEventSink.process() due to a race condition between HDFSEventSink.process() and the
   * BucketWriter's close threads.
   * This test case tests whether if this happens the newly created BucketWriter will be flushed.
   * For more details see FLUME-3085
   */
  @Test
  public void testFlushedIfAppendFailedWithBucketClosedException() throws Exception {
    final Set<BucketWriter> bucketWriters = new HashSet<>();
    // subclass the sink so every BucketWriter it creates is wrapped in a spy
    sink = new HDFSEventSink() {
      @Override
      BucketWriter initializeBucketWriter(String realPath, String realName, String lookupPath,
                                          HDFSWriter hdfsWriter, WriterCallback closeCallback) {
        BucketWriter bw = Mockito.spy(super.initializeBucketWriter(realPath, realName, lookupPath,
            hdfsWriter, closeCallback));
        try {
          // create mock BucketWriters where the first append() succeeds but
          // the second call throws a BucketClosedException
          Mockito.doCallRealMethod()
              .doThrow(BucketClosedException.class)
              .when(bw).append(Mockito.any(Event.class));
        } catch (IOException | InterruptedException e) {
          Assert.fail("This shouldn't happen, as append() is called during mocking.");
        }
        bucketWriters.add(bw);
        return bw;
      }
    };

    Context context = new Context(ImmutableMap.of("hdfs.path", testPath));
    Configurables.configure(sink, context);

    Channel channel = Mockito.spy(new MemoryChannel());
    Configurables.configure(channel, new Context());

    // channel hands out exactly two events, then null (empty channel)
    final Iterator<Event> events = Iterators.forArray(
        EventBuilder.withBody("test1".getBytes()), EventBuilder.withBody("test2".getBytes()));
    Mockito.doAnswer(new Answer() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        return events.hasNext() ? events.next() : null;
      }
    }).when(channel).take();

    sink.setChannel(channel);
    sink.start();

    sink.process();

    // channel.take() should have been called 3 times (2 events + 1 null)
    Mockito.verify(channel, Mockito.times(3)).take();

    // the second append failed with BucketClosedException, so a second writer
    // (and a second output file) must exist
    FileSystem fs = FileSystem.get(new Configuration());
    int fileCount = 0;
    for (RemoteIterator<LocatedFileStatus> i = fs.listFiles(new Path(testPath), false);
         i.hasNext(); i.next()) {
      fileCount++;
    }
    Assert.assertEquals(2, fileCount);

    Assert.assertEquals(2, bucketWriters.size());
    // It is expected that flush() method was called exactly once for every BucketWriter
    for (BucketWriter bw : bucketWriters) {
      Mockito.verify(bw, Mockito.times(1)).flush();
    }
    sink.stop();
  }
@Test
public void testChannelException() {
LOG.debug("Starting...");
Context context = new Context();
context.put("hdfs.path", testPath);
context.put("keep-alive", "0");
Configurables.configure(sink, context);
Channel channel = Mockito.mock(Channel.class);
Mockito.when(channel.take()).thenThrow(new ChannelException("dummy"));
Mockito.when(channel.getTransaction())
.thenReturn(Mockito.mock(BasicTransactionSemantics.class));
sink.setChannel(channel);
sink.start();
try {
sink.process();
} catch (EventDeliveryException e) {
//
}
sink.stop();
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(1, sc.getChannelReadFail());
}
@Test
public void testEmptyInUseSuffix() {
String inUseSuffixConf = "aaaa";
Context context = new Context();
context.put("hdfs.path", testPath);
context.put("hdfs.inUseSuffix", inUseSuffixConf);
//hdfs.emptyInUseSuffix not defined
Configurables.configure(sink, context);
String inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals(inUseSuffixConf, inUseSuffix);
context.put("hdfs.emptyInUseSuffix", "true");
Configurables.configure(sink, context);
inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals("", inUseSuffix);
context.put("hdfs.emptyInUseSuffix", "false");
Configurables.configure(sink, context);
inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals(inUseSuffixConf, inUseSuffix);
}
}
| 9,803 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.Context;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that {@link SequenceFileSerializerFactory} resolves the built-in
 * "Text" and "Writable" aliases as well as a fully-qualified custom builder
 * class name to the expected serializer implementations.
 */
public class TestSequenceFileSerializerFactory {

  @Test
  public void getTextFormatter() {
    SequenceFileSerializer serializer =
        SequenceFileSerializerFactory.getSerializer("Text", new Context());

    assertTrue(serializer != null);
    assertTrue(serializer.getClass().getName(),
        serializer instanceof HDFSTextSerializer);
  }

  @Test
  public void getWritableFormatter() {
    SequenceFileSerializer serializer =
        SequenceFileSerializerFactory.getSerializer("Writable", new Context());

    assertTrue(serializer != null);
    assertTrue(serializer.getClass().getName(),
        serializer instanceof HDFSWritableSerializer);
  }

  @Test
  public void getCustomFormatter() {
    // a fully-qualified Builder class name selects a custom serializer
    SequenceFileSerializer serializer = SequenceFileSerializerFactory.getSerializer(
        "org.apache.flume.sink.hdfs.MyCustomSerializer$Builder", new Context());

    assertTrue(serializer != null);
    assertTrue(serializer.getClass().getName(),
        serializer instanceof MyCustomSerializer);
  }
}
| 9,804 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.serialization.AvroEventSerializerConfigurationConstants;
import org.apache.flume.serialization.EventSerializer;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.After;
/**
 * Tests for {@link AvroEventSerializer}: writes three Avro-encoded events
 * through the serializer under various compression codecs and schema-delivery
 * mechanisms (literal header, per-event URL header, static URL config), then
 * reads the resulting container file back and counts the records.
 */
public class TestAvroEventSerializer {

  // temporary output file, recreated for every test case
  private File file;

  @Before
  public void setUp() throws Exception {
    file = File.createTempFile(getClass().getSimpleName(), "");
  }

  @After
  public void tearDown() throws Exception {
    file.delete();
  }

  @Test
  public void testNoCompression() throws IOException {
    createAvroFile(file, null, false, false);
    validateAvroFile(file);
  }

  @Test
  public void testNullCompression() throws IOException {
    createAvroFile(file, "null", false, false);
    validateAvroFile(file);
  }

  @Test
  public void testDeflateCompression() throws IOException {
    createAvroFile(file, "deflate", false, false);
    validateAvroFile(file);
  }

  @Test
  public void testSnappyCompression() throws IOException {
    createAvroFile(file, "snappy", false, false);
    validateAvroFile(file);
  }

  @Test
  public void testSchemaUrl() throws IOException {
    // schema delivered via the per-event AVRO_SCHEMA_URL_HEADER
    createAvroFile(file, null, true, false);
    validateAvroFile(file);
  }

  @Test
  public void testStaticSchemaUrl() throws IOException {
    // schema delivered via the static staticSchemaURL configuration
    createAvroFile(file, null, false, true);
    validateAvroFile(file);
  }

  @Test
  public void testBothUrls() throws IOException {
    // per-event URL header and static URL configured at the same time
    createAvroFile(file, null, true, true);
    validateAvroFile(file);
  }

  /**
   * Serializes three "Hello i" records into {@code file} via the
   * AvroEventSerializer.
   *
   * @param codec              compressionCodec config value, or null for none
   * @param useSchemaUrl       attach the schema via the per-event URL header
   * @param useStaticSchemaUrl configure the schema via the static URL property
   */
  public void createAvroFile(File file, String codec, boolean useSchemaUrl,
                             boolean useStaticSchemaUrl) throws IOException {
    // serialize a few events using the reflection-based avro serializer
    // NOTE(review): out/schemaFile are not closed/deleted if an exception is
    // thrown mid-way; acceptable for a test, but not try-with-resources safe
    OutputStream out = new FileOutputStream(file);

    Context ctx = new Context();
    if (codec != null) {
      ctx.put("compressionCodec", codec);
    }

    // single-field record schema: { message: string }
    Schema schema = Schema.createRecord("myrecord", null, null, false);
    schema.setFields(Arrays.asList(new Schema.Field[]{
        new Schema.Field("message", Schema.create(Schema.Type.STRING), null, null)
    }));
    GenericRecordBuilder recordBuilder = new GenericRecordBuilder(schema);

    // write the schema to a temp .avsc file when any URL-based delivery is used
    File schemaFile = null;
    if (useSchemaUrl || useStaticSchemaUrl) {
      schemaFile = File.createTempFile(getClass().getSimpleName(), ".avsc");
      Files.write(schema.toString(), schemaFile, Charsets.UTF_8);
    }
    if (useStaticSchemaUrl) {
      ctx.put(AvroEventSerializerConfigurationConstants.STATIC_SCHEMA_URL,
          schemaFile.toURI().toURL().toExternalForm());
    }

    EventSerializer.Builder builder = new AvroEventSerializer.Builder();
    EventSerializer serializer = builder.build(ctx, out);

    serializer.afterCreate();
    for (int i = 0; i < 3; i++) {
      GenericRecord record = recordBuilder.set("message", "Hello " + i).build();
      Event event = EventBuilder.withBody(serializeAvro(record, schema));
      if (schemaFile == null && !useSchemaUrl) {
        // no URL in play: ship the schema inline as a literal header
        event.getHeaders().put(AvroEventSerializer.AVRO_SCHEMA_LITERAL_HEADER,
            schema.toString());
      } else if (useSchemaUrl) {
        event.getHeaders().put(AvroEventSerializer.AVRO_SCHEMA_URL_HEADER,
            schemaFile.toURI().toURL().toExternalForm());
      }
      serializer.write(event);
    }
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();
    if (schemaFile != null) {
      schemaFile.delete();
    }
  }

  /**
   * Encodes a single datum to raw Avro binary (no container framing); this
   * becomes the Flume event body that the serializer under test re-reads.
   */
  private byte[] serializeAvro(Object datum, Schema schema) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ReflectDatumWriter<Object> writer = new ReflectDatumWriter<Object>(schema);
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    out.reset();
    writer.write(datum, encoder);
    encoder.flush();
    return out.toByteArray();
  }

  /**
   * Reads the Avro container file back with a generic reader and asserts it
   * holds exactly the three records written by {@link #createAvroFile}.
   */
  public void validateAvroFile(File file) throws IOException {
    // read the events back using GenericRecord
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    DataFileReader<GenericRecord> fileReader =
        new DataFileReader<GenericRecord>(file, reader);
    GenericRecord record = new GenericData.Record(fileReader.getSchema());
    int numEvents = 0;
    while (fileReader.hasNext()) {
      fileReader.next(record);
      String bodyStr = record.get("message").toString();
      System.out.println(bodyStr);
      numEvents++;
    }
    fileReader.close();
    Assert.assertEquals("Should have found a total of 3 events", 3, numEvents);
  }
}
| 9,805 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/MockDataStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * Test double for {@link HDFSDataStream} that short-circuits file-system
 * resolution: {@link #getDfs(Configuration, Path)} always returns the
 * {@link FileSystem} injected at construction time instead of deriving one
 * from the configuration.
 */
class MockDataStream extends HDFSDataStream {
  private final FileSystem fixedFileSystem;

  MockDataStream(FileSystem fs) {
    this.fixedFileSystem = fs;
  }

  /** Ignores {@code conf} and {@code dstPath}; hands back the injected FS. */
  @Override
  protected FileSystem getDfs(Configuration conf, Path dstPath) throws IOException {
    return fixedFileSystem;
  }
}
| 9,806 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/MyCustomSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import java.util.Arrays;
/**
 * Minimal {@link SequenceFileSerializer} used by the HDFS sink tests: no
 * matter what event arrives, it emits two fixed records keyed by
 * {@link LongWritable} whose {@link BytesWritable} payloads are 10 and 20
 * zero bytes respectively.
 */
public class MyCustomSerializer implements SequenceFileSerializer {

  @Override
  public Class<LongWritable> getKeyClass() {
    return LongWritable.class;
  }

  @Override
  public Class<BytesWritable> getValueClass() {
    return BytesWritable.class;
  }

  /** The event content is deliberately ignored; the output is deterministic. */
  @Override
  public Iterable<Record> serialize(Event e) {
    Record first = new Record(new LongWritable(1234L), new BytesWritable(new byte[10]));
    Record second = new Record(new LongWritable(4567L), new BytesWritable(new byte[20]));
    return Arrays.asList(first, second);
  }

  /** Builder hook so Flume can instantiate this serializer from configuration. */
  public static class Builder implements SequenceFileSerializer.Builder {
    @Override
    public SequenceFileSerializer build(Context context) {
      return new MyCustomSerializer();
    }
  }
}
| 9,807 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestHDFSEventSinkOnMiniCluster.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.base.Charsets;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import com.google.common.base.Throwables;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.event.EventBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Unit tests that exercise HDFSEventSink against an actual in-process HDFS
 * (MiniDFSCluster) instance: basic write/read-back, GZIP output,
 * under-replication handling, and lease recovery on failed close().
 * TODO: figure out how to unit-test Kerberos-secured HDFS.
 */
public class TestHDFSEventSinkOnMiniCluster {
  private static final Logger logger =
      LoggerFactory.getLogger(TestHDFSEventSinkOnMiniCluster.class);
  // Flip to true to leave the test output on disk for manual inspection.
  private static final boolean KEEP_DATA = false;
  // Local directory backing the MiniDFSCluster's storage.
  private static final String DFS_DIR = "target/test/dfs";
  // System property MiniDFSCluster consults for its data directory.
  private static final String TEST_BUILD_DATA_KEY = "test.build.data";
  private static MiniDFSCluster cluster = null;
  // Saved value of TEST_BUILD_DATA_KEY so teardownClass() can restore it.
  private static String oldTestBuildDataProp = null;
  /**
   * Creates the DFS data directory and points MiniDFSCluster at it via the
   * {@code test.build.data} system property, saving the previous value.
   */
  @BeforeClass
  public static void setupClass() throws IOException {
    // set up data dir for HDFS
    File dfsDir = new File(DFS_DIR);
    if (!dfsDir.isDirectory()) {
      dfsDir.mkdirs();
    }
    // save off system prop to restore later
    oldTestBuildDataProp = System.getProperty(TEST_BUILD_DATA_KEY);
    System.setProperty(TEST_BUILD_DATA_KEY, DFS_DIR);
  }
  /** Builds an hdfs:// URL from the mini cluster's NameNode RPC port. */
  private static String getNameNodeURL(MiniDFSCluster cluster) {
    int nnPort = cluster.getNameNode().getNameNodeAddress().getPort();
    return "hdfs://localhost:" + nnPort;
  }
  /**
   * This is a very basic test that writes one event to HDFS and reads it back.
   */
  @Test
  public void simpleHDFSTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();
    String outputDir = "/flume/simpleHDFSTest";
    Path outputDirPath = new Path(outputDir);
    logger.info("Running test with output dir: {}", outputDir);
    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
      fs.delete(outputDirPath, true);
    }
    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);
    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();
    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
    // batchSize 1 so a single process() call flushes the single event
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));
    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();
    // create an event
    String EVENT_BODY = "yarg!";
    channel.getTransaction().begin();
    try {
      channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    // store event to HDFS
    sink.process();
    // shut down flume
    sink.stop();
    channel.stop();
    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    Assert.assertEquals("Only one file expected", 1, statuses.length);
    for (FileStatus status : statuses) {
      Path filePath = status.getPath();
      logger.info("Found file on DFS: {}", filePath);
      FSDataInputStream stream = fs.open(filePath);
      // NOTE(review): reader/stream are never closed; harmless in a
      // short-lived test JVM but worth tightening.
      BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
      String line = reader.readLine();
      logger.info("First line in file {}: {}", filePath, line);
      Assert.assertEquals(EVENT_BODY, line);
    }
    if (!KEEP_DATA) {
      fs.delete(outputDirPath, true);
    }
    cluster.shutdown();
    cluster = null;
  }
  /**
   * Writes two events in GZIP-compressed format and reads the first one back.
   */
  @Test
  public void simpleHDFSGZipCompressedTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();
    String outputDir = "/flume/simpleHDFSGZipCompressedTest";
    Path outputDirPath = new Path(outputDir);
    logger.info("Running test with output dir: {}", outputDir);
    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
      fs.delete(outputDirPath, true);
    }
    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);
    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();
    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    // CompStreamType + gzip codec => GZIP-compressed output file
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.CompStreamType);
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));
    sinkCtx.put("hdfs.codeC", "gzip");
    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();
    // create an event
    String EVENT_BODY_1 = "yarg1";
    String EVENT_BODY_2 = "yarg2";
    channel.getTransaction().begin();
    try {
      channel.put(EventBuilder.withBody(EVENT_BODY_1, Charsets.UTF_8));
      channel.put(EventBuilder.withBody(EVENT_BODY_2, Charsets.UTF_8));
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    // store event to HDFS
    sink.process();
    // shut down flume
    sink.stop();
    channel.stop();
    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    Assert.assertEquals("Only one file expected", 1, statuses.length);
    for (FileStatus status : statuses) {
      Path filePath = status.getPath();
      logger.info("Found file on DFS: {}", filePath);
      FSDataInputStream stream = fs.open(filePath);
      BufferedReader reader = new BufferedReader(new InputStreamReader(
          new GZIPInputStream(stream)));
      String line = reader.readLine();
      logger.info("First line in file {}: {}", filePath, line);
      Assert.assertEquals(EVENT_BODY_1, line);
      // The rest of this test is commented-out (will fail) for 2 reasons:
      //
      // (1) At the time of this writing, Hadoop has a bug which causes the
      // non-native gzip implementation to create invalid gzip files when
      // finish() and resetState() are called. See HADOOP-8522.
      //
      // (2) Even if HADOOP-8522 is fixed, the JDK GZipInputStream is unable
      // to read multi-member (concatenated) gzip files. See this Sun bug:
      // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4691425
      //
      //line = reader.readLine();
      //logger.info("Second line in file {}: {}", filePath, line);
      //Assert.assertEquals(EVENT_BODY_2, line);
    }
    if (!KEEP_DATA) {
      fs.delete(outputDirPath, true);
    }
    cluster.shutdown();
    cluster = null;
  }
  /**
   * Writes six events while killing one of three datanodes mid-stream to
   * force under-replication handling; verifies the sink rolls new files and
   * that every surviving file starts with the expected event body.
   */
  @Test
  public void underReplicationTest() throws EventDeliveryException,
      IOException {
    Configuration conf = new Configuration();
    conf.set("dfs.replication", String.valueOf(3));
    cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    String outputDir = "/flume/underReplicationTest";
    Path outputDirPath = new Path(outputDir);
    logger.info("Running test with output dir: {}", outputDir);
    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
      fs.delete(outputDirPath, true);
    }
    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);
    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();
    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));
    sinkCtx.put("hdfs.retryInterval", "10"); //to speed up test
    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();
    // create an event
    channel.getTransaction().begin();
    try {
      channel.put(EventBuilder.withBody("yarg 1", Charsets.UTF_8));
      channel.put(EventBuilder.withBody("yarg 2", Charsets.UTF_8));
      channel.put(EventBuilder.withBody("yarg 3", Charsets.UTF_8));
      channel.put(EventBuilder.withBody("yarg 4", Charsets.UTF_8));
      channel.put(EventBuilder.withBody("yarg 5", Charsets.UTF_8));
      // NOTE(review): "yarg 5" is put twice (six events total) — possibly
      // meant to be "yarg 6"; the assertions below only check the prefix.
      channel.put(EventBuilder.withBody("yarg 5", Charsets.UTF_8));
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    // store events to HDFS
    logger.info("Running process(). Create new file.");
    sink.process(); // create new file;
    logger.info("Running process(). Same file.");
    sink.process();
    // kill a datanode
    logger.info("Killing datanode #1...");
    cluster.stopDataNode(0);
    // there is a race here.. the client may or may not notice that the
    // datanode is dead before it next sync()s.
    // so, this next call may or may not roll a new file.
    logger.info("Running process(). Create new file? (racy)");
    sink.process();
    logger.info("Running process(). Create new file.");
    sink.process();
    logger.info("Running process(). Create new file.");
    sink.process();
    logger.info("Running process(). Create new file.");
    sink.process();
    // shut down flume
    sink.stop();
    channel.stop();
    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    for (FileStatus status : statuses) {
      Path filePath = status.getPath();
      logger.info("Found file on DFS: {}", filePath);
      FSDataInputStream stream = fs.open(filePath);
      // NOTE(review): reader/stream are never closed.
      BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
      String line = reader.readLine();
      logger.info("First line in file {}: {}", filePath, line);
      Assert.assertTrue(line.startsWith("yarg"));
    }
    // 4 or 5 because of the race documented above: the racy process() call
    // may or may not have rolled an extra file.
    Assert.assertTrue("4 or 5 files expected, found " + statuses.length,
        statuses.length == 4 || statuses.length == 5);
    System.out.println("There are " + statuses.length + " files.");
    if (!KEEP_DATA) {
      fs.delete(outputDirPath, true);
    }
    cluster.shutdown();
    cluster = null;
  }
  /**
   * Like {@link #underReplicationTest()} but with 50 events, expecting
   * exactly 31 files once under-replication rotation stops.
   */
  @Ignore("This test is flakey and causes tests to fail pretty often.")
  @Test
  public void maxUnderReplicationTest() throws EventDeliveryException,
      IOException {
    Configuration conf = new Configuration();
    conf.set("dfs.replication", String.valueOf(3));
    cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    String outputDir = "/flume/underReplicationTest";
    Path outputDirPath = new Path(outputDir);
    logger.info("Running test with output dir: {}", outputDir);
    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
      fs.delete(outputDirPath, true);
    }
    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);
    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();
    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));
    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();
    // create an event
    channel.getTransaction().begin();
    try {
      for (int i = 0; i < 50; i++) {
        channel.put(EventBuilder.withBody("yarg " + i, Charsets.UTF_8));
      }
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    // store events to HDFS
    logger.info("Running process(). Create new file.");
    sink.process(); // create new file;
    logger.info("Running process(). Same file.");
    sink.process();
    // kill a datanode
    logger.info("Killing datanode #1...");
    cluster.stopDataNode(0);
    // there is a race here.. the client may or may not notice that the
    // datanode is dead before it next sync()s.
    // so, this next call may or may not roll a new file.
    logger.info("Running process(). Create new file? (racy)");
    sink.process();
    for (int i = 3; i < 50; i++) {
      logger.info("Running process().");
      sink.process();
    }
    // shut down flume
    sink.stop();
    channel.stop();
    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    for (FileStatus status : statuses) {
      Path filePath = status.getPath();
      logger.info("Found file on DFS: {}", filePath);
      FSDataInputStream stream = fs.open(filePath);
      BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
      String line = reader.readLine();
      logger.info("First line in file {}: {}", filePath, line);
      Assert.assertTrue(line.startsWith("yarg"));
    }
    System.out.println("There are " + statuses.length + " files.");
    // NOTE(review): 31 presumably reflects the cap on consecutive
    // under-replication rotations — confirm against HDFSEventSink defaults.
    Assert.assertEquals("31 files expected, found " + statuses.length,
        31, statuses.length);
    if (!KEEP_DATA) {
      fs.delete(outputDirPath, true);
    }
    cluster.shutdown();
    cluster = null;
  }
  /**
   * Tests if the lease gets released if the close() call throws IOException.
   * For more details see https://issues.apache.org/jira/browse/FLUME-3080
   */
  @Test
  public void testLeaseRecoveredIfCloseThrowsIOException() throws Exception {
    testLeaseRecoveredIfCloseFails(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        throw new IOException();
      }
    });
  }
  /**
   * Tests if the lease gets released if the close() call times out.
   * For more details see https://issues.apache.org/jira/browse/FLUME-3080
   */
  @Test
  public void testLeaseRecoveredIfCloseTimesOut() throws Exception {
    testLeaseRecoveredIfCloseFails(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // sleep well past hdfs.callTimeout (1s) to force a close timeout
        TimeUnit.SECONDS.sleep(30);
        return null;
      }
    });
  }
  /**
   * Shared driver for the lease-recovery tests: installs an HDFSDataStream
   * whose close() runs {@code doThisInClose}, writes one event, then polls
   * the NameNode until the file's lease is gone (renewal time == -1).
   *
   * @param doThisInClose failure behavior injected into close()
   */
  private void testLeaseRecoveredIfCloseFails(final Callable<?> doThisInClose)
      throws Exception {
    cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).build();
    cluster.waitActive();
    String outputDir = "/flume/leaseRecovery";
    Path outputDirPath = new Path(outputDir);
    logger.info("Running test with output dir: {}", outputDir);
    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
      fs.delete(outputDirPath, true);
    }
    String nnURL = getNameNodeURL(cluster);
    Context ctx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.configure(ctx);
    channel.start();
    ctx.put("hdfs.path", nnURL + outputDir);
    ctx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
    ctx.put("hdfs.batchSize", Integer.toString(1));
    // short call timeout so the sleeping close() variant times out quickly
    ctx.put("hdfs.callTimeout", Integer.toString(1000));
    ctx.put("hdfs.retryInterval", "10"); //to speed up test
    HDFSWriter hdfsWriter = new HDFSDataStream() {
      @Override
      public void close() throws IOException {
        try {
          doThisInClose.call();
        } catch (Throwable e) {
          Throwables.propagateIfPossible(e, IOException.class);
          throw new RuntimeException(e);
        }
      }
    };
    hdfsWriter.configure(ctx);
    HDFSEventSink sink = new HDFSEventSink();
    sink.configure(ctx);
    sink.setMockFs(fs);
    sink.setMockWriter(hdfsWriter);
    sink.setChannel(channel);
    sink.start();
    Transaction txn = channel.getTransaction();
    txn.begin();
    try {
      channel.put(EventBuilder.withBody("test", Charsets.UTF_8));
      txn.commit();
    } finally {
      txn.close();
    }
    sink.process();
    sink.stop();
    channel.stop();
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertEquals(1, statuses.length);
    String filePath = statuses[0].getPath().toUri().getPath();
    // -1 in case that the lease doesn't exist.
    long leaseRenewalTime = NameNodeAdapter.getLeaseRenewalTime(cluster.getNameNode(), filePath);
    // wait until the NameNode recovers the lease
    for (int i = 0; (i < 10) && (leaseRenewalTime != -1L); i++) {
      TimeUnit.SECONDS.sleep(1);
      leaseRenewalTime = NameNodeAdapter.getLeaseRenewalTime(cluster.getNameNode(), filePath);
    }
    // There should be no lease for the given path even if close failed as the BucketWriter
    // explicitly calls the recoverLease()
    Assert.assertEquals(-1L, leaseRenewalTime);
    if (!KEEP_DATA) {
      fs.delete(outputDirPath, true);
    }
    cluster.shutdown();
    cluster = null;
  }
  /** Restores the {@code test.build.data} property and removes test data. */
  @AfterClass
  public static void teardownClass() {
    // restore system state, if needed
    if (oldTestBuildDataProp != null) {
      System.setProperty(TEST_BUILD_DATA_KEY, oldTestBuildDataProp);
    }
    if (!KEEP_DATA) {
      FileUtils.deleteQuietly(new File(DFS_DIR));
    }
  }
}
| 9,808 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/HDFSTestSeqWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.Event;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import java.io.IOException;
/**
 * Fault-injecting {@link HDFSSequenceFile} for sink tests. Event headers
 * select the failure mode:
 * <ul>
 *   <li>{@code fault} — always throw from append()</li>
 *   <li>{@code fault-once} — throw once, then clear the marker</li>
 *   <li>{@code fault-until-reopen} — throw while this is the first open</li>
 *   <li>{@code slow} — sleep for the given number of milliseconds</li>
 * </ul>
 * The {@code closed}/{@code opened} flags let tests observe lifecycle calls.
 */
public class HDFSTestSeqWriter extends HDFSSequenceFile {
  protected volatile boolean closed;
  protected volatile boolean opened;
  private int openCount = 0;

  HDFSTestSeqWriter(int openCount) {
    this.openCount = openCount;
  }

  @Override
  public void open(String filePath, CompressionCodec codeC, CompressionType compType)
      throws IOException {
    super.open(filePath, codeC, compType);
    // only report "opened" when this is a re-open after a close()
    if (closed) {
      opened = true;
    }
  }

  @Override
  public void append(Event e) throws IOException {
    injectConfiguredFault(e);
    super.append(e);
  }

  /** Evaluates the event's headers and simulates the first matching failure mode. */
  private void injectConfiguredFault(Event e) throws IOException {
    if (e.getHeaders().containsKey("fault")) {
      throw new IOException("Injected fault");
    }
    if (e.getHeaders().containsKey("fault-once")) {
      // one-shot fault: clear the marker so a retry of this event succeeds
      e.getHeaders().remove("fault-once");
      throw new IOException("Injected fault");
    }
    if (e.getHeaders().containsKey("fault-until-reopen")) {
      // opening first time.
      if (openCount == 1) {
        throw new IOException("Injected fault-until-reopen");
      }
      // this mode matched; do not also evaluate "slow" (mirrors else-if chain)
      return;
    }
    if (e.getHeaders().containsKey("slow")) {
      long waitTime = Long.parseLong(e.getHeaders().get("slow"));
      try {
        Thread.sleep(waitTime);
      } catch (InterruptedException eT) {
        throw new IOException("append interrupted", eT);
      }
    }
  }

  @Override
  public void close() throws IOException {
    closed = true;
    super.close();
  }
}
| 9,809 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestBucketWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.base.Charsets;
import org.apache.flume.Clock;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.SystemClock;
import org.apache.flume.auth.FlumeAuthenticationUtil;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.hdfs.HDFSEventSink.WriterCallback;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Calendar;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
public class TestBucketWriter {
private static Logger logger = LoggerFactory.getLogger(TestBucketWriter.class);
private Context ctx = new Context();
private static ScheduledExecutorService timedRollerPool;
private static PrivilegedExecutor proxy;
  // Shared fixtures: a single-thread scheduler for BucketWriter's timed
  // rolls, and the privileged executor BucketWriter runs HDFS calls through
  // (built with null principal/keytab, i.e. no Kerberos).
  @BeforeClass
  public static void setup() {
    timedRollerPool = Executors.newSingleThreadScheduledExecutor();
    proxy = FlumeAuthenticationUtil.getAuthenticator(null, null).proxyAs(null);
  }
  // Orderly shutdown first, allow in-flight roll tasks up to 2s to finish,
  // then force-terminate anything still running.
  @AfterClass
  public static void teardown() throws InterruptedException {
    timedRollerPool.shutdown();
    timedRollerPool.awaitTermination(2, TimeUnit.SECONDS);
    timedRollerPool.shutdownNow();
  }
@Test
public void testEventCountingRoller() throws IOException, InterruptedException {
int maxEvents = 100;
MockHDFSWriter hdfsWriter = new MockHDFSWriter();
BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
.setRollCount(maxEvents).build();
Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
for (int i = 0; i < 1000; i++) {
bucketWriter.append(e);
}
logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());
Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
@Test
public void testSizeRoller() throws IOException, InterruptedException {
int maxBytes = 300;
MockHDFSWriter hdfsWriter = new MockHDFSWriter();
BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
.setRollSize(maxBytes).build();
Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
for (int i = 0; i < 1000; i++) {
bucketWriter.append(e);
}
logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());
Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
  // Verifies time-based rolling: the writer must auto-close after the roll
  // interval, invoke the close callback, and a fresh writer must roll again.
  @Test
  public void testIntervalRoller() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1; // seconds
    final int NUM_EVENTS = 10;
    // flipped by the close callback so the test can observe it fired
    final AtomicBoolean calledBack = new AtomicBoolean(false);
    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
        .setRollInterval(ROLL_INTERVAL)
        .setOnCloseCallback(new HDFSEventSink.WriterCallback() {
          @Override
          public void run(String filePath) {
            calledBack.set(true);
          }
        }).build();
    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    long startNanos = System.nanoTime();
    for (int i = 0; i < NUM_EVENTS - 1; i++) {
      bucketWriter.append(e);
    }
    // sleep to force a roll... wait 2x interval just to be sure
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);
    Assert.assertTrue(bucketWriter.closed.get());
    Assert.assertTrue(calledBack.get());
    // a closed BucketWriter cannot be reused; build a new one sharing the
    // same MockHDFSWriter so its counters keep accumulating
    bucketWriter = new BucketWriterBuilder(hdfsWriter)
        .setRollInterval(ROLL_INTERVAL).build();
    // write one more event (to reopen a new file so we will roll again later)
    bucketWriter.append(e);
    long elapsedMillis = TimeUnit.MILLISECONDS.convert(
        System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
    // NOTE(review): elapsedSeconds is computed but never used
    long elapsedSeconds = elapsedMillis / 1000L;
    logger.info("Time elapsed: {} milliseconds", elapsedMillis);
    logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
    logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
    logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());
    logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());
    Assert.assertEquals("events written", NUM_EVENTS,
        hdfsWriter.getEventsWritten());
    Assert.assertEquals("bytes written", e.getBody().length * NUM_EVENTS,
        hdfsWriter.getBytesWritten());
    Assert.assertEquals("files opened", 2, hdfsWriter.getFilesOpened());
    // before auto-roll
    Assert.assertEquals("files closed", 1, hdfsWriter.getFilesClosed());
    logger.info("Waiting for roll...");
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);
    logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());
    Assert.assertEquals("files closed", 2, hdfsWriter.getFilesClosed());
  }
  // Exercises flush() after the roll interval has elapsed. The stub writer's
  // sync() throws once the file has been closed (and append() silently
  // re-opens), reproducing the interval-roll/flush interaction this test was
  // written for — NOTE(review): presumably a regression test; the final
  // flush() is expected NOT to throw despite the timed close.
  @Test
  public void testIntervalRollerBug() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1; // seconds
    final int NUM_EVENTS = 10;
    HDFSWriter hdfsWriter = new HDFSWriter() {
      private volatile boolean open = false;
      public void configure(Context context) {
      }
      public void sync() throws IOException {
        // fail if sync() is attempted on a closed file
        if (!open) {
          throw new IOException("closed");
        }
      }
      public void open(String filePath, CompressionCodec codec, CompressionType cType)
          throws IOException {
        open = true;
      }
      public void open(String filePath) throws IOException {
        open = true;
      }
      public void close() throws IOException {
        open = false;
      }
      @Override
      public boolean isUnderReplicated() {
        return false;
      }
      public void append(Event e) throws IOException {
        // we just re-open in append if closed
        open = true;
      }
    };
    File tmpFile = File.createTempFile("flume", "test");
    tmpFile.deleteOnExit();
    String path = tmpFile.getParent();
    String name = tmpFile.getName();
    BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
        .setRollInterval(ROLL_INTERVAL)
        .setFilePath(path)
        .setFileName(name)
        .build();
    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    for (int i = 0; i < NUM_EVENTS - 1; i++) {
      bucketWriter.append(e);
    }
    // sleep to force a roll... wait 2x interval just to be sure
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);
    bucketWriter.flush(); // throws closed exception
  }
  // With no suffix configured, the in-progress file name must end with
  // "<timestamp>.tmp".
  @Test
  public void testFileSuffixNotGiven() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
    final String suffix = null;
    // Need to override system time use for test so we know what to expect
    final long testTime = System.currentTimeMillis();
    Clock testClock = new Clock() {
      public long currentTimeMillis() {
        return testTime;
      }
    };
    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
        .setRollInterval(ROLL_INTERVAL)
        .setFileSuffix(suffix)
        .setClock(testClock)
        .build();
    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    bucketWriter.append(e);
    // testTime + 1: BucketWriter appears to advance the clock value by one
    // when naming the file — NOTE(review): confirm against BucketWriter
    Assert.assertTrue("Incorrect suffix", hdfsWriter.getOpenedFilePath().endsWith(
        Long.toString(testTime + 1) + ".tmp"));
  }
/**
 * With an explicit file suffix configured, the opened file name must end
 * with counter + suffix + ".tmp".
 */
@Test
public void testFileSuffixGiven() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; must not elapse during the test
  final String suffix = ".avro";
  // Freeze the clock so the generated file name is fully predictable.
  final long frozenTime = System.currentTimeMillis();
  Clock fixedClock = new Clock() {
    @Override
    public long currentTimeMillis() {
      return frozenTime;
    }
  };
  MockHDFSWriter mockWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriterBuilder(mockWriter)
      .setRollInterval(ROLL_INTERVAL)
      .setFileSuffix(suffix)
      .setClock(fixedClock)
      .build();
  bucketWriter.append(EventBuilder.withBody("foo", Charsets.UTF_8));
  // Counter is clock value + 1 on first open; suffix precedes the in-use ".tmp".
  String expectedEnding = Long.toString(frozenTime + 1) + suffix + ".tmp";
  Assert.assertTrue("Incorrect suffix",
      mockWriter.getOpenedFilePath().endsWith(expectedEnding));
}
/**
 * When both a codec and an explicit suffix are configured, the explicit
 * suffix wins: the codec's default extension must not be appended.
 */
@Test
public void testFileSuffixCompressed()
    throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; must not elapse during the test
  final String suffix = ".foo";
  MockHDFSWriter mockWriter = new MockHDFSWriter();
  // Freeze the clock so the generated file name is fully predictable.
  final long frozenTime = System.currentTimeMillis();
  Clock fixedClock = new Clock() {
    @Override
    public long currentTimeMillis() {
      return frozenTime;
    }
  };
  BucketWriter bucketWriter = new BucketWriterBuilder(mockWriter)
      .setRollInterval(ROLL_INTERVAL)
      .setFileSuffix(suffix)
      .setCodeC(HDFSEventSink.getCodec("gzip"))
      .setCompType(SequenceFile.CompressionType.BLOCK)
      .setClock(fixedClock)
      .build();
  bucketWriter.append(EventBuilder.withBody("foo", Charsets.UTF_8));
  String expectedEnding = Long.toString(frozenTime + 1) + suffix + ".tmp";
  Assert.assertTrue("Incorrect suffix",
      mockWriter.getOpenedFilePath().endsWith(expectedEnding));
}
/**
 * Verifies that the configured in-use prefix appears in the path of the
 * file opened by the writer.
 * Fix: removed the unused local {@code HDFSTextSerializer formatter}.
 */
@Test
public void testInUsePrefix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String PREFIX = "BRNO_IS_CITY_IN_CZECH_REPUBLIC";
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
      .setRollInterval(ROLL_INTERVAL)
      .setInUsePrefix(PREFIX)
      .build();
  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);
  Assert.assertTrue("Incorrect in use prefix", hdfsWriter.getOpenedFilePath().contains(PREFIX));
}
/**
 * Verifies that the configured in-use suffix appears in the path of the
 * file opened by the writer.
 * Fix: removed the unused local {@code HDFSTextSerializer serializer}.
 */
@Test
public void testInUseSuffix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String SUFFIX = "WELCOME_TO_THE_HELLMOUNTH";
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriterBuilder(hdfsWriter)
      .setRollInterval(ROLL_INTERVAL)
      .setInUseSuffix(SUFFIX)
      .build();
  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);
  Assert.assertTrue("Incorrect in use suffix", hdfsWriter.getOpenedFilePath().contains(SUFFIX));
}
/**
 * Verifies that the onClose callback registered with the builder fires
 * when the bucket writer is closed.
 */
@Test
public void testCallbackOnClose() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; must not elapse during the test
  final String SUFFIX = "WELCOME_TO_THE_EREBOR";
  final AtomicBoolean closeCallbackFired = new AtomicBoolean(false);
  HDFSEventSink.WriterCallback recordingCallback = new HDFSEventSink.WriterCallback() {
    @Override
    public void run(String filePath) {
      closeCallbackFired.set(true);
    }
  };
  BucketWriter bucketWriter = new BucketWriterBuilder()
      .setRollInterval(ROLL_INTERVAL)
      .setInUseSuffix(SUFFIX)
      .setOnCloseCallback(recordingCallback)
      .setOnCloseCallbackPath("blah")
      .build();
  bucketWriter.append(EventBuilder.withBody("foo", Charsets.UTF_8));
  bucketWriter.close(true);
  Assert.assertTrue(closeCallbackFired.get());
}
/**
 * Runs the rename-retry scenario for several retry counts, both with the
 * close succeeding and with it failing.
 */
@Test
public void testSequenceFileRenameRetries() throws Exception {
  for (boolean closeSucceeds : new boolean[] { true, false }) {
    for (int retries : new int[] { 1, 5, 2 }) {
      sequenceFileRenameRetryCoreTest(retries, closeSucceeds);
    }
  }
}
/** Runs the close-retry scenario for a couple of retry counts. */
@Test
public void testSequenceFileCloseRetries() throws Exception {
  for (int retries : new int[] { 5, 1 }) {
    sequenceFileCloseRetryCoreTest(retries);
  }
}
/**
 * Drives a BucketWriter whose rename is forced to fail a fixed number of
 * times (via {@link MockFileSystem}) and verifies that exactly that many
 * rename attempts are recorded.
 *
 * Fixes: removed the dead local {@code Context} (it was populated but never
 * passed anywhere — the builder uses the class-level ctx), dropped the
 * redundant {@code setWriter} (the builder constructor already receives the
 * writer), and replaced {@code assertTrue(x == y)} with {@code assertEquals}
 * for a proper failure message.
 *
 * @param numberOfRetriesRequired how many rename failures the mock injects
 * @param closeSucceed whether the underlying close succeeds
 */
public void sequenceFileRenameRetryCoreTest(int numberOfRetriesRequired, boolean closeSucceed)
    throws Exception {
  // Unique scratch dir per invocation so repeated runs don't collide.
  String hdfsPath = "file:///tmp/flume-test." +
      Calendar.getInstance().getTimeInMillis() +
      "." + Thread.currentThread().getId();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(hdfsPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);
  MockFileSystem mockFs = new MockFileSystem(fs, numberOfRetriesRequired, closeSucceed);
  MockDataStream writer = new MockDataStream(mockFs);
  BucketWriter bucketWriter = new BucketWriterBuilder(writer)
      .setRollCount(1)
      .setBatchSize(1)
      .setFilePath(hdfsPath)
      .setFileName(hdfsPath)
      .setInUsePrefix("singleBucket")
      .setCompType(null)
      .setRetryInterval(1)
      .setMaxCloseTries(numberOfRetriesRequired)
      .build();
  bucketWriter.setFileSystem(mockFs);
  Event event = EventBuilder.withBody("test", Charsets.UTF_8);
  bucketWriter.append(event);
  // rollCount == 1, so this second append triggers the close/rename of the
  // first file.
  bucketWriter.append(event);
  // Give the retry scheduler (1s interval) time to exhaust its attempts.
  TimeUnit.SECONDS.sleep(numberOfRetriesRequired + 2);
  Assert.assertEquals("Wrong number of rename attempts",
      numberOfRetriesRequired, bucketWriter.renameTries.get());
}
/**
 * Drives a BucketWriter whose close always fails (MockHDFSWriter with
 * Integer.MAX_VALUE injected failures) and verifies that exactly
 * {@code numberOfRetriesRequired} close attempts are made and that the
 * retry executor drains afterwards.
 *
 * Fixes: corrected the "ExcceutorService" typo, put assertEquals arguments
 * in the documented (message, expected, actual) order — they were swapped —
 * and removed the dead local {@code Context} (the constructor call below
 * uses the class-level ctx).
 */
private void sequenceFileCloseRetryCoreTest(int numberOfRetriesRequired)
    throws Exception {
  // Unique scratch dir per invocation so repeated runs don't collide.
  String hdfsPath = "file:///tmp/flume-test." +
      Calendar.getInstance().getTimeInMillis() +
      "." + Thread.currentThread().getId();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(hdfsPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);
  MockHDFSWriter mockHDFSWriter = new MockHDFSWriter(Integer.MAX_VALUE);
  ExecutorService executorService = Executors.newSingleThreadExecutor();
  BucketWriter bucketWriter = new BucketWriter(
      0, 0, 1, 1, ctx, hdfsPath, hdfsPath, "singleBucket", ".tmp", null, null,
      null, mockHDFSWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      executorService, 1, numberOfRetriesRequired);
  Event event = EventBuilder.withBody("test", Charsets.UTF_8);
  bucketWriter.append(event);
  bucketWriter.close(false);
  // Give the retry scheduler (1s interval) time to exhaust its attempts.
  TimeUnit.SECONDS.sleep(numberOfRetriesRequired + 2);
  Assert.assertEquals("ExecutorService should be empty",
      0, executorService.shutdownNow().size());
  Assert.assertEquals("Wrong number of close attempts",
      numberOfRetriesRequired, mockHDFSWriter.currentCloseAttempts.get());
}
// Test that we don't swallow IOExceptions in secure mode. We should close the bucket writer
// and rethrow the exception. Regression test for FLUME-3049.
@Test
public void testRotateBucketOnIOException() throws IOException, InterruptedException {
  MockHDFSWriter spyWriter = Mockito.spy(new MockHDFSWriter());
  PrivilegedExecutor aliceProxy =
      FlumeAuthenticationUtil.getAuthenticator(null, null).proxyAs("alice");
  final int ROLL_COUNT = 1; // Cause a roll after every successful append().
  BucketWriter bucketWriter = new BucketWriterBuilder(spyWriter)
      .setProxyUser(aliceProxy)
      .setRollCount(ROLL_COUNT)
      .build();
  Event event = EventBuilder.withBody("foo", Charsets.UTF_8);
  // First append succeeds.
  bucketWriter.append(event);
  // Arrange for every subsequent append on the spy to fail.
  IOException expectedIOException = new IOException("Test injected IOException");
  Mockito.doThrow(expectedIOException).when(spyWriter)
      .append(Mockito.any(Event.class));
  // Second append must surface the injected IOException unchanged.
  try {
    bucketWriter.append(event);
    Assert.fail("Expected IOException wasn't thrown during append");
  } catch (IOException ex) {
    Assert.assertEquals(expectedIOException, ex);
    logger.info("Caught expected IOException", ex);
  }
  // Third append: the writer closed itself before rethrowing the IOException,
  // so the bucket must now report itself as closed.
  try {
    bucketWriter.append(event);
    Assert.fail("BucketWriter should be already closed, BucketClosedException expected");
  } catch (BucketClosedException ex) {
    logger.info("Caught expected BucketClosedException", ex);
  }
  Assert.assertEquals("events written", 1, spyWriter.getEventsWritten());
  Assert.assertEquals("2 files should be closed", 2, spyWriter.getFilesClosed());
}
/**
 * Fluent test helper for constructing a {@link BucketWriter} with sensible
 * defaults: rolling disabled, "/tmp/file" target, no compression, the
 * shared test roller pool/proxy, and a fresh SinkCounter. Unset clock and
 * writer fall back to {@code SystemClock}/{@code MockHDFSWriter} in
 * {@link #build()}.
 */
private class BucketWriterBuilder {
  // Defaults mirror a minimal, roll-disabled writer; individual tests
  // override only what they exercise.
  private long rollInterval = 0;
  private long rollSize = 0;
  private long rollCount = 0;
  private long batchSize = 0;
  private Context context = TestBucketWriter.this.ctx;
  private String filePath = "/tmp";
  private String fileName = "file";
  private String inUsePrefix = "";
  private String inUseSuffix = ".tmp";
  private String fileSuffix = null;
  private CompressionCodec codeC = null;
  private CompressionType compType = SequenceFile.CompressionType.NONE;
  private HDFSWriter writer = null;
  private ScheduledExecutorService timedRollerPool = TestBucketWriter.timedRollerPool;
  private PrivilegedExecutor proxyUser = TestBucketWriter.proxy;
  private SinkCounter sinkCounter = new SinkCounter(
      "test-bucket-writer-" + System.currentTimeMillis());
  private int idleTimeout = 0;
  private WriterCallback onCloseCallback = null;
  private String onCloseCallbackPath = null;
  private long callTimeout = 30000;
  private ExecutorService callTimeoutPool = Executors.newSingleThreadExecutor();
  private long retryInterval = 0;
  private int maxCloseTries = 0;
  private Clock clock = null;

  public BucketWriterBuilder() {
  }

  public BucketWriterBuilder(HDFSWriter writer) {
    this.writer = writer;
  }

  public BucketWriterBuilder setRollInterval(long rollInterval) {
    this.rollInterval = rollInterval;
    return this;
  }

  public BucketWriterBuilder setRollSize(long rollSize) {
    this.rollSize = rollSize;
    return this;
  }

  public BucketWriterBuilder setRollCount(long rollCount) {
    this.rollCount = rollCount;
    return this;
  }

  public BucketWriterBuilder setBatchSize(long batchSize) {
    this.batchSize = batchSize;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setContext(Context context) {
    this.context = context;
    return this;
  }

  public BucketWriterBuilder setFilePath(String filePath) {
    this.filePath = filePath;
    return this;
  }

  public BucketWriterBuilder setFileName(String fileName) {
    this.fileName = fileName;
    return this;
  }

  public BucketWriterBuilder setInUsePrefix(String inUsePrefix) {
    this.inUsePrefix = inUsePrefix;
    return this;
  }

  public BucketWriterBuilder setInUseSuffix(String inUseSuffix) {
    this.inUseSuffix = inUseSuffix;
    return this;
  }

  public BucketWriterBuilder setFileSuffix(String fileSuffix) {
    this.fileSuffix = fileSuffix;
    return this;
  }

  public BucketWriterBuilder setCodeC(CompressionCodec codeC) {
    this.codeC = codeC;
    return this;
  }

  public BucketWriterBuilder setCompType(CompressionType compType) {
    this.compType = compType;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setTimedRollerPool(
      ScheduledExecutorService timedRollerPool) {
    this.timedRollerPool = timedRollerPool;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setProxyUser(PrivilegedExecutor proxyUser) {
    this.proxyUser = proxyUser;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setSinkCounter(SinkCounter sinkCounter) {
    this.sinkCounter = sinkCounter;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setIdleTimeout(int idleTimeout) {
    this.idleTimeout = idleTimeout;
    return this;
  }

  public BucketWriterBuilder setOnCloseCallback(
      WriterCallback onCloseCallback) {
    this.onCloseCallback = onCloseCallback;
    return this;
  }

  public BucketWriterBuilder setOnCloseCallbackPath(
      String onCloseCallbackPath) {
    this.onCloseCallbackPath = onCloseCallbackPath;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setCallTimeout(long callTimeout) {
    this.callTimeout = callTimeout;
    return this;
  }

  @SuppressWarnings("unused")
  public BucketWriterBuilder setCallTimeoutPool(
      ExecutorService callTimeoutPool) {
    this.callTimeoutPool = callTimeoutPool;
    return this;
  }

  public BucketWriterBuilder setRetryInterval(long retryInterval) {
    this.retryInterval = retryInterval;
    return this;
  }

  public BucketWriterBuilder setMaxCloseTries(int maxCloseTries) {
    this.maxCloseTries = maxCloseTries;
    return this;
  }

  public BucketWriterBuilder setWriter(HDFSWriter writer) {
    this.writer = writer;
    return this;
  }

  public BucketWriterBuilder setClock(Clock clock) {
    this.clock = clock;
    return this;
  }

  /**
   * Builds the BucketWriter; a null clock falls back to the system clock and
   * a null writer to a fresh MockHDFSWriter.
   */
  public BucketWriter build() {
    if (clock == null) {
      clock = new SystemClock();
    }
    if (writer == null) {
      writer = new MockHDFSWriter();
    }
    return new BucketWriter(rollInterval, rollSize, rollCount, batchSize,
        context, filePath, fileName, inUsePrefix, inUseSuffix, fileSuffix,
        codeC, compType, writer, timedRollerPool, proxyUser, sinkCounter,
        idleTimeout, onCloseCallback, onCloseCallbackPath, callTimeout,
        callTimeoutPool, retryInterval, maxCloseTries, clock);
  }
}
}
| 9,810 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestUseRawLocalFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.File;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.GzipCodec;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
public class TestUseRawLocalFileSystem {
private static Logger logger =
LoggerFactory.getLogger(TestUseRawLocalFileSystem.class);
private Context context;
private File baseDir;
private File testFile;
private Event event;
@Before
public void setup() throws Exception {
  // Fresh temp directory per test; "test" is the file each writer creates.
  baseDir = Files.createTempDir();
  testFile = new File(baseDir.getAbsoluteFile(), "test");
  context = new Context();
  event = EventBuilder.withBody("test", Charsets.UTF_8);
}
@After
public void teardown() throws Exception {
  // Best-effort recursive delete; "quietly" so cleanup failures don't
  // mask the real test outcome.
  FileUtils.deleteQuietly(baseDir);
}
/**
 * Writes one event through HDFSDataStream backed by the raw local file
 * system and checks that bytes actually reach the file after sync().
 * Fix: the stream is now closed in a finally block — previously the file
 * handle leaked for the lifetime of the test JVM.
 */
@Test
public void testTestFile() throws Exception {
  String file = testFile.getCanonicalPath();
  HDFSDataStream stream = new HDFSDataStream();
  context.put("hdfs.useRawLocalFileSystem", "true");
  stream.configure(context);
  stream.open(file);
  try {
    stream.append(event);
    stream.sync();
    Assert.assertTrue(testFile.length() > 0);
  } finally {
    stream.close();
  }
}
/**
 * Writes one event through HDFSCompressedDataStream (gzip, RECORD) backed
 * by the raw local file system and checks that bytes reach the file after
 * sync(). Fix: the stream is now closed in a finally block — previously the
 * file handle leaked.
 */
@Test
public void testCompressedFile() throws Exception {
  String file = testFile.getCanonicalPath();
  HDFSCompressedDataStream stream = new HDFSCompressedDataStream();
  context.put("hdfs.useRawLocalFileSystem", "true");
  stream.configure(context);
  stream.open(file, new GzipCodec(), CompressionType.RECORD);
  try {
    stream.append(event);
    stream.sync();
    Assert.assertTrue(testFile.length() > 0);
  } finally {
    stream.close();
  }
}
/**
 * Writes one event through HDFSSequenceFile backed by the raw local file
 * system and checks that bytes reach the file after sync(). Fix: the stream
 * is now closed in a finally block — previously the file handle leaked.
 */
@Test
public void testSequenceFile() throws Exception {
  String file = testFile.getCanonicalPath();
  HDFSSequenceFile stream = new HDFSSequenceFile();
  context.put("hdfs.useRawLocalFileSystem", "true");
  stream.configure(context);
  stream.open(file);
  try {
    stream.append(event);
    stream.sync();
    Assert.assertTrue(testFile.length() > 0);
  } finally {
    stream.close();
  }
}
} | 9,811 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSWritableSerializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import java.util.Collections;
/**
 * SequenceFileSerializer that emits one (LongWritable key, BytesWritable
 * value) record per event: the key is the event's "timestamp" header (or
 * the current time when absent) and the value is the raw event body.
 *
 * Fix: uses {@link Long#parseLong} instead of {@code Long.valueOf}, avoiding
 * a needless box/unbox on every event; private helpers now declare their
 * concrete Writable return types.
 */
public class HDFSWritableSerializer implements SequenceFileSerializer {

  /** Wraps the event body in a BytesWritable (copies the bytes). */
  private BytesWritable makeByteWritable(Event e) {
    BytesWritable bytesObject = new BytesWritable();
    bytesObject.set(e.getBody(), 0, e.getBody().length);
    return bytesObject;
  }

  @Override
  public Class<LongWritable> getKeyClass() {
    return LongWritable.class;
  }

  @Override
  public Class<BytesWritable> getValueClass() {
    return BytesWritable.class;
  }

  @Override
  public Iterable<Record> serialize(Event e) {
    // Exactly one record per event.
    return Collections.singletonList(new Record(getKey(e), getValue(e)));
  }

  /**
   * Key = "timestamp" header parsed as millis, else the current time.
   * A malformed header still throws NumberFormatException, as before.
   */
  private LongWritable getKey(Event e) {
    String timestamp = e.getHeaders().get("timestamp");
    long eventStamp = (timestamp == null)
        ? System.currentTimeMillis()
        : Long.parseLong(timestamp);
    return new LongWritable(eventStamp);
  }

  private BytesWritable getValue(Event e) {
    return makeByteWritable(e);
  }

  public static class Builder implements SequenceFileSerializer.Builder {
    @Override
    public SequenceFileSerializer build(Context context) {
      return new HDFSWritableSerializer();
    }
  }
}
| 9,812 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSWriter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import org.apache.flume.Event;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.Configurable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface HDFSWriter extends Configurable {

  /** Opens {@code filePath} for writing without compression. */
  public void open(String filePath) throws IOException;

  /** Opens {@code filePath} for writing with the given codec and compression type. */
  public void open(String filePath, CompressionCodec codec,
      CompressionType cType) throws IOException;

  /** Writes a single event to the open file. */
  public void append(Event e) throws IOException;

  /** Flushes buffered data to the underlying file system. */
  public void sync() throws IOException;

  /** Closes the open file, releasing any underlying resources. */
  public void close() throws IOException;

  /**
   * Returns true when the file's current block has fewer replicas than
   * required, signalling the caller to consider rotating the file.
   */
  public boolean isUnderReplicated();
}
| 9,813 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/BucketWriter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
import org.apache.flume.Clock;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.SystemClock;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.hdfs.HDFSEventSink.WriterCallback;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Method;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
* Internal API intended for HDFSSink use.
* This class does file rolling and handles file formats and serialization.
* Only the public methods in this class are thread safe.
*/
class BucketWriter {
private static final Logger LOG = LoggerFactory
.getLogger(BucketWriter.class);
/**
 * This lock ensures that only one thread can open a file at a time.
 * Fix: use a dedicated Object instead of the deprecated {@code new
 * Integer(1)} — synchronizing on a boxed Integer invites accidental lock
 * sharing if the instantiation ever changes to the interned
 * {@code Integer.valueOf}/autoboxing form.
 */
private static final Object staticLock = new Object();
private Method isClosedMethod = null;
private final HDFSWriter writer;
private final long rollInterval;
private final long rollSize;
private final long rollCount;
private final long batchSize;
private final CompressionCodec codeC;
private final CompressionType compType;
private final ScheduledExecutorService timedRollerPool;
private final PrivilegedExecutor proxyUser;
private final AtomicLong fileExtensionCounter;
private long eventCounter;
private long processSize;
private FileSystem fileSystem;
private volatile String filePath;
private volatile String fileName;
private volatile String inUsePrefix;
private volatile String inUseSuffix;
private volatile String fileSuffix;
private volatile String bucketPath;
private volatile String targetPath;
private volatile long batchCounter;
private volatile boolean isOpen;
private volatile boolean isUnderReplicated;
private volatile int consecutiveUnderReplRotateCount = 0;
private volatile ScheduledFuture<Void> timedRollFuture;
private SinkCounter sinkCounter;
private final int idleTimeout;
private volatile ScheduledFuture<Void> idleFuture;
private final WriterCallback onCloseCallback;
private final String onCloseCallbackPath;
private final long callTimeout;
private final ExecutorService callTimeoutPool;
private final int maxConsecUnderReplRotations = 30; // make this config'able?
private boolean mockFsInjected = false;
private final long retryInterval;
private final int maxRetries;
// flag that the bucket writer was closed due to idling and thus shouldn't be
// reopened. Not ideal, but avoids internals of owners
protected AtomicBoolean closed = new AtomicBoolean();
AtomicInteger renameTries = new AtomicInteger(0);
/**
 * Convenience constructor that delegates to the full constructor with the
 * real {@link SystemClock}; tests use the other constructor to inject a
 * deterministic clock.
 */
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize,
    Context context, String filePath, String fileName, String inUsePrefix,
    String inUseSuffix, String fileSuffix, CompressionCodec codeC,
    CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, PrivilegedExecutor proxyUser,
    SinkCounter sinkCounter, int idleTimeout, WriterCallback onCloseCallback,
    String onCloseCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool, long retryInterval,
    int maxRetries) {
  this(rollInterval, rollSize, rollCount, batchSize,
      context, filePath, fileName, inUsePrefix,
      inUseSuffix, fileSuffix, codeC,
      compType, writer,
      timedRollerPool, proxyUser,
      sinkCounter, idleTimeout, onCloseCallback,
      onCloseCallbackPath, callTimeout,
      callTimeoutPool, retryInterval,
      maxRetries, new SystemClock());
}
/**
 * Full constructor. Roll thresholds of 0 disable the corresponding
 * roll trigger. The injected {@code clock} seeds the per-file extension
 * counter, which is why tests with a frozen clock get predictable file
 * names. Also configures the supplied writer with {@code context}.
 */
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize,
    Context context, String filePath, String fileName, String inUsePrefix,
    String inUseSuffix, String fileSuffix, CompressionCodec codeC,
    CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, PrivilegedExecutor proxyUser,
    SinkCounter sinkCounter, int idleTimeout, WriterCallback onCloseCallback,
    String onCloseCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool, long retryInterval,
    int maxRetries, Clock clock) {
  this.rollInterval = rollInterval;
  this.rollSize = rollSize;
  this.rollCount = rollCount;
  this.batchSize = batchSize;
  this.filePath = filePath;
  this.fileName = fileName;
  this.inUsePrefix = inUsePrefix;
  this.inUseSuffix = inUseSuffix;
  this.fileSuffix = fileSuffix;
  this.codeC = codeC;
  this.compType = compType;
  this.writer = writer;
  this.timedRollerPool = timedRollerPool;
  this.proxyUser = proxyUser;
  this.sinkCounter = sinkCounter;
  this.idleTimeout = idleTimeout;
  this.onCloseCallback = onCloseCallback;
  this.onCloseCallbackPath = onCloseCallbackPath;
  this.callTimeout = callTimeout;
  this.callTimeoutPool = callTimeoutPool;
  // Seed the counter with the clock so generated file names are unique and
  // (with an injected fixed clock) deterministic in tests.
  fileExtensionCounter = new AtomicLong(clock.currentTimeMillis());
  this.retryInterval = retryInterval;
  this.maxRetries = maxRetries;
  isOpen = false;
  isUnderReplicated = false;
  this.writer.configure(context);
}
/**
 * Test hook: injects a (mock) FileSystem and suppresses the normal
 * FileSystem lookup performed in open().
 */
@VisibleForTesting
void setFileSystem(FileSystem fs) {
  this.fileSystem = fs;
  mockFsInjected = true;
}
/**
 * Clear the per-file counters (events, bytes, batched-but-unflushed
 * events); called each time a new file is opened.
 */
private void resetCounters() {
  eventCounter = 0;
  processSize = 0;
  batchCounter = 0;
}
/**
 * Reflectively looks up FileSystem#isFileClosed(Path), which only exists
 * in newer Hadoop versions. Returns null (and logs once per open) when the
 * running filesystem implementation does not provide it, in which case
 * failed closes are not re-attempted via that check.
 */
private Method getRefIsClosed() {
  try {
    return fileSystem.getClass().getMethod("isFileClosed",
        Path.class);
  } catch (Exception e) {
    LOG.info("isFileClosed() is not available in the version of the " +
        "distributed filesystem being used. " +
        "Flume will not attempt to re-close files if the close fails " +
        "on the first attempt");
    return null;
  }
}
/**
 * Invokes the reflectively-resolved isFileClosed(Path) on the given
 * filesystem. NOTE(review): throws NullPointerException if isClosedMethod
 * is null — callers presumably check getRefIsClosed()'s result first;
 * confirm at call sites (outside this view).
 */
private Boolean isFileClosed(FileSystem fs, Path tmpFilePath) throws Exception {
  return (Boolean)(isClosedMethod.invoke(fs, tmpFilePath));
}
/**
 * open() is called by append(). Generates a unique in-use file name from
 * the extension counter (plus the configured or codec-derived suffix),
 * opens the underlying HDFSWriter under the global open lock and the
 * configured call timeout, and — if time-based rolling is enabled —
 * schedules the roll that will close this file.
 *
 * @throws IOException if the settings are invalid or the open fails
 * @throws InterruptedException if the calling thread was interrupted
 */
private void open() throws IOException, InterruptedException {
  if ((filePath == null) || (writer == null)) {
    throw new IOException("Invalid file settings");
  }
  final Configuration config = new Configuration();
  // disable FileSystem JVM shutdown hook
  config.setBoolean("fs.automatic.close", false);
  // Hadoop is not thread safe when doing certain RPC operations,
  // including getFileSystem(), when running under Kerberos.
  // open() must be called by one thread at a time in the JVM.
  // NOTE: tried synchronizing on the underlying Kerberos principal previously
  // which caused deadlocks. See FLUME-1231.
  synchronized (staticLock) {
    checkAndThrowInterruptedException();
    try {
      // Bump the counter so every open within this writer yields a new name.
      long counter = fileExtensionCounter.incrementAndGet();
      String fullFileName = fileName + "." + counter;
      // Explicit suffix wins; otherwise fall back to the codec's extension.
      if (fileSuffix != null && fileSuffix.length() > 0) {
        fullFileName += fileSuffix;
      } else if (codeC != null) {
        fullFileName += codeC.getDefaultExtension();
      }
      // bucketPath is the temporary in-use name; targetPath is the final
      // name the file is renamed to on close.
      bucketPath = filePath + "/" + inUsePrefix
          + fullFileName + inUseSuffix;
      targetPath = filePath + "/" + fullFileName;
      LOG.info("Creating " + bucketPath);
      callWithTimeout(new CallRunner<Void>() {
        @Override
        public Void call() throws Exception {
          if (codeC == null) {
            // Need to get reference to FS using above config before underlying
            // writer does in order to avoid shutdown hook &
            // IllegalStateExceptions
            if (!mockFsInjected) {
              fileSystem = new Path(bucketPath).getFileSystem(config);
            }
            writer.open(bucketPath);
          } else {
            // need to get reference to FS before writer does to
            // avoid shutdown hook
            if (!mockFsInjected) {
              fileSystem = new Path(bucketPath).getFileSystem(config);
            }
            writer.open(bucketPath, codeC, compType);
          }
          return null;
        }
      });
    } catch (Exception ex) {
      sinkCounter.incrementConnectionFailedCount();
      if (ex instanceof IOException) {
        throw (IOException) ex;
      } else {
        throw Throwables.propagate(ex);
      }
    }
  }
  // Resolve isFileClosed() against the (possibly new) filesystem instance.
  isClosedMethod = getRefIsClosed();
  sinkCounter.incrementConnectionCreatedCount();
  resetCounters();
  // if time-based rolling is enabled, schedule the roll
  if (rollInterval > 0) {
    Callable<Void> action = new Callable<Void>() {
      public Void call() throws Exception {
        LOG.debug("Rolling file ({}): Roll scheduled after {} sec elapsed.",
            bucketPath, rollInterval);
        try {
          // Roll the file and remove reference from sfWriters map.
          close(true);
        } catch (Throwable t) {
          // Swallow so a failed roll never kills the scheduler thread.
          LOG.error("Unexpected error", t);
        }
        return null;
      }
    };
    timedRollFuture = timedRollerPool.schedule(action, rollInterval,
        TimeUnit.SECONDS);
  }
  isOpen = true;
}
/**
 * Close the file handle and rename the temp file to the permanent filename.
 * Safe to call multiple times. Logs HDFSWriter.close() exceptions. This
 * method will not cause the bucket writer to be dereferenced from the HDFS
 * sink that owns it. This method should be used only when size or count
 * based rolling closes this file.
 */
public void close() throws InterruptedException {
  // Delegates to the non-callback-invoking variant.
  close(false);
}
/**
 * Wraps writer.close() in a CallRunner so it can be executed under the
 * configured call timeout; close() may block on the filesystem.
 */
private CallRunner<Void> createCloseCallRunner() {
  return new CallRunner<Void>() {
    @Override
    public Void call() throws Exception {
      writer.close(); // could block
      return null;
    }
  };
}
  /**
   * Retries closing the underlying writer. Each failed close re-schedules
   * itself on timedRollerPool until the close succeeds, maxRetries attempts
   * have been made, or an immediate close was requested; after the final
   * failure, HDFS lease recovery is initiated.
   */
  private class CloseHandler implements Callable<Void> {
    // Snapshot of bucketPath at construction time; the outer writer's field
    // may later point at a new file while this handler is still retrying.
    private final String path = bucketPath;
    private int closeTries = 0;

    @Override
    public Void call() throws Exception {
      close(false);
      return null;
    }

    /**
     * Tries to close the writer. Repeats the close if the maximum number
     * of retries is not reached or an immediate close is not requested.
     * If all close attempts were unsuccessful we try to recover the lease.
     * @param immediate An immediate close is required
     */
    public void close(boolean immediate) {
      closeTries++;
      boolean shouldRetry = closeTries < maxRetries && !immediate;
      try {
        callWithTimeout(createCloseCallRunner());
        sinkCounter.incrementConnectionClosedCount();
      } catch (InterruptedException | IOException e) {
        LOG.warn("Closing file: " + path + " failed. Will " +
            "retry again in " + retryInterval + " seconds.", e);
        if (timedRollerPool != null && !timedRollerPool.isTerminated()) {
          if (shouldRetry) {
            timedRollerPool.schedule(this, retryInterval, TimeUnit.SECONDS);
          }
        } else {
          LOG.warn("Cannot retry close any more timedRollerPool is null or terminated");
        }
        if (!shouldRetry) {
          LOG.warn("Unsuccessfully attempted to close " + path + " " +
              maxRetries + " times. Initializing lease recovery.");
          sinkCounter.incrementConnectionFailedCount();
          recoverLease();
        }
      }
    }
  }
  /**
   * Retries renaming the in-use (.tmp) file to its final name. On failure it
   * re-schedules itself on timedRollerPool every retryInterval seconds until
   * the rename succeeds or maxRetries attempts have been made.
   */
  private class ScheduledRenameCallable implements Callable<Void> {
    // Snapshots taken at construction time; the outer writer's fields may be
    // repointed at a new file before this callable runs.
    private final String path = bucketPath;
    private final String finalPath = targetPath;
    private FileSystem fs = fileSystem;
    private int renameTries = 1; // one attempt is already done

    @Override
    public Void call() throws Exception {
      if (renameTries >= maxRetries) {
        LOG.warn("Unsuccessfully attempted to rename " + path + " " +
            maxRetries + " times. File may still be open.");
        return null;
      }
      renameTries++;
      try {
        renameBucket(path, finalPath, fs);
      } catch (Exception e) {
        LOG.warn("Renaming file: " + path + " failed. Will " +
            "retry again in " + retryInterval + " seconds.", e);
        timedRollerPool.schedule(this, retryInterval, TimeUnit.SECONDS);
        return null;
      }
      return null;
    }
  }
/**
* Tries to start the lease recovery process for the current bucketPath
* if the fileSystem is DistributedFileSystem.
* Catches and logs the IOException.
*/
private synchronized void recoverLease() {
if (bucketPath != null && fileSystem instanceof DistributedFileSystem) {
try {
LOG.debug("Starting lease recovery for {}", bucketPath);
((DistributedFileSystem) fileSystem).recoverLease(new Path(bucketPath));
} catch (IOException ex) {
LOG.warn("Lease recovery failed for {}", bucketPath, ex);
}
}
}
  /**
   * Closes the file. When callCloseCallback is true the sink's close callback
   * is also run (removing this writer from the sink's cache).
   */
  public void close(boolean callCloseCallback) throws InterruptedException {
    close(callCloseCallback, false);
  }
  /**
   * Close the file handle and rename the temp file to the permanent filename.
   * Safe to call multiple times. Logs HDFSWriter.close() exceptions.
   *
   * @param callCloseCallback whether to run the sink's close callback
   * @param immediate skip close retries and close with a single attempt
   */
  public void close(boolean callCloseCallback, boolean immediate)
      throws InterruptedException {
    if (callCloseCallback) {
      // compareAndSet guarantees the callback runs at most once per writer.
      if (closed.compareAndSet(false, true)) {
        runCloseAction(); //remove from the cache as soon as possible
      } else {
        LOG.warn("This bucketWriter is already closing or closed.");
      }
    }
    doClose(immediate);
  }
  /**
   * Performs the actual close: flushes pending data, closes the writer via a
   * retrying CloseHandler, cancels outstanding timed-roll and idle-close
   * tasks, and renames the .tmp file to its final name (scheduling a retry
   * callable if the rename fails).
   *
   * @param immediate close with a single attempt, no retries
   */
  private synchronized void doClose(boolean immediate)
      throws InterruptedException {
    checkAndThrowInterruptedException();
    try {
      flush();
    } catch (IOException e) {
      // best-effort: a failed flush must not prevent the close/rename below
      LOG.warn("pre-close flush failed", e);
    }
    LOG.info("Closing {}", bucketPath);
    if (isOpen) {
      new CloseHandler().close(immediate);
      isOpen = false;
    } else {
      LOG.info("HDFSWriter is already closed: {}", bucketPath);
    }

    // NOTE: timed rolls go through this codepath as well as other roll types
    if (timedRollFuture != null && !timedRollFuture.isDone()) {
      timedRollFuture.cancel(false); // do not cancel myself if running!
      timedRollFuture = null;
    }
    if (idleFuture != null && !idleFuture.isDone()) {
      idleFuture.cancel(false); // do not cancel myself if running!
      idleFuture = null;
    }

    if (bucketPath != null && fileSystem != null) {
      // could block or throw IOException
      try {
        renameBucket(bucketPath, targetPath, fileSystem);
      } catch (Exception e) {
        LOG.warn("failed to rename() file (" + bucketPath +
            "). Exception follows.", e);
        sinkCounter.incrementConnectionFailedCount();
        final Callable<Void> scheduledRename = new ScheduledRenameCallable();
        timedRollerPool.schedule(scheduledRename, retryInterval, TimeUnit.SECONDS);
      }
    }
  }
  /**
   * Flushes any buffered data to HDFS and, when an idle timeout is
   * configured, (re-)schedules the idle-close task for this writer.
   *
   * @throws IOException if the underlying sync fails
   * @throws InterruptedException if this thread was interrupted
   */
  public synchronized void flush() throws IOException, InterruptedException {
    checkAndThrowInterruptedException();
    if (!isBatchComplete()) {
      doFlush();

      if (idleTimeout > 0) {
        // if the future exists and couldn't be cancelled, that would mean it has already run
        // or been cancelled
        if (idleFuture == null || idleFuture.cancel(false)) {
          Callable<Void> idleAction = new Callable<Void>() {
            public Void call() throws Exception {
              LOG.info("Closing idle bucketWriter {} at {}", bucketPath,
                  System.currentTimeMillis());
              if (isOpen) {
                // run the close callback too, so the sink drops this writer
                close(true);
              }
              return null;
            }
          };
          idleFuture = timedRollerPool.schedule(idleAction, idleTimeout,
              TimeUnit.SECONDS);
        }
      }
    }
  }
private void runCloseAction() {
try {
if (onCloseCallback != null) {
onCloseCallback.run(onCloseCallbackPath);
}
} catch (Throwable t) {
LOG.error("Unexpected error", t);
}
}
  /**
   * doFlush() must only be called by flush()
   * Syncs the writer (via callWithTimeout) and resets batchCounter so that
   * isBatchComplete() reports true afterwards.
   * @throws IOException
   */
  private void doFlush() throws IOException, InterruptedException {
    callWithTimeout(new CallRunner<Void>() {
      @Override
      public Void call() throws Exception {
        writer.sync(); // could block
        return null;
      }
    });
    batchCounter = 0;
  }
  /**
   * Open file handles, write data, update stats, handle file rolling and
   * batching / flushing. <br />
   * If the write fails, the file is implicitly closed and then the IOException
   * is rethrown. <br />
   * We rotate before append, and not after, so that the active file rolling
   * mechanism will never roll an empty file. This also ensures that the file
   * creation time reflects when the first event was written.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  public synchronized void append(final Event event)
      throws IOException, InterruptedException {
    checkAndThrowInterruptedException();

    // If idleFuture is not null, cancel it before we move forward to avoid a
    // close call in the middle of the append.
    if (idleFuture != null) {
      idleFuture.cancel(false);

      // There is still a small race condition - if the idleFuture is already
      // running, interrupting it can cause HDFS close operation to throw -
      // so we cannot interrupt it while running. If the future could not be
      // cancelled, it is already running - wait for it to finish before
      // attempting to write.
      if (!idleFuture.isDone()) {
        try {
          idleFuture.get(callTimeout, TimeUnit.MILLISECONDS);
        } catch (TimeoutException ex) {
          LOG.warn("Timeout while trying to cancel closing of idle file. Idle" +
              " file close may have failed", ex);
        } catch (Exception ex) {
          LOG.warn("Error while trying to cancel closing of idle file. ", ex);
        }
      }
      idleFuture = null;
    }

    // If the bucket writer was closed due to roll timeout or idle timeout,
    // force a new bucket writer to be created. Roll count and roll size will
    // just reuse this one
    if (!isOpen) {
      if (closed.get()) {
        throw new BucketClosedException("This bucket writer was closed and " +
            "this handle is thus no longer valid");
      }
      open();
    }

    // check if it's time to rotate the file
    if (shouldRotate()) {
      boolean doRotate = true;

      if (isUnderReplicated) {
        // cap how many times in a row we roll purely due to under-replication
        if (maxConsecUnderReplRotations > 0 &&
            consecutiveUnderReplRotateCount >= maxConsecUnderReplRotations) {
          doRotate = false;
          if (consecutiveUnderReplRotateCount == maxConsecUnderReplRotations) {
            LOG.error("Hit max consecutive under-replication rotations ({}); " +
                "will not continue rolling files under this path due to " +
                "under-replication", maxConsecUnderReplRotations);
          }
        } else {
          LOG.warn("Block Under-replication detected. Rotating file.");
        }
        consecutiveUnderReplRotateCount++;
      } else {
        consecutiveUnderReplRotateCount = 0;
      }

      if (doRotate) {
        close();
        open();
      }
    }

    // write the event
    try {
      sinkCounter.incrementEventDrainAttemptCount();
      callWithTimeout(new CallRunner<Void>() {
        @Override
        public Void call() throws Exception {
          writer.append(event); // could block
          return null;
        }
      });
    } catch (IOException e) {
      LOG.warn("Caught IOException writing to HDFSWriter ({}). Closing file (" +
          bucketPath + ") and rethrowing exception.",
          e.getMessage());
      close(true);
      throw e;
    }

    // update statistics
    processSize += event.getBody().length;
    eventCounter++;
    batchCounter++;

    // flush once a full batch has accumulated
    if (batchCounter == batchSize) {
      flush();
    }
  }
/**
* check if time to rotate the file
*/
private boolean shouldRotate() {
boolean doRotate = false;
if (writer.isUnderReplicated()) {
this.isUnderReplicated = true;
doRotate = true;
} else {
this.isUnderReplicated = false;
}
if ((rollCount > 0) && (rollCount <= eventCounter)) {
LOG.debug("rolling: rollCount: {}, events: {}", rollCount, eventCounter);
doRotate = true;
}
if ((rollSize > 0) && (rollSize <= processSize)) {
LOG.debug("rolling: rollSize: {}, bytes: {}", rollSize, processSize);
doRotate = true;
}
return doRotate;
}
  /**
   * Rename bucketPath file from .tmp to permanent location.
   * No-op when source and target are identical; the rename only happens when
   * the source file actually exists.
   */
  // When this bucket writer is rolled based on rollCount or
  // rollSize, the same instance is reused for the new file. But if
  // the previous file was not closed/renamed,
  // the bucket writer fields no longer point to it and hence need
  // to be passed in from the thread attempting to close it. Even
  // when the bucket writer is closed due to close timeout,
  // this method can get called from the scheduled thread so the
  // file gets closed later - so an implicit reference to this
  // bucket writer would still be alive in the Callable instance.
  private void renameBucket(String bucketPath, String targetPath, final FileSystem fs)
      throws IOException, InterruptedException {
    if (bucketPath.equals(targetPath)) {
      return;
    }

    final Path srcPath = new Path(bucketPath);
    final Path dstPath = new Path(targetPath);

    callWithTimeout(new CallRunner<Void>() {
      @Override
      public Void call() throws Exception {
        if (fs.exists(srcPath)) { // could block
          LOG.info("Renaming " + srcPath + " to " + dstPath);
          renameTries.incrementAndGet();
          fs.rename(srcPath, dstPath); // could block
        }
        return null;
      }
    });
  }
@Override
public String toString() {
return "[ " + this.getClass().getSimpleName() + " targetPath = " + targetPath +
", bucketPath = " + bucketPath + " ]";
}
private boolean isBatchComplete() {
return (batchCounter == 0);
}
/**
* This method if the current thread has been interrupted and throws an
* exception.
* @throws InterruptedException
*/
private static void checkAndThrowInterruptedException()
throws InterruptedException {
if (Thread.currentThread().interrupted()) {
throw new InterruptedException("Timed out before HDFS call was made. "
+ "Your hdfs.callTimeout might be set too low or HDFS calls are "
+ "taking too long.");
}
}
  /**
   * Execute the callable on a separate thread and wait for the completion
   * for the specified amount of time in milliseconds. In case of timeout
   * cancel the callable and throw an IOException. The callable runs under
   * the sink's proxy user (PrivilegedExceptionAction).
   *
   * @throws IOException on timeout or when the callable threw an IOException
   * @throws InterruptedException if waiting was interrupted or the callable
   *         threw an InterruptedException
   */
  private <T> T callWithTimeout(final CallRunner<T> callRunner)
      throws IOException, InterruptedException {
    Future<T> future = callTimeoutPool.submit(new Callable<T>() {
      @Override
      public T call() throws Exception {
        return proxyUser.execute(new PrivilegedExceptionAction<T>() {
          @Override
          public T run() throws Exception {
            return callRunner.call();
          }
        });
      }
    });
    try {
      if (callTimeout > 0) {
        return future.get(callTimeout, TimeUnit.MILLISECONDS);
      } else {
        return future.get();
      }
    } catch (TimeoutException eT) {
      future.cancel(true);
      sinkCounter.incrementConnectionFailedCount();
      throw new IOException("Callable timed out after " +
          callTimeout + " ms" + " on file: " + bucketPath, eT);
    } catch (ExecutionException e1) {
      sinkCounter.incrementConnectionFailedCount();
      // Unwrap the cause and rethrow it with its original type so callers
      // see the same exception the callable raised.
      Throwable cause = e1.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      } else if (cause instanceof InterruptedException) {
        throw (InterruptedException) cause;
      } else if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause;
      } else if (cause instanceof Error) {
        throw (Error)cause;
      } else {
        throw new RuntimeException(e1);
      }
    } catch (CancellationException ce) {
      throw new InterruptedException(
          "Blocked callable interrupted by rotation event");
    } catch (InterruptedException ex) {
      LOG.warn("Unexpected Exception " + ex.getMessage(), ex);
      throw ex;
    }
  }
  /**
   * Simple interface whose <tt>call</tt> method is called by
   * {@link #callWithTimeout} in a new thread inside a
   * {@linkplain java.security.PrivilegedExceptionAction#run()} call.
   * @param <T> result type of the call
   */
  private interface CallRunner<T> {
    T call() throws Exception;
  }
}
| 9,814 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/AvroEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.io.DatumWriter;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.conf.Configurable;
import org.apache.flume.serialization.EventSerializer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.COMPRESSION_CODEC;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.DEFAULT_COMPRESSION_CODEC;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.DEFAULT_STATIC_SCHEMA_URL;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.DEFAULT_SYNC_INTERVAL_BYTES;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.STATIC_SCHEMA_URL;
import static org.apache.flume.serialization.AvroEventSerializerConfigurationConstants.SYNC_INTERVAL_BYTES;
/**
* <p>
* This class serializes Flume {@linkplain org.apache.flume.Event events} into Avro data files. The
* Flume event body is read as an Avro datum, and is then written to the
* {@link org.apache.flume.serialization.EventSerializer}'s output stream in Avro data file format.
* </p>
* <p>
* The Avro schema is determined by reading a Flume event header. The schema may be
* specified either as a literal, by setting {@link #AVRO_SCHEMA_LITERAL_HEADER} (not
* recommended, since the full schema must be transmitted in every event),
* or as a URL which the schema may be read from, by setting {@link
* #AVRO_SCHEMA_URL_HEADER}. Schemas read from URLs are cached by instances of this
* class so that the overhead of retrieval is minimized.
* </p>
*/
public class AvroEventSerializer implements EventSerializer, Configurable {
private static final Logger logger =
LoggerFactory.getLogger(AvroEventSerializer.class);
public static final String AVRO_SCHEMA_LITERAL_HEADER = "flume.avro.schema.literal";
public static final String AVRO_SCHEMA_URL_HEADER = "flume.avro.schema.url";
private final OutputStream out;
private DatumWriter<Object> writer = null;
private DataFileWriter<Object> dataFileWriter = null;
private int syncIntervalBytes;
private String compressionCodec;
private Map<String, Schema> schemaCache = new HashMap<String, Schema>();
private String staticSchemaURL;
private AvroEventSerializer(OutputStream out) {
this.out = out;
}
@Override
public void configure(Context context) {
syncIntervalBytes =
context.getInteger(SYNC_INTERVAL_BYTES, DEFAULT_SYNC_INTERVAL_BYTES);
compressionCodec =
context.getString(COMPRESSION_CODEC, DEFAULT_COMPRESSION_CODEC);
staticSchemaURL = context.getString(STATIC_SCHEMA_URL, DEFAULT_STATIC_SCHEMA_URL);
}
@Override
public void afterCreate() throws IOException {
// no-op
}
@Override
public void afterReopen() throws IOException {
// impossible to initialize DataFileWriter without writing the schema?
throw new UnsupportedOperationException("Avro API doesn't support append");
}
@Override
public void write(Event event) throws IOException {
if (dataFileWriter == null) {
initialize(event);
}
dataFileWriter.appendEncoded(ByteBuffer.wrap(event.getBody()));
}
private void initialize(Event event) throws IOException {
Schema schema = null;
String schemaUrl = event.getHeaders().get(AVRO_SCHEMA_URL_HEADER);
String schemaString = event.getHeaders().get(AVRO_SCHEMA_LITERAL_HEADER);
if (schemaUrl != null) { // if URL_HEADER is there then use it
schema = schemaCache.get(schemaUrl);
if (schema == null) {
schema = loadFromUrl(schemaUrl);
schemaCache.put(schemaUrl, schema);
}
} else if (schemaString != null) { // fallback to LITERAL_HEADER if it was there
schema = new Schema.Parser().parse(schemaString);
} else if (staticSchemaURL != null) { // fallback to static url if it was there
schema = schemaCache.get(staticSchemaURL);
if (schema == null) {
schema = loadFromUrl(staticSchemaURL);
schemaCache.put(staticSchemaURL, schema);
}
} else { // no other options so giving up
throw new FlumeException("Could not find schema for event " + event);
}
writer = new GenericDatumWriter<Object>(schema);
dataFileWriter = new DataFileWriter<Object>(writer);
dataFileWriter.setSyncInterval(syncIntervalBytes);
try {
CodecFactory codecFactory = CodecFactory.fromString(compressionCodec);
dataFileWriter.setCodec(codecFactory);
} catch (AvroRuntimeException e) {
logger.warn("Unable to instantiate avro codec with name (" +
compressionCodec + "). Compression disabled. Exception follows.", e);
}
dataFileWriter.create(schema, out);
}
private Schema loadFromUrl(String schemaUrl) throws IOException {
Configuration conf = new Configuration();
Schema.Parser parser = new Schema.Parser();
if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
FileSystem fs = FileSystem.get(conf);
FSDataInputStream input = null;
try {
input = fs.open(new Path(schemaUrl));
return parser.parse(input);
} finally {
if (input != null) {
input.close();
}
}
} else {
InputStream is = null;
try {
is = new URL(schemaUrl).openStream();
return parser.parse(is);
} finally {
if (is != null) {
is.close();
}
}
}
}
@Override
public void flush() throws IOException {
dataFileWriter.flush();
}
@Override
public void beforeClose() throws IOException {
// no-op
}
@Override
public boolean supportsReopen() {
return false;
}
public static class Builder implements EventSerializer.Builder {
@Override
public EventSerializer build(Context context, OutputStream out) {
AvroEventSerializer writer = new AvroEventSerializer(out);
writer.configure(context);
return writer;
}
}
}
| 9,815 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSSequenceFile.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * HDFSWriter implementation that writes events into a Hadoop SequenceFile,
 * serializing each event into key/value records via a configurable
 * SequenceFileSerializer.
 */
public class HDFSSequenceFile extends AbstractHDFSWriter {

  private static final Logger logger =
      LoggerFactory.getLogger(HDFSSequenceFile.class);
  private SequenceFile.Writer writer;
  private String writeFormat;
  private Context serializerContext;
  private SequenceFileSerializer serializer;
  private boolean useRawLocalFileSystem;
  private FSDataOutputStream outStream = null;

  public HDFSSequenceFile() {
    writer = null;
  }

  @Override
  public void configure(Context context) {
    super.configure(context);
    // use binary writable serialize by default
    writeFormat = context.getString("hdfs.writeFormat",
        SequenceFileSerializerType.Writable.name());
    useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
        false);
    serializerContext = new Context(
        context.getSubProperties(SequenceFileSerializerFactory.CTX_PREFIX));
    serializer = SequenceFileSerializerFactory
        .getSerializer(writeFormat, serializerContext);
    logger.info("writeFormat = " + writeFormat + ", UseRawLocalFileSystem = "
        + useRawLocalFileSystem);
  }

  /** Opens the file uncompressed (convenience overload). */
  @Override
  public void open(String filePath) throws IOException {
    open(filePath, null, CompressionType.NONE);
  }

  @Override
  public void open(String filePath, CompressionCodec codeC,
      CompressionType compType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    open(dstPath, codeC, compType, conf, hdfs);
  }

  /**
   * Opens (creating or appending to) the destination file and wraps it in a
   * SequenceFile writer using the configured serializer's key/value classes.
   */
  protected void open(Path dstPath, CompressionCodec codeC,
      CompressionType compType, Configuration conf, FileSystem hdfs)
          throws IOException {
    if (useRawLocalFileSystem) {
      if (hdfs instanceof LocalFileSystem) {
        hdfs = ((LocalFileSystem)hdfs).getRaw();
      } else {
        logger.warn("useRawLocalFileSystem is set to true but file system " +
            "is not of type LocalFileSystem: " + hdfs.getClass().getName());
      }
    }
    // Fixed: dropped the redundant "== true" comparison on the boolean.
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      outStream = hdfs.append(dstPath);
    } else {
      outStream = hdfs.create(dstPath);
    }
    writer = SequenceFile.createWriter(conf, outStream,
        serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

    registerCurrentStream(outStream, hdfs, dstPath);
  }

  /** Serializes the event into one or more records and appends them. */
  @Override
  public void append(Event e) throws IOException {
    for (SequenceFileSerializer.Record record : serializer.serialize(e)) {
      writer.append(record.getKey(), record.getValue());
    }
  }

  @Override
  public void sync() throws IOException {
    writer.sync();
    hflushOrSync(outStream);
  }

  @Override
  public void close() throws IOException {
    writer.close();
    outStream.close();
    unregisterCurrentStream();
  }
}
| 9,816 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSEventSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.Map.Entry;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.flume.Channel;
import org.apache.flume.Clock;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.SystemClock;
import org.apache.flume.Transaction;
import org.apache.flume.auth.FlumeAuthenticationUtil;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurable;
import org.apache.flume.formatter.output.BucketPath;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
public class HDFSEventSink extends AbstractSink implements Configurable, BatchSizeSupported {
public interface WriterCallback {
public void run(String filePath);
}
  private static final Logger LOG = LoggerFactory.getLogger(HDFSEventSink.class);

  private static String DIRECTORY_DELIMITER = System.getProperty("file.separator");

  // Defaults for the hdfs.* configuration knobs read in configure().
  private static final long defaultRollInterval = 30;
  private static final long defaultRollSize = 1024;
  private static final long defaultRollCount = 10;
  private static final String defaultFileName = "FlumeData";
  private static final String defaultSuffix = "";
  private static final String defaultInUsePrefix = "";
  private static final String defaultInUseSuffix = ".tmp";
  private static final long defaultBatchSize = 100;
  private static final String defaultFileType = HDFSWriterFactory.SequenceFileType;
  private static final int defaultMaxOpenFiles = 5000;
  // Time between close retries, in seconds
  private static final long defaultRetryInterval = 180;
  // Retry forever.
  private static final int defaultTryCount = Integer.MAX_VALUE;

  public static final String IN_USE_SUFFIX_PARAM_NAME = "hdfs.inUseSuffix";

  /**
   * Default length of time we wait for blocking BucketWriter calls
   * before timing out the operation. Intended to prevent server hangs.
   */
  private static final long defaultCallTimeout = 30000;
  /**
   * Default number of threads available for tasks
   * such as append/open/close/flush with hdfs.
   * These tasks are done in a separate thread in
   * the case that they take too long. In which
   * case we create a new file and move on.
   */
  private static final int defaultThreadPoolSize = 10;
  private static final int defaultRollTimerPoolSize = 1;

  private final HDFSWriterFactory writerFactory;
  private WriterLinkedHashMap sfWriters; // LRU map of open bucket writers

  // Values populated from the Context in configure().
  private long rollInterval;
  private long rollSize;
  private long rollCount;
  private long batchSize;
  private int threadsPoolSize;
  private int rollTimerPoolSize;
  private CompressionCodec codeC;
  private CompressionType compType;
  private String fileType;
  private String filePath;
  private String fileName;
  private String suffix;
  private String inUsePrefix;
  private String inUseSuffix;
  private TimeZone timeZone;
  private int maxOpenFiles;
  private ExecutorService callTimeoutPool;
  private ScheduledExecutorService timedRollerPool;

  private boolean needRounding = false;
  private int roundUnit = Calendar.SECOND;
  private int roundValue = 1;
  private boolean useLocalTime = false;

  private long callTimeout;
  private Context context;
  private SinkCounter sinkCounter;

  private volatile int idleTimeout;
  private Clock clock;
  // NOTE(review): presumably test injection points (named "mock") — confirm
  // against the sink's test-only setters elsewhere in this file.
  private FileSystem mockFs;
  private HDFSWriter mockWriter;
  private final Object sfWritersLock = new Object();
  private long retryInterval;
  private int tryCount;
  private PrivilegedExecutor privExecutor;
/*
* Extended Java LinkedHashMap for open file handle LRU queue.
* We want to clear the oldest file handle if there are too many open ones.
*/
private static class WriterLinkedHashMap
extends LinkedHashMap<String, BucketWriter> {
private final int maxOpenFiles;
public WriterLinkedHashMap(int maxOpenFiles) {
super(16, 0.75f, true); // stock initial capacity/load, access ordering
this.maxOpenFiles = maxOpenFiles;
}
@Override
protected boolean removeEldestEntry(Entry<String, BucketWriter> eldest) {
if (size() > maxOpenFiles) {
// If we have more that max open files, then close the last one and
// return true
try {
eldest.getValue().close();
} catch (InterruptedException e) {
LOG.warn(eldest.getKey().toString(), e);
Thread.currentThread().interrupt();
}
return true;
} else {
return false;
}
}
}
  /** Creates a sink with the default HDFSWriterFactory. */
  public HDFSEventSink() {
    this(new HDFSWriterFactory());
  }
  /** Creates a sink using the given writer factory (injectable for tests). */
  public HDFSEventSink(HDFSWriterFactory writerFactory) {
    this.writerFactory = writerFactory;
  }
  // Exposes the live writer cache for test assertions only.
  @VisibleForTesting
  Map<String, BucketWriter> getSfWriters() {
    return sfWriters;
  }
  // read configuration and setup thresholds
  @Override
  public void configure(Context context) {
    this.context = context;

    filePath = Preconditions.checkNotNull(
        context.getString("hdfs.path"), "hdfs.path is required");
    fileName = context.getString("hdfs.filePrefix", defaultFileName);
    this.suffix = context.getString("hdfs.fileSuffix", defaultSuffix);
    inUsePrefix = context.getString("hdfs.inUsePrefix", defaultInUsePrefix);
    // hdfs.emptyInUseSuffix overrides hdfs.inUseSuffix entirely
    boolean emptyInUseSuffix = context.getBoolean("hdfs.emptyInUseSuffix", false);
    if (emptyInUseSuffix) {
      inUseSuffix = "";
      String tmpInUseSuffix = context.getString(IN_USE_SUFFIX_PARAM_NAME);
      if (tmpInUseSuffix != null) {
        LOG.warn("Ignoring parameter " + IN_USE_SUFFIX_PARAM_NAME + " for hdfs sink: " + getName());
      }
    } else {
      inUseSuffix = context.getString(IN_USE_SUFFIX_PARAM_NAME, defaultInUseSuffix);
    }
    String tzName = context.getString("hdfs.timeZone");
    timeZone = tzName == null ? null : TimeZone.getTimeZone(tzName);
    rollInterval = context.getLong("hdfs.rollInterval", defaultRollInterval);
    rollSize = context.getLong("hdfs.rollSize", defaultRollSize);
    rollCount = context.getLong("hdfs.rollCount", defaultRollCount);
    batchSize = context.getLong("hdfs.batchSize", defaultBatchSize);
    idleTimeout = context.getInteger("hdfs.idleTimeout", 0);
    String codecName = context.getString("hdfs.codeC");
    fileType = context.getString("hdfs.fileType", defaultFileType);
    maxOpenFiles = context.getInteger("hdfs.maxOpenFiles", defaultMaxOpenFiles);
    callTimeout = context.getLong("hdfs.callTimeout", defaultCallTimeout);
    threadsPoolSize = context.getInteger("hdfs.threadsPoolSize",
        defaultThreadPoolSize);
    rollTimerPoolSize = context.getInteger("hdfs.rollTimerPoolSize",
        defaultRollTimerPoolSize);
    String kerbConfPrincipal = context.getString("hdfs.kerberosPrincipal");
    String kerbKeytab = context.getString("hdfs.kerberosKeytab");
    String proxyUser = context.getString("hdfs.proxyUser");
    tryCount = context.getInteger("hdfs.closeTries", defaultTryCount);
    // Non-positive close-try count falls back to "retry forever".
    if (tryCount <= 0) {
      LOG.warn("Retry count value : " + tryCount + " is not " +
          "valid. The sink will try to close the file until the file " +
          "is eventually closed.");
      tryCount = defaultTryCount;
    }
    retryInterval = context.getLong("hdfs.retryInterval", defaultRetryInterval);
    // A non-positive retry interval disables retries: only a single close
    // attempt will be made (tryCount forced to 1).
    if (retryInterval <= 0) {
      LOG.warn("Retry Interval value: " + retryInterval + " is not " +
          "valid. If the first close of a file fails, " +
          "it may remain open and will not be renamed.");
      tryCount = 1;
    }

    Preconditions.checkArgument(batchSize > 0, "batchSize must be greater than 0");
    if (codecName == null) {
      codeC = null;
      compType = CompressionType.NONE;
    } else {
      codeC = getCodec(codecName);
      // TODO : set proper compression type
      compType = CompressionType.BLOCK;
    }

    // Do not allow user to set fileType DataStream with codeC together
    // To prevent output file with compress extension (like .snappy)
    if (fileType.equalsIgnoreCase(HDFSWriterFactory.DataStreamType) && codecName != null) {
      throw new IllegalArgumentException("fileType: " + fileType +
          " which does NOT support compressed output. Please don't set codeC" +
          " or change the fileType if compressed output is desired.");
    }

    if (fileType.equalsIgnoreCase(HDFSWriterFactory.CompStreamType)) {
      Preconditions.checkNotNull(codeC, "It's essential to set compress codec"
          + " when fileType is: " + fileType);
    }

    // get the appropriate executor
    this.privExecutor = FlumeAuthenticationUtil.getAuthenticator(
            kerbConfPrincipal, kerbKeytab).proxyAs(proxyUser);

    // Timestamp rounding (hdfs.round*): only second/minute/hour are valid
    // units, with range checks on the round value per unit.
    needRounding = context.getBoolean("hdfs.round", false);

    if (needRounding) {
      String unit = context.getString("hdfs.roundUnit", "second");
      if (unit.equalsIgnoreCase("hour")) {
        this.roundUnit = Calendar.HOUR_OF_DAY;
      } else if (unit.equalsIgnoreCase("minute")) {
        this.roundUnit = Calendar.MINUTE;
      } else if (unit.equalsIgnoreCase("second")) {
        this.roundUnit = Calendar.SECOND;
      } else {
        LOG.warn("Rounding unit is not valid, please set one of" +
            "minute, hour, or second. Rounding will be disabled");
        needRounding = false;
      }
      this.roundValue = context.getInteger("hdfs.roundValue", 1);
      if (roundUnit == Calendar.SECOND || roundUnit == Calendar.MINUTE) {
        Preconditions.checkArgument(roundValue > 0 && roundValue <= 60,
            "Round value" +
            "must be > 0 and <= 60");
      } else if (roundUnit == Calendar.HOUR_OF_DAY) {
        Preconditions.checkArgument(roundValue > 0 && roundValue <= 24,
            "Round value" +
            "must be > 0 and <= 24");
      }
    }

    this.useLocalTime = context.getBoolean("hdfs.useLocalTimeStamp", false);
    if (useLocalTime) {
      clock = new SystemClock();
    }

    if (sinkCounter == null) {
      sinkCounter = new SinkCounter(getName());
    }
  }
private static boolean codecMatches(Class<? extends CompressionCodec> cls, String codecName) {
String simpleName = cls.getSimpleName();
if (cls.getName().equals(codecName) || simpleName.equalsIgnoreCase(codecName)) {
return true;
}
if (simpleName.endsWith("Codec")) {
String prefix = simpleName.substring(0, simpleName.length() - "Codec".length());
if (prefix.equalsIgnoreCase(codecName)) {
return true;
}
}
return false;
}
@VisibleForTesting
static CompressionCodec getCodec(String codecName) {
Configuration conf = new Configuration();
List<Class<? extends CompressionCodec>> codecs = CompressionCodecFactory.getCodecClasses(conf);
// Wish we could base this on DefaultCodec but appears not all codec's
// extend DefaultCodec(Lzo)
CompressionCodec codec = null;
ArrayList<String> codecStrs = new ArrayList<String>();
codecStrs.add("None");
for (Class<? extends CompressionCodec> cls : codecs) {
codecStrs.add(cls.getSimpleName());
if (codecMatches(cls, codecName)) {
try {
codec = cls.newInstance();
} catch (InstantiationException e) {
LOG.error("Unable to instantiate " + cls + " class");
} catch (IllegalAccessException e) {
LOG.error("Unable to access " + cls + " class");
}
}
}
if (codec == null) {
if (!codecName.equalsIgnoreCase("None")) {
throw new IllegalArgumentException("Unsupported compression codec "
+ codecName + ". Please choose from: " + codecStrs);
}
} else if (codec instanceof org.apache.hadoop.conf.Configurable) {
// Must check instanceof codec as BZip2Codec doesn't inherit Configurable
// Must set the configuration for Configurable objects that may or do use
// native libs
((org.apache.hadoop.conf.Configurable) codec).setConf(conf);
}
return codec;
}
  /**
   * Pull events out of channel and send it to HDFS. Take at most batchSize
   * events per Transaction. Find the corresponding bucket for the event.
   * Ensure the file is open. Serialize the data and write it to the file on
   * HDFS. <br/>
   * This method is not thread safe.
   *
   * @return BACKOFF if the channel yielded no events, READY otherwise
   * @throws EventDeliveryException on any non-IOException failure
   *         (IOExceptions are logged and reported as BACKOFF instead)
   */
  public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();
    transaction.begin();
    try {
      // Buckets touched in this transaction, in first-write order, so they
      // can all be flushed before commit.
      Set<BucketWriter> writers = new LinkedHashSet<>();
      int txnEventCount = 0;
      for (txnEventCount = 0; txnEventCount < batchSize; txnEventCount++) {
        Event event = channel.take();
        if (event == null) {
          // Channel drained; commit a partial (possibly empty) batch.
          break;
        }
        // reconstruct the path name by substituting place holders
        String realPath = BucketPath.escapeString(filePath, event.getHeaders(),
            timeZone, needRounding, roundUnit, roundValue, useLocalTime);
        String realName = BucketPath.escapeString(fileName, event.getHeaders(),
            timeZone, needRounding, roundUnit, roundValue, useLocalTime);
        String lookupPath = realPath + DIRECTORY_DELIMITER + realName;
        BucketWriter bucketWriter;
        HDFSWriter hdfsWriter = null;
        // Callback to remove the reference to the bucket writer from the
        // sfWriters map so that all buffers used by the HDFS file
        // handles are garbage collected.
        WriterCallback closeCallback = new WriterCallback() {
          @Override
          public void run(String bucketPath) {
            LOG.info("Writer callback called.");
            synchronized (sfWritersLock) {
              sfWriters.remove(bucketPath);
            }
          }
        };
        // Look up (or lazily create) the writer for this bucket path under
        // the shared lock, since the close callback mutates the same map.
        synchronized (sfWritersLock) {
          bucketWriter = sfWriters.get(lookupPath);
          // we haven't seen this file yet, so open it and cache the handle
          if (bucketWriter == null) {
            hdfsWriter = writerFactory.getWriter(fileType);
            bucketWriter = initializeBucketWriter(realPath, realName,
                lookupPath, hdfsWriter, closeCallback);
            sfWriters.put(lookupPath, bucketWriter);
          }
        }
        // Write the data to HDFS
        try {
          bucketWriter.append(event);
        } catch (BucketClosedException ex) {
          // The bucket was closed between lookup and append (e.g. by a timed
          // roll); rebuild the writer and retry this single event once.
          LOG.info("Bucket was closed while trying to append, " +
              "reinitializing bucket and writing event.");
          hdfsWriter = writerFactory.getWriter(fileType);
          bucketWriter = initializeBucketWriter(realPath, realName,
              lookupPath, hdfsWriter, closeCallback);
          synchronized (sfWritersLock) {
            sfWriters.put(lookupPath, bucketWriter);
          }
          bucketWriter.append(event);
        }
        // track the buckets getting written in this transaction
        if (!writers.contains(bucketWriter)) {
          writers.add(bucketWriter);
        }
      }
      // Counter bookkeeping: empty / full / partial batch.
      if (txnEventCount == 0) {
        sinkCounter.incrementBatchEmptyCount();
      } else if (txnEventCount == batchSize) {
        sinkCounter.incrementBatchCompleteCount();
      } else {
        sinkCounter.incrementBatchUnderflowCount();
      }
      // flush all pending buckets before committing the transaction
      for (BucketWriter bucketWriter : writers) {
        bucketWriter.flush();
      }
      transaction.commit();
      if (txnEventCount < 1) {
        return Status.BACKOFF;
      } else {
        sinkCounter.addToEventDrainSuccessCount(txnEventCount);
        return Status.READY;
      }
    } catch (IOException eIO) {
      // HDFS I/O problems are treated as transient: roll back and back off.
      transaction.rollback();
      LOG.warn("HDFS IO error", eIO);
      sinkCounter.incrementEventWriteFail();
      return Status.BACKOFF;
    } catch (Throwable th) {
      // Anything else is fatal for this batch; Errors are rethrown as-is.
      transaction.rollback();
      LOG.error("process failed", th);
      sinkCounter.incrementEventWriteOrChannelFail(th);
      if (th instanceof Error) {
        throw (Error) th;
      } else {
        throw new EventDeliveryException(th);
      }
    } finally {
      transaction.close();
    }
  }
@VisibleForTesting
BucketWriter initializeBucketWriter(String realPath,
String realName, String lookupPath, HDFSWriter hdfsWriter,
WriterCallback closeCallback) {
HDFSWriter actualHdfsWriter = mockFs == null ? hdfsWriter : mockWriter;
BucketWriter bucketWriter = new BucketWriter(rollInterval,
rollSize, rollCount,
batchSize, context, realPath, realName, inUsePrefix, inUseSuffix,
suffix, codeC, compType, actualHdfsWriter, timedRollerPool,
privExecutor, sinkCounter, idleTimeout, closeCallback,
lookupPath, callTimeout, callTimeoutPool, retryInterval,
tryCount);
if (mockFs != null) {
bucketWriter.setFileSystem(mockFs);
}
return bucketWriter;
}
@Override
public void stop() {
// do not constrain close() calls with a timeout
synchronized (sfWritersLock) {
for (Entry<String, BucketWriter> entry : sfWriters.entrySet()) {
LOG.info("Closing {}", entry.getKey());
try {
entry.getValue().close(false, true);
} catch (Exception ex) {
LOG.warn("Exception while closing " + entry.getKey() + ". " +
"Exception follows.", ex);
if (ex instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
}
}
}
// shut down all our thread pools
ExecutorService[] toShutdown = { callTimeoutPool, timedRollerPool };
for (ExecutorService execService : toShutdown) {
execService.shutdown();
try {
while (execService.isTerminated() == false) {
execService.awaitTermination(
Math.max(defaultCallTimeout, callTimeout), TimeUnit.MILLISECONDS);
}
} catch (InterruptedException ex) {
LOG.warn("shutdown interrupted on " + execService, ex);
}
}
callTimeoutPool = null;
timedRollerPool = null;
synchronized (sfWritersLock) {
sfWriters.clear();
sfWriters = null;
}
sinkCounter.stop();
super.stop();
}
@Override
public void start() {
String timeoutName = "hdfs-" + getName() + "-call-runner-%d";
callTimeoutPool = Executors.newFixedThreadPool(threadsPoolSize,
new ThreadFactoryBuilder().setNameFormat(timeoutName).build());
String rollerName = "hdfs-" + getName() + "-roll-timer-%d";
timedRollerPool = Executors.newScheduledThreadPool(rollTimerPoolSize,
new ThreadFactoryBuilder().setNameFormat(rollerName).build());
this.sfWriters = new WriterLinkedHashMap(maxOpenFiles);
sinkCounter.start();
super.start();
}
@Override
public String toString() {
return "{ Sink type:" + getClass().getSimpleName() + ", name:" + getName() +
" }";
}
  // Installs the given clock on BucketPath so tests can make the
  // time-based path escaping deterministic.
  @VisibleForTesting
  void setBucketClock(Clock clock) {
    BucketPath.setClock(clock);
  }
  // Injects a mock FileSystem; when set, initializeBucketWriter() uses the
  // mock writer and attaches this file system to new BucketWriters.
  @VisibleForTesting
  void setMockFs(FileSystem mockFs) {
    this.mockFs = mockFs;
  }
  // Injects the mock HDFSWriter used together with the mock FileSystem.
  @VisibleForTesting
  void setMockWriter(HDFSWriter writer) {
    this.mockWriter = writer;
  }
  // Exposes the configured hdfs.closeTries value for test assertions.
  @VisibleForTesting
  int getTryCount() {
    return tryCount;
  }
  // Maximum number of events taken from the channel per transaction.
  @Override
  public long getBatchSize() {
    return batchSize;
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.flume.sink.hdfs;
/**
* Simple Pair class used to define a unique (principal, keyTab) combination.
*/
public class KerberosUser {
private final String principal;
private final String keyTab;
public KerberosUser(String principal, String keyTab) {
this.principal = principal;
this.keyTab = keyTab;
}
public String getPrincipal() {
return principal;
}
public String getKeyTab() {
return keyTab;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final KerberosUser other = (KerberosUser) obj;
if ((this.principal == null) ?
(other.principal != null) :
!this.principal.equals(other.principal)) {
return false;
}
if ((this.keyTab == null) ? (other.keyTab != null) : !this.keyTab.equals(other.keyTab)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = 7;
hash = 41 * hash + (this.principal != null ? this.principal.hashCode() : 0);
hash = 41 * hash + (this.keyTab != null ? this.keyTab.hashCode() : 0);
return hash;
}
@Override
public String toString() {
return "{ principal: " + principal + ", keytab: " + keyTab + " }";
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.base.Preconditions;
import org.apache.flume.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SequenceFileSerializerFactory {

  private static final Logger logger =
      LoggerFactory.getLogger(SequenceFileSerializerFactory.class);

  /**
   * {@link Context} prefix
   */
  static final String CTX_PREFIX = "writeFormat.";

  /**
   * Resolves a serializer by format type: either one of the names in
   * {@link SequenceFileSerializerType} or a fully-qualified class name of a
   * user-supplied {@link SequenceFileSerializer.Builder}. Failures are logged
   * and reported by returning null rather than by throwing.
   *
   * @param formatType serializer type name or builder class name; must not be null
   * @param context configuration passed to the builder
   * @return the built serializer, or null if the builder could not be
   *         resolved or instantiated
   */
  @SuppressWarnings("unchecked")
  static SequenceFileSerializer getSerializer(String formatType,
      Context context) {
    Preconditions.checkNotNull(formatType,
        "serialize type must not be null");

    // try to find builder class in enum of known formatters
    SequenceFileSerializerType type;
    try {
      type = SequenceFileSerializerType.valueOf(formatType);
    } catch (IllegalArgumentException e) {
      logger.debug("Not in enum, loading builder class: {}", formatType);
      type = SequenceFileSerializerType.Other;
    }

    Class<? extends SequenceFileSerializer.Builder> builderClass =
        type.getBuilderClass();

    // handle the case where they have specified their own builder in the config
    if (builderClass == null) {
      try {
        // Class.forName never returns null, so the isAssignableFrom check is
        // the only validation required here.
        Class<?> c = Class.forName(formatType);
        if (SequenceFileSerializer.Builder.class.isAssignableFrom(c)) {
          builderClass = (Class<? extends SequenceFileSerializer.Builder>) c;
        } else {
          logger.error("Unable to instantiate Builder from {}", formatType);
          return null;
        }
      } catch (ClassNotFoundException ex) {
        logger.error("Class not found: " + formatType, ex);
        return null;
      }
    }

    // build the builder; getDeclaredConstructor().newInstance() replaces the
    // deprecated Class.newInstance(), and ReflectiveOperationException covers
    // every reflective failure mode (missing ctor, access, instantiation).
    SequenceFileSerializer.Builder builder;
    try {
      builder = builderClass.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException ex) {
      logger.error("Cannot instantiate builder: " + formatType, ex);
      return null;
    }

    return builder.build(context);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.util.Collections;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
/**
 * A {@link SequenceFileSerializer} that emits one record per Flume event:
 * the key is a {@link LongWritable} timestamp and the value is a {@link Text}
 * wrapping the raw event body.
 */
public class HDFSTextSerializer implements SequenceFileSerializer {

  private Text makeText(Event e) {
    Text textObject = new Text();
    // Copy the raw body bytes as-is; no character-set conversion happens here.
    textObject.set(e.getBody(), 0, e.getBody().length);
    return textObject;
  }

  @Override
  public Class<LongWritable> getKeyClass() {
    return LongWritable.class;
  }

  @Override
  public Class<Text> getValueClass() {
    return Text.class;
  }

  @Override
  public Iterable<Record> serialize(Event e) {
    Object key = getKey(e);
    Object value = getValue(e);
    return Collections.singletonList(new Record(key, value));
  }

  /**
   * Derives the record key: the event's "timestamp" header when present,
   * otherwise the current wall-clock time.
   */
  private Object getKey(Event e) {
    // Write the data to HDFS
    String timestamp = e.getHeaders().get("timestamp");
    long eventStamp;
    if (timestamp == null) {
      eventStamp = System.currentTimeMillis();
    } else {
      // parseLong yields the primitive directly, avoiding the needless
      // boxing/unboxing of Long.valueOf(String).
      eventStamp = Long.parseLong(timestamp);
    }
    return new LongWritable(eventStamp);
  }

  private Object getValue(Event e) {
    return makeText(e);
  }

  public static class Builder implements SequenceFileSerializer.Builder {

    @Override
    public SequenceFileSerializer build(Context context) {
      return new HDFSTextSerializer();
    }
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.base.Preconditions;
import org.apache.flume.Context;
import org.apache.flume.FlumeException;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class AbstractHDFSWriter implements HDFSWriter {
  private static final Logger logger =
      LoggerFactory.getLogger(AbstractHDFSWriter.class);
  // State registered by subclasses via registerCurrentStream() while a
  // destination file is open; null when no stream is registered.
  private FSDataOutputStream outputStream;
  private FileSystem fs;
  private Path destPath;
  // Reflected handles to HDFS methods that are not present on every Hadoop
  // version (HDFS-826 / HADOOP-8014); null when the running version lacks them.
  private Method refGetNumCurrentReplicas = null;
  private Method refGetDefaultReplication = null;
  private Method refHflushOrSync = null;
  // Optional override of the desired replica count (hdfs.minBlockReplicas);
  // null means "use the file system's default replication".
  private Integer configuredMinReplicas = null;
  private Integer numberOfCloseRetries = null;
  private long timeBetweenCloseRetries = Long.MAX_VALUE;
  static final Object[] NO_ARGS = new Object[]{};
  // Reads hdfs.minBlockReplicas plus the close-retry settings
  // (hdfs.closeTries / hdfs.callTimeout) from the sink configuration.
  @Override
  public void configure(Context context) {
    configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
    if (configuredMinReplicas != null) {
      Preconditions.checkArgument(configuredMinReplicas >= 0,
          "hdfs.minBlockReplicas must be greater than or equal to 0");
    }
    numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;
    if (numberOfCloseRetries > 1) {
      try {
        //hdfs.callTimeout is deprecated from 1.9
        timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 30000L);
      } catch (NumberFormatException e) {
        // NOTE(review): this log call re-reads the same unparsable property,
        // which looks like it would throw again — confirm Context.getLong's
        // failure behavior before relying on this message.
        logger.warn("hdfs.callTimeout can not be parsed to a long: " +
            context.getLong("hdfs.callTimeout"));
      }
      // Spread the total timeout across the retries, with a 1s floor.
      timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
    }
  }
  /**
   * Contract for subclasses: Call registerCurrentStream() on open,
   * unregisterCurrentStream() on close, and the base class takes care of the
   * rest.
   * @return true if the open file currently has fewer replicas than desired;
   *         false when fully replicated or when the count cannot be determined
   */
  @Override
  public boolean isUnderReplicated() {
    try {
      int numBlocks = getNumCurrentReplicas();
      if (numBlocks == -1) {
        // Replica count unavailable (no HDFS-826 support) — assume healthy.
        return false;
      }
      int desiredBlocks;
      if (configuredMinReplicas != null) {
        desiredBlocks = configuredMinReplicas;
      } else {
        desiredBlocks = getFsDesiredReplication();
      }
      return numBlocks < desiredBlocks;
    } catch (IllegalAccessException e) {
      logger.error("Unexpected error while checking replication factor", e);
    } catch (InvocationTargetException e) {
      logger.error("Unexpected error while checking replication factor", e);
    } catch (IllegalArgumentException e) {
      logger.error("Unexpected error while checking replication factor", e);
    }
    return false;
  }
  // Called by subclasses when a destination file is opened; captures the
  // stream/fs/path and resolves the version-dependent reflective methods.
  protected void registerCurrentStream(FSDataOutputStream outputStream,
      FileSystem fs, Path destPath) {
    Preconditions.checkNotNull(outputStream, "outputStream must not be null");
    Preconditions.checkNotNull(fs, "fs must not be null");
    Preconditions.checkNotNull(destPath, "destPath must not be null");
    this.outputStream = outputStream;
    this.fs = fs;
    this.destPath = destPath;
    this.refGetNumCurrentReplicas = reflectGetNumCurrentReplicas(outputStream);
    this.refGetDefaultReplication = reflectGetDefaultReplication(fs);
    this.refHflushOrSync = reflectHflushOrSync(outputStream);
  }
  // Called by subclasses when the file is closed. Note: refHflushOrSync is
  // deliberately not cleared here (matches the long-standing behavior).
  protected void unregisterCurrentStream() {
    this.outputStream = null;
    this.fs = null;
    this.destPath = null;
    this.refGetNumCurrentReplicas = null;
    this.refGetDefaultReplication = null;
  }
  // Desired replication for the current destination: uses the per-path
  // getDefaultReplication(Path) when available (HADOOP-8014), otherwise the
  // file-system-wide default. Returns 0 when no stream is registered.
  public int getFsDesiredReplication() {
    short replication = 0;
    if (fs != null && destPath != null) {
      if (refGetDefaultReplication != null) {
        try {
          replication = (Short) refGetDefaultReplication.invoke(fs, destPath);
        } catch (IllegalAccessException e) {
          logger.warn("Unexpected error calling getDefaultReplication(Path)", e);
        } catch (InvocationTargetException e) {
          logger.warn("Unexpected error calling getDefaultReplication(Path)", e);
        }
      } else {
        // will not work on Federated HDFS (see HADOOP-8014)
        replication = fs.getDefaultReplication();
      }
    }
    return replication;
  }
  /**
   * This method gets the datanode replication count for the current open file.
   *
   * If the pipeline isn't started yet or is empty, you will get the default
   * replication factor.
   *
   * <p/>If this function returns -1, it means you
   * are not properly running with the HDFS-826 patch.
   * @throws InvocationTargetException
   * @throws IllegalAccessException
   * @throws IllegalArgumentException
   */
  public int getNumCurrentReplicas()
      throws IllegalArgumentException, IllegalAccessException,
      InvocationTargetException {
    if (refGetNumCurrentReplicas != null && outputStream != null) {
      // Invoke on the wrapped DFS stream, not the FSDataOutputStream facade.
      OutputStream dfsOutputStream = outputStream.getWrappedStream();
      if (dfsOutputStream != null) {
        Object repl = refGetNumCurrentReplicas.invoke(dfsOutputStream, NO_ARGS);
        if (repl instanceof Integer) {
          return ((Integer)repl).intValue();
        }
      }
    }
    return -1;
  }
  /**
   * Find the 'getNumCurrentReplicas' on the passed <code>os</code> stream.
   * @return Method or null.
   */
  private Method reflectGetNumCurrentReplicas(FSDataOutputStream os) {
    Method m = null;
    if (os != null) {
      Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream()
          .getClass();
      try {
        m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas",
            new Class<?>[] {});
        m.setAccessible(true);
      } catch (NoSuchMethodException e) {
        logger.info("FileSystem's output stream doesn't support"
            + " getNumCurrentReplicas; --HDFS-826 not available; fsOut="
            + wrappedStreamClass.getName() + "; err=" + e);
      } catch (SecurityException e) {
        logger.info("Doesn't have access to getNumCurrentReplicas on "
            + "FileSystems's output stream --HDFS-826 not available; fsOut="
            + wrappedStreamClass.getName(), e);
        m = null; // could happen on setAccessible()
      }
    }
    if (m != null) {
      logger.debug("Using getNumCurrentReplicas--HDFS-826");
    }
    return m;
  }
  /**
   * Find the 'getDefaultReplication' method on the passed <code>fs</code>
   * FileSystem that takes a Path argument.
   * @return Method or null.
   */
  private Method reflectGetDefaultReplication(FileSystem fileSystem) {
    Method m = null;
    if (fileSystem != null) {
      Class<?> fsClass = fileSystem.getClass();
      try {
        m = fsClass.getMethod("getDefaultReplication",
            new Class<?>[] { Path.class });
      } catch (NoSuchMethodException e) {
        logger.debug("FileSystem implementation doesn't support"
            + " getDefaultReplication(Path); -- HADOOP-8014 not available; " +
            "className = " + fsClass.getName() + "; err = " + e);
      } catch (SecurityException e) {
        logger.debug("No access to getDefaultReplication(Path) on "
            + "FileSystem implementation -- HADOOP-8014 not available; " +
            "className = " + fsClass.getName() + "; err = " + e);
      }
    }
    if (m != null) {
      logger.debug("Using FileSystem.getDefaultReplication(Path) from " +
          "HADOOP-8014");
    }
    return m;
  }
  // Resolves hflush() when present, falling back to the pre-hflush sync();
  // throws FlumeException if the stream supports neither.
  private Method reflectHflushOrSync(FSDataOutputStream os) {
    Method m = null;
    if (os != null) {
      Class<?> fsDataOutputStreamClass = os.getClass();
      try {
        m = fsDataOutputStreamClass.getMethod("hflush");
      } catch (NoSuchMethodException ex) {
        logger.debug("HFlush not found. Will use sync() instead");
        try {
          m = fsDataOutputStreamClass.getMethod("sync");
        } catch (Exception ex1) {
          String msg = "Neither hflush not sync were found. That seems to be " +
              "a problem!";
          logger.error(msg);
          throw new FlumeException(msg, ex1);
        }
      }
    }
    return m;
  }
  /**
   * If hflush is available in this version of HDFS, then this method calls
   * hflush, else it calls sync.
   * @param os - The stream to flush/sync
   * @throws IOException if the underlying flush/sync reports an I/O failure
   */
  protected void hflushOrSync(FSDataOutputStream os) throws IOException {
    try {
      // At this point the refHflushOrSync cannot be null,
      // since register method would have thrown if it was.
      this.refHflushOrSync.invoke(os);
    } catch (InvocationTargetException e) {
      String msg = "Error while trying to hflushOrSync!";
      logger.error(msg);
      // Unwrap and rethrow the real IOException so callers see the HDFS error.
      Throwable cause = e.getCause();
      if (cause != null && cause instanceof IOException) {
        throw (IOException)cause;
      }
      throw new FlumeException(msg, e);
    } catch (Exception e) {
      String msg = "Error while trying to hflushOrSync!";
      logger.error(msg);
      throw new FlumeException(msg, e);
    }
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.Context;
import org.apache.flume.Event;
// Strategy interface for turning Flume events into key/value records for an
// HDFS SequenceFile (see HDFSTextSerializer / HDFSWritableSerializer).
public interface SequenceFileSerializer {
  /** @return the key class to declare when creating the SequenceFile writer */
  Class<?> getKeyClass();
  /** @return the value class to declare when creating the SequenceFile writer */
  Class<?> getValueClass();
  /**
   * Format the given event into zero, one or more SequenceFile records
   *
   * @param e
   * event
   * @return a list of records corresponding to the given event
   */
  Iterable<Record> serialize(Event e);
  /**
   * Knows how to construct this output formatter.<br/>
   * <b>Note: Implementations MUST provide a public a no-arg constructor.</b>
   */
  public interface Builder {
    public SequenceFileSerializer build(Context context);
  }
  /**
   * A key-value pair making up a record in an HDFS SequenceFile
   */
  public static class Record {
    // Key and value objects produced by the serializer; their runtime types
    // should match getKeyClass()/getValueClass() respectively.
    private final Object key;
    private final Object value;
    public Record(Object key, Object value) {
      this.key = key;
      this.value = value;
    }
    /** @return the record key */
    public Object getKey() {
      return key;
    }
    /** @return the record value */
    public Object getValue() {
      return value;
    }
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
/**
 * Maps the configured hdfs.fileType name to a concrete {@link HDFSWriter}
 * implementation.
 */
public class HDFSWriterFactory {
  static final String SequenceFileType = "SequenceFile";
  static final String DataStreamType = "DataStream";
  static final String CompStreamType = "CompressedStream";

  public HDFSWriterFactory() {
  }

  /**
   * Returns a new writer for the given file type name (case-insensitive).
   *
   * @param fileType one of "SequenceFile", "DataStream" or "CompressedStream"
   * @throws IOException when the name is not a supported file type
   */
  public HDFSWriter getWriter(String fileType) throws IOException {
    if (fileType.equalsIgnoreCase(SequenceFileType)) {
      return new HDFSSequenceFile();
    }
    if (fileType.equalsIgnoreCase(DataStreamType)) {
      return new HDFSDataStream();
    }
    if (fileType.equalsIgnoreCase(CompStreamType)) {
      return new HDFSCompressedDataStream();
    }
    throw new IOException("File type " + fileType + " not supported");
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import org.apache.flume.FlumeException;
/**
 * Thrown when an operation is attempted on a bucket writer that has already
 * been closed; callers (see HDFSEventSink.process) catch this, re-initialize
 * the bucket and retry the append.
 */
public class BucketClosedException extends FlumeException {
  private static final long serialVersionUID = -4216667125119540357L;
  /** @param msg detail message describing the closed bucket */
  public BucketClosedException(String msg) {
    super(msg);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
/**
 * The serializer variants available for HDFS sequence files, each mapped
 * to the {@link SequenceFileSerializer.Builder} class that constructs it.
 */
public enum SequenceFileSerializerType {
  /** Serializer built by {@link HDFSWritableSerializer.Builder}. */
  Writable(HDFSWritableSerializer.Builder.class),
  /** Serializer built by {@link HDFSTextSerializer.Builder}. */
  Text(HDFSTextSerializer.Builder.class),
  /** No predefined builder is associated with this type. */
  Other(null);

  private final Class<? extends SequenceFileSerializer.Builder> builderClass;

  SequenceFileSerializerType(Class<? extends SequenceFileSerializer.Builder> builder) {
    this.builderClass = builder;
  }

  /**
   * @return the builder class for this type, or {@code null} for {@link #Other}
   */
  public Class<? extends SequenceFileSerializer.Builder> getBuilderClass() {
    return builderClass;
  }
}
| 9,825 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSCompressedDataStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.serialization.EventSerializer;
import org.apache.flume.serialization.EventSerializerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An HDFS writer that wraps the file output stream in a Hadoop
 * {@link CompressionOutputStream} and writes events through a
 * configurable {@link EventSerializer}.
 */
public class HDFSCompressedDataStream extends AbstractHDFSWriter {

  private static final Logger logger =
      LoggerFactory.getLogger(HDFSCompressedDataStream.class);

  private FSDataOutputStream fsOut;
  private CompressionOutputStream cmpOut;
  // True once cmpOut.finish() has been called; the compressed stream must
  // be reset via resetState() before more data can be appended.
  private boolean isFinished = false;
  private String serializerType;
  private Context serializerContext;
  private EventSerializer serializer;
  private boolean useRawLocalFileSystem;
  // Borrowed from CodecPool; must be returned when this writer is done.
  private Compressor compressor;

  /**
   * Reads the serializer type, its sub-properties, and the
   * raw-local-file-system flag from the sink context.
   */
  @Override
  public void configure(Context context) {
    super.configure(context);
    serializerType = context.getString("serializer", "TEXT");
    useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
        false);
    serializerContext = new Context(
        context.getSubProperties(EventSerializer.CTX_PREFIX));
    logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = "
        + useRawLocalFileSystem);
  }

  /**
   * Opens {@code filePath} with {@link DefaultCodec} and BLOCK
   * compression.
   */
  @Override
  public void open(String filePath) throws IOException {
    DefaultCodec defCodec = new DefaultCodec();
    CompressionType cType = CompressionType.BLOCK;
    open(filePath, defCodec, cType);
  }

  /**
   * Opens {@code filePath} for writing with the given codec. Appends to an
   * existing file when append support is enabled and the serializer
   * supports reopening; otherwise creates/overwrites the file.
   *
   * @throws IOException if the file cannot be opened, or if appending is
   *         requested but the configured serializer does not support it
   */
  @Override
  public void open(String filePath, CompressionCodec codec,
      CompressionType cType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    if (useRawLocalFileSystem) {
      if (hdfs instanceof LocalFileSystem) {
        hdfs = ((LocalFileSystem) hdfs).getRaw();
      } else {
        logger.warn("useRawLocalFileSystem is set to true but file system " +
            "is not of type LocalFileSystem: " + hdfs.getClass().getName());
      }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      fsOut = hdfs.append(dstPath);
      appending = true;
    } else {
      fsOut = hdfs.create(dstPath);
    }
    if (compressor == null) {
      compressor = CodecPool.getCompressor(codec, conf);
    }
    cmpOut = codec.createOutputStream(fsOut, compressor);
    serializer = EventSerializerFactory.getInstance(serializerType,
        serializerContext, cmpOut);
    if (appending && !serializer.supportsReopen()) {
      cmpOut.close();
      serializer = null;
      // Return the pooled compressor on this failure path: close() is never
      // reached for a failed open, so skipping this would leak it.
      CodecPool.returnCompressor(compressor);
      compressor = null;
      throw new IOException("serializer (" + serializerType
          + ") does not support append");
    }

    registerCurrentStream(fsOut, hdfs, dstPath);

    if (appending) {
      serializer.afterReopen();
    } else {
      serializer.afterCreate();
    }
    isFinished = false;
  }

  @Override
  public void append(Event e) throws IOException {
    if (isFinished) {
      // A previous sync() finished the compressed stream; reset it so new
      // data can be written.
      cmpOut.resetState();
      isFinished = false;
    }
    serializer.write(e);
  }

  @Override
  public void sync() throws IOException {
    // We must use finish() and resetState() here -- flush() is apparently not
    // supported by the compressed output streams (it's a no-op).
    // Also, since resetState() writes headers, avoid calling it without an
    // additional write/append operation.
    // Note: There are bugs in Hadoop & JDK w/ pure-java gzip; see HADOOP-8522.
    serializer.flush();
    if (!isFinished) {
      cmpOut.finish();
      isFinished = true;
    }
    fsOut.flush();
    hflushOrSync(this.fsOut);
  }

  @Override
  public void close() throws IOException {
    serializer.flush();
    serializer.beforeClose();
    if (!isFinished) {
      cmpOut.finish();
      isFinished = true;
    }
    fsOut.flush();
    hflushOrSync(fsOut);
    cmpOut.close();
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
      compressor = null;
    }
    unregisterCurrentStream();
  }
}
| 9,826 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/main/java/org/apache/flume/sink/hdfs/HDFSDataStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.serialization.EventSerializer;
import org.apache.flume.serialization.EventSerializerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An HDFS writer that streams serialized events to an uncompressed
 * {@link FSDataOutputStream} via a configurable {@link EventSerializer}.
 */
public class HDFSDataStream extends AbstractHDFSWriter {

  private static final Logger logger = LoggerFactory.getLogger(HDFSDataStream.class);

  private FSDataOutputStream outStream;
  private String serializerType;
  private Context serializerContext;
  private EventSerializer serializer;
  private boolean useRawLocalFileSystem;

  /**
   * Reads the serializer type, its sub-properties, and the
   * raw-local-file-system flag from the sink context.
   */
  @Override
  public void configure(Context context) {
    super.configure(context);
    serializerType = context.getString("serializer", "TEXT");
    useRawLocalFileSystem = context.getBoolean("hdfs.useRawLocalFileSystem",
        false);
    serializerContext =
        new Context(context.getSubProperties(EventSerializer.CTX_PREFIX));
    logger.info("Serializer = " + serializerType + ", UseRawLocalFileSystem = "
        + useRawLocalFileSystem);
  }

  /** Resolves the file system for {@code dstPath}; overridable in tests. */
  @VisibleForTesting
  protected FileSystem getDfs(Configuration conf, Path dstPath) throws IOException {
    return dstPath.getFileSystem(conf);
  }

  /**
   * Opens {@code dstPath} on {@code hdfs}, appending when append support
   * is enabled, the file exists, and the serializer supports reopening.
   *
   * @throws IOException if the file cannot be opened, or if appending is
   *         required but the serializer does not support it
   */
  protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
      if (hdfs instanceof LocalFileSystem) {
        hdfs = ((LocalFileSystem) hdfs).getRaw();
      } else {
        logger.warn("useRawLocalFileSystem is set to true but file system " +
            "is not of type LocalFileSystem: " + hdfs.getClass().getName());
      }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      outStream = hdfs.append(dstPath);
      appending = true;
    } else {
      outStream = hdfs.create(dstPath);
    }

    serializer = EventSerializerFactory.getInstance(
        serializerType, serializerContext, outStream);
    if (appending && !serializer.supportsReopen()) {
      outStream.close();
      serializer = null;
      throw new IOException("serializer (" + serializerType +
          ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
      serializer.afterReopen();
    } else {
      serializer.afterCreate();
    }
  }

  @Override
  public void open(String filePath) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = getDfs(conf, dstPath);
    doOpen(conf, dstPath, hdfs);
  }

  /**
   * Opens {@code filePath}; the codec and compression type are ignored
   * because a plain data stream is never compressed.
   */
  @Override
  public void open(String filePath, CompressionCodec codec,
      CompressionType cType) throws IOException {
    open(filePath);
  }

  @Override
  public void append(Event e) throws IOException {
    serializer.write(e);
  }

  @Override
  public void sync() throws IOException {
    serializer.flush();
    outStream.flush();
    hflushOrSync(outStream);
  }

  @Override
  public void close() throws IOException {
    serializer.flush();
    serializer.beforeClose();
    outStream.flush();
    hflushOrSync(outStream);
    outStream.close();

    unregisterCurrentStream();
  }
}
| 9,827 |
0 | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/test/java/org/apache/flume/sink/http/TestHttpSinkIT.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.http;
import com.github.tomakehurst.wiremock.global.RequestDelaySpec;
import com.github.tomakehurst.wiremock.http.Fault;
import com.github.tomakehurst.wiremock.http.Request;
import com.github.tomakehurst.wiremock.http.RequestListener;
import com.github.tomakehurst.wiremock.http.Response;
import com.github.tomakehurst.wiremock.junit.WireMockRule;
import org.apache.flume.Context;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.event.SimpleEvent;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static com.github.tomakehurst.wiremock.client.WireMock.*;
import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig;
import static com.github.tomakehurst.wiremock.stubbing.Scenario.STARTED;
import static org.apache.flume.Sink.Status;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Integration tests that run a real {@link HttpSink} backed by a
 * {@link MemoryChannel} against a WireMock-mocked HTTP endpoint, covering
 * success, HTTP error codes, network faults, and timeout behavior.
 */
@RunWith(MockitoJUnitRunner.class)
public class TestHttpSinkIT {

  // Longer than the sink's configured requestTimeout (2000 ms) so slow
  // responses trigger a request timeout.
  private static final int RESPONSE_TIMEOUT = 4000;
  // Longer than the sink's configured connectTimeout (1500 ms) so delayed
  // socket accepts trigger a connect timeout.
  private static final int CONNECT_TIMEOUT = 2500;

  private MemoryChannel channel;
  private HttpSink httpSink;

  // Picks an ephemeral free port so parallel test runs don't collide.
  private static int findFreePort() {
    try (ServerSocket socket = new ServerSocket(0)) {
      return socket.getLocalPort();
    } catch (IOException e) {
      throw new AssertionError("Can not find free port.", e);
    }
  }

  private final int port = findFreePort();

  @Before
  public void setupSink() {
    if (httpSink == null) {
      Context httpSinkContext = new Context();
      httpSinkContext.put("endpoint", "http://localhost:" + port + "/endpoint");
      httpSinkContext.put("requestTimeout", "2000");
      httpSinkContext.put("connectTimeout", "1500");
      httpSinkContext.put("acceptHeader", "application/json");
      httpSinkContext.put("contentTypeHeader", "application/json");
      httpSinkContext.put("backoff.200", "false");
      httpSinkContext.put("rollback.200", "false");
      httpSinkContext.put("backoff.401", "false");
      httpSinkContext.put("rollback.401", "false");
      httpSinkContext.put("incrementMetrics.200", "true");

      Context memoryChannelContext = new Context();

      channel = new MemoryChannel();
      channel.configure(memoryChannelContext);
      channel.start();

      httpSink = new HttpSink();
      httpSink.configure(httpSinkContext);
      httpSink.setChannel(channel);
      httpSink.start();
    }
  }

  @After
  public void waitForShutdown() throws InterruptedException {
    httpSink.stop();
    // Give in-flight connections a moment to shut down before the next test.
    Thread.sleep(500);
  }

  @Rule
  public WireMockRule service = new WireMockRule(wireMockConfig().port(port));

  @Test
  public void ensureSuccessfulMessageDelivery() throws Exception {
    service.stubFor(post(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SUCCESS")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("SUCCESS"));

    service.verify(1, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SUCCESS"))));
  }

  @Test
  public void ensureEventsResentOn503Failure() throws Exception {
    // First delivery gets a 503 (rolled back + backoff), second succeeds.
    String errorScenario = "Error Scenario";

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs(STARTED)
        .withRequestBody(equalToJson(event("TRANSIENT_ERROR")))
        .willReturn(aResponse().withStatus(503))
        .willSetStateTo("Error Sent"));

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs("Error Sent")
        .withRequestBody(equalToJson(event("TRANSIENT_ERROR")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("TRANSIENT_ERROR"), Status.BACKOFF);
    addEventToChannel(event("TRANSIENT_ERROR"), Status.READY);

    service.verify(2, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("TRANSIENT_ERROR"))));
  }

  @Test
  public void ensureEventsNotResentOn401Failure() throws Exception {
    // 401 is configured with rollback.401=false, so the event is consumed
    // (not replayed) and the next event goes through.
    String errorScenario = "Error skip scenario";

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs(STARTED)
        .withRequestBody(equalToJson(event("UNAUTHORIZED REQUEST")))
        .willReturn(aResponse().withStatus(401)
            .withHeader("Content-Type", "text/plain")
            .withBody("Not allowed!"))
        .willSetStateTo("Error Sent"));

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs("Error Sent")
        .withRequestBody(equalToJson(event("NEXT EVENT")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("UNAUTHORIZED REQUEST"), Status.READY);
    addEventToChannel(event("NEXT EVENT"), Status.READY);

    service.verify(1, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("UNAUTHORIZED REQUEST"))));
    service.verify(1, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("NEXT EVENT"))));
  }

  @Test
  public void ensureEventsResentOnNetworkFailure() throws Exception {
    // A connection-level fault should roll back and the event be retried.
    String errorScenario = "Error Scenario";

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs(STARTED)
        .withRequestBody(equalToJson(event("NETWORK_ERROR")))
        .willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE))
        .willSetStateTo("Error Sent"));

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs("Error Sent")
        .withRequestBody(equalToJson(event("NETWORK_ERROR")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("NETWORK_ERROR"), Status.BACKOFF);
    addEventToChannel(event("NETWORK_ERROR"), Status.READY);

    service.verify(2, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("NETWORK_ERROR"))));
  }

  @Test
  public void ensureEventsResentOnConnectionTimeout() throws Exception {
    // Delay socket accept beyond the sink's connectTimeout for the first
    // attempt only, then remove the delay once a request arrives.
    final CountDownLatch firstRequestReceived = new CountDownLatch(1);

    service.addSocketAcceptDelay(new RequestDelaySpec(CONNECT_TIMEOUT));
    service.addMockServiceRequestListener(new RequestListener() {
      @Override
      public void requestReceived(Request request, Response response) {
        service.addSocketAcceptDelay(new RequestDelaySpec(0));
        firstRequestReceived.countDown();
      }
    });

    service.stubFor(post(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SLOW_SOCKET")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("SLOW_SOCKET"), Status.BACKOFF);
    // wait until the socket is connected
    firstRequestReceived.await(2000, TimeUnit.MILLISECONDS);
    addEventToChannel(event("SLOW_SOCKET"), Status.READY);

    service.verify(2, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SLOW_SOCKET"))));
  }

  @Test
  public void ensureEventsResentOnRequestTimeout() throws Exception {
    // First response is slower than requestTimeout; event is rolled back
    // and redelivered, second response is fast.
    String errorScenario = "Error Scenario";

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs(STARTED)
        .withRequestBody(equalToJson(event("SLOW_RESPONSE")))
        .willReturn(aResponse().withFixedDelay(RESPONSE_TIMEOUT).withStatus(200))
        .willSetStateTo("Slow Response Sent"));

    service.stubFor(post(urlEqualTo("/endpoint"))
        .inScenario(errorScenario)
        .whenScenarioStateIs("Slow Response Sent")
        .withRequestBody(equalToJson(event("SLOW_RESPONSE")))
        .willReturn(aResponse().withStatus(200)));

    addEventToChannel(event("SLOW_RESPONSE"), Status.BACKOFF);
    addEventToChannel(event("SLOW_RESPONSE"), Status.READY);

    service.verify(2, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SLOW_RESPONSE"))));
  }

  @Test
  public void ensureHttpConnectionReusedForSuccessfulRequests() throws Exception {
    // we should only get one delay when establishing a connection
    service.addSocketAcceptDelay(new RequestDelaySpec(1000));

    service.stubFor(post(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SUCCESS")))
        .willReturn(aResponse().withStatus(200)));

    long startTime = System.currentTimeMillis();
    addEventToChannel(event("SUCCESS"), Status.READY);
    addEventToChannel(event("SUCCESS"), Status.READY);
    addEventToChannel(event("SUCCESS"), Status.READY);
    long endTime = System.currentTimeMillis();

    // If the connection were not reused, each request would pay the 1000 ms
    // accept delay and the total would exceed this bound.
    assertTrue("Test should have completed faster", endTime - startTime < 2500);

    service.verify(3, postRequestedFor(urlEqualTo("/endpoint"))
        .withRequestBody(equalToJson(event("SUCCESS"))));
  }

  // Puts one event on the channel and runs a single sink.process() cycle,
  // expecting READY.
  private void addEventToChannel(String line) throws EventDeliveryException {
    addEventToChannel(line, Status.READY);
  }

  // Puts one event on the channel, runs a single sink.process() cycle, and
  // asserts the returned sink status.
  private void addEventToChannel(String line, Status expectedStatus)
      throws EventDeliveryException {
    SimpleEvent event = new SimpleEvent();
    event.setBody(line.getBytes());

    Transaction channelTransaction = channel.getTransaction();
    channelTransaction.begin();
    channel.put(event);
    channelTransaction.commit();
    channelTransaction.close();

    Sink.Status status = httpSink.process();
    assertEquals(expectedStatus, status);
  }

  // Builds the JSON body used for both stubbing and delivery.
  private String event(String id) {
    return "{'id':'" + id + "'}";
  }
}
| 9,828 |
0 | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/test/java/org/apache/flume/sink/http/TestHttpSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.http;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Sink.Status;
import org.apache.flume.Transaction;
import org.apache.flume.instrumentation.SinkCounter;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link HttpSink} configuration parsing and per-status
 * rollback/backoff/metrics behavior, using mocked channel, transaction,
 * and HTTP connection collaborators.
 */
@RunWith(MockitoJUnitRunner.class)
public class TestHttpSink {

  private static final Integer DEFAULT_REQUEST_TIMEOUT = 5000;
  private static final Integer DEFAULT_CONNECT_TIMEOUT = 5000;
  private static final String DEFAULT_ACCEPT_HEADER = "text/plain";
  private static final String DEFAULT_CONTENT_TYPE_HEADER = "text/plain";

  @Mock
  private SinkCounter sinkCounter;

  @Mock
  private Context configContext;

  @Mock
  private Channel channel;

  @Mock
  private Transaction transaction;

  @Mock
  private Event event;

  @Mock
  private HttpURLConnection httpURLConnection;

  @Mock
  private OutputStream outputStream;

  @Mock
  private InputStream inputStream;

  @Test
  public void ensureAllConfigurationOptionsRead() {
    whenDefaultStringConfig();
    whenDefaultBooleanConfig();
    when(configContext.getInteger(eq("connectTimeout"), Mockito.anyInt())).thenReturn(1000);
    when(configContext.getInteger(eq("requestTimeout"), Mockito.anyInt())).thenReturn(1000);

    new HttpSink().configure(configContext);

    verify(configContext).getString("endpoint", "");
    verify(configContext).getInteger(eq("connectTimeout"), Mockito.anyInt());
    verify(configContext).getInteger(eq("requestTimeout"), Mockito.anyInt());
    verify(configContext).getString(eq("acceptHeader"), Mockito.anyString());
    verify(configContext).getString(eq("contentTypeHeader"), Mockito.anyString());
    verify(configContext).getBoolean("defaultBackoff", true);
    verify(configContext).getBoolean("defaultRollback", true);
    verify(configContext).getBoolean("defaultIncrementMetrics", false);
  }

  @Test(expected = IllegalArgumentException.class)
  public void ensureExceptionIfEndpointUrlEmpty() {
    when(configContext.getString("endpoint", "")).thenReturn("");
    new HttpSink().configure(configContext);
  }

  @Test(expected = IllegalArgumentException.class)
  public void ensureExceptionIfEndpointUrlInvalid() {
    when(configContext.getString("endpoint", "")).thenReturn("invalid url");
    new HttpSink().configure(configContext);
  }

  @Test(expected = IllegalArgumentException.class)
  public void ensureExceptionIfConnectTimeoutNegative() {
    whenDefaultStringConfig();
    when(configContext.getInteger("connectTimeout", 1000)).thenReturn(-1000);
    when(configContext.getInteger(eq("requestTimeout"), Mockito.anyInt())).thenReturn(1000);
    new HttpSink().configure(configContext);
  }

  @Test
  public void ensureDefaultConnectTimeoutCorrect() {
    whenDefaultStringConfig();
    when(configContext.getInteger("connectTimeout", DEFAULT_CONNECT_TIMEOUT)).thenReturn(1000);
    when(configContext.getInteger(eq("requestTimeout"), Mockito.anyInt())).thenReturn(1000);
    new HttpSink().configure(configContext);
    verify(configContext).getInteger("connectTimeout", DEFAULT_CONNECT_TIMEOUT);
  }

  @Test(expected = IllegalArgumentException.class)
  public void ensureExceptionIfRequestTimeoutNegative() {
    whenDefaultStringConfig();
    when(configContext.getInteger("requestTimeout", 1000)).thenReturn(-1000);
    when(configContext.getInteger(eq("connectTimeout"), Mockito.anyInt())).thenReturn(1000);
    new HttpSink().configure(configContext);
  }

  @Test
  public void ensureDefaultRequestTimeoutCorrect() {
    whenDefaultStringConfig();
    when(configContext.getInteger("requestTimeout", DEFAULT_REQUEST_TIMEOUT)).thenReturn(1000);
    when(configContext.getInteger(eq("connectTimeout"), Mockito.anyInt())).thenReturn(1000);
    new HttpSink().configure(configContext);
    verify(configContext).getInteger("requestTimeout", DEFAULT_REQUEST_TIMEOUT);
  }

  @Test
  public void ensureDefaultAcceptHeaderCorrect() {
    whenDefaultTimeouts();
    whenDefaultStringConfig();
    new HttpSink().configure(configContext);
    verify(configContext).getString("acceptHeader", DEFAULT_ACCEPT_HEADER);
  }

  @Test
  public void ensureDefaultContentTypeHeaderCorrect() {
    whenDefaultTimeouts();
    whenDefaultStringConfig();
    new HttpSink().configure(configContext);
    verify(configContext).getString("contentTypeHeader", DEFAULT_CONTENT_TYPE_HEADER);
  }

  @Test
  public void ensureBackoffOnNullEvent() throws Exception {
    when(channel.take()).thenReturn(null);
    executeWithMocks(true);
  }

  @Test
  public void ensureBackoffOnNullEventBody() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn(null);
    executeWithMocks(true);
  }

  @Test
  public void ensureBackoffOnEmptyEvent() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn(new byte[]{});
    executeWithMocks(true);
  }

  @Test
  public void ensureRollbackBackoffAndIncrementMetricsIfConfigured() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("defaultRollback", "true");
    context.put("defaultBackoff", "true");
    context.put("defaultIncrementMetrics", "true");

    executeWithMocks(false, Status.BACKOFF, true, true, context, HttpURLConnection.HTTP_OK);
  }

  @Test
  public void ensureCommitReadyAndNoIncrementMetricsIfConfigured() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("defaultRollback", "false");
    context.put("defaultBackoff", "false");
    context.put("defaultIncrementMetrics", "false");

    executeWithMocks(true, Status.READY, false, false, context, HttpURLConnection.HTTP_OK);
  }

  @Test
  public void ensureSingleStatusConfigurationCorrectlyUsed() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("defaultRollback", "true");
    context.put("defaultBackoff", "true");
    context.put("defaultIncrementMetrics", "false");
    context.put("rollback.200", "false");
    context.put("backoff.200", "false");
    context.put("incrementMetrics.200", "true");

    executeWithMocks(true, Status.READY, true, true, context, HttpURLConnection.HTTP_OK);
  }

  @Test
  public void testErrorCounter() throws Exception {
    RuntimeException exception = new RuntimeException("dummy");
    when(channel.take()).thenThrow(exception);

    Context context = new Context();
    context.put("defaultRollback", "false");
    context.put("defaultBackoff", "false");
    context.put("defaultIncrementMetrics", "false");

    executeWithMocks(false, Status.BACKOFF, false, false, context, HttpURLConnection.HTTP_OK);
    inOrder(sinkCounter).verify(sinkCounter).incrementEventWriteOrChannelFail(exception);
  }

  @Test
  public void ensureSingleErrorStatusConfigurationCorrectlyUsed() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("defaultRollback", "true");
    context.put("defaultBackoff", "true");
    context.put("defaultIncrementMetrics", "false");
    context.put("rollback.401", "false");
    context.put("backoff.401", "false");
    context.put("incrementMetrics.401", "false");

    executeWithMocks(true, Status.READY, false, true, context, HttpURLConnection.HTTP_UNAUTHORIZED);
  }

  @Test
  public void ensureGroupConfigurationCorrectlyUsed() throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("defaultRollback", "true");
    context.put("defaultBackoff", "true");
    context.put("defaultIncrementMetrics", "false");
    context.put("rollback.2XX", "false");
    context.put("backoff.2XX", "false");
    context.put("incrementMetrics.2XX", "true");

    executeWithMocks(true, Status.READY, true, true, context, HttpURLConnection.HTTP_OK);
    executeWithMocks(true, Status.READY, true, true, context, HttpURLConnection.HTTP_NO_CONTENT);
  }

  @Test
  public void ensureSingleStatusConfigurationOverridesGroupConfigurationCorrectly()
      throws Exception {
    when(channel.take()).thenReturn(event);
    when(event.getBody()).thenReturn("something".getBytes());

    Context context = new Context();
    context.put("rollback.2XX", "false");
    context.put("backoff.2XX", "false");
    context.put("incrementMetrics.2XX", "true");
    context.put("rollback.200", "true");
    context.put("backoff.200", "true");
    context.put("incrementMetrics.200", "false");

    executeWithMocks(true, Status.READY, true, true, context, HttpURLConnection.HTTP_NO_CONTENT);
    executeWithMocks(false, Status.BACKOFF, false, true, context, HttpURLConnection.HTTP_OK);
  }

  // Runs one process() cycle expecting the default-config outcome for an
  // empty/absent event: commit (or not) with BACKOFF and no metrics.
  private void executeWithMocks(boolean commit) throws Exception {
    Context context = new Context();
    executeWithMocks(commit, Status.BACKOFF, false, false, context, HttpURLConnection.HTTP_OK);
  }

  /**
   * Wires a sink with the given context and a stubbed HTTP connection that
   * returns {@code httpStatus}, runs one {@code process()} cycle, and
   * verifies the resulting status, transaction outcome, and counters.
   */
  private void executeWithMocks(boolean expectedCommit, Status expectedStatus,
                                boolean expectedIncrementSuccessMetrics,
                                boolean expectedIncrementAttemptMetrics,
                                Context context, int httpStatus)
          throws Exception {
    context.put("endpoint", "http://localhost:8080/endpoint");

    HttpSink httpSink = new HttpSink();
    httpSink.configure(context);
    httpSink.setConnectionBuilder(httpSink.new ConnectionBuilder() {
      @Override
      public HttpURLConnection getConnection() throws IOException {
        return httpURLConnection;
      }
    });
    httpSink.setChannel(channel);
    httpSink.setSinkCounter(sinkCounter);

    when(channel.getTransaction()).thenReturn(transaction);
    when(httpURLConnection.getOutputStream()).thenReturn(outputStream);
    when(httpURLConnection.getInputStream()).thenReturn(inputStream);
    when(httpURLConnection.getErrorStream()).thenReturn(inputStream);
    when(httpURLConnection.getResponseCode()).thenReturn(httpStatus);

    Status actualStatus = httpSink.process();
    // Use a JUnit assertion instead of the Java `assert` keyword: `assert`
    // is silently skipped unless the JVM runs with -ea, which would let a
    // wrong status pass unnoticed in a normal build.
    assertEquals(expectedStatus, actualStatus);

    // NOTE(review): creating a fresh InOrder per verification checks that
    // each call happened but not their relative order; a single shared
    // InOrder instance would verify true ordering — confirm intent.
    inOrder(transaction).verify(transaction).begin();
    if (expectedIncrementAttemptMetrics) {
      inOrder(sinkCounter).verify(sinkCounter).incrementEventDrainAttemptCount();
    }
    if (expectedCommit) {
      inOrder(transaction).verify(transaction).commit();
    } else {
      inOrder(transaction).verify(transaction).rollback();
    }
    if (expectedIncrementSuccessMetrics) {
      inOrder(sinkCounter).verify(sinkCounter).incrementEventDrainSuccessCount();
    }
    inOrder(transaction).verify(transaction).close();
  }

  // Stubs the string-valued configuration keys with non-default values.
  private void whenDefaultStringConfig() {
    when(configContext.getString("endpoint", "")).thenReturn("http://test.abc/");
    when(configContext.getString("acceptHeader", "")).thenReturn("test/accept");
    when(configContext.getString("contentTypeHeader", "")).thenReturn("test/content");
  }

  // Stubs the boolean-valued configuration keys to return true.
  private void whenDefaultBooleanConfig() {
    when(configContext.getBoolean("defaultBackoff", true)).thenReturn(true);
    when(configContext.getBoolean("defaultRollback", true)).thenReturn(true);
    when(configContext.getBoolean("defaultIncrementMetrics", false)).thenReturn(true);
  }

  // Stubs both timeout keys with a valid positive value.
  private void whenDefaultTimeouts() {
    when(configContext.getInteger(eq("requestTimeout"), Mockito.anyInt())).thenReturn(1000);
    when(configContext.getInteger(eq("connectTimeout"), Mockito.anyInt())).thenReturn(1000);
  }
}
| 9,829 |
0 | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/main/java/org/apache/flume/sink/http/HttpSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.http;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* Implementation of an HTTP sink. Events are POSTed to an HTTP / HTTPS
* endpoint. The error handling behaviour is configurable, and can respond
* differently depending on the response status returned by the endpoint.
*
* Rollback of the Flume transaction, and backoff can be specified globally,
* then overridden for ranges (or individual) status codes.
*/
public class HttpSink extends AbstractSink implements Configurable {

  /** Class logger. */
  private static final Logger LOG = LoggerFactory.getLogger(HttpSink.class);

  /** Lowest valid HTTP status code. */
  private static final int HTTP_STATUS_CONTINUE = 100;

  /** Default setting for the connection timeout when calling endpoint. */
  private static final int DEFAULT_CONNECT_TIMEOUT = 5000;

  /** Default setting for the request timeout when calling endpoint. */
  private static final int DEFAULT_REQUEST_TIMEOUT = 5000;

  /** Default setting for the HTTP content type header. */
  private static final String DEFAULT_CONTENT_TYPE = "text/plain";

  /** Default setting for the HTTP accept header. */
  private static final String DEFAULT_ACCEPT_HEADER = "text/plain";

  /** Endpoint URL to POST events to. */
  private URL endpointUrl;

  /** Counter used to monitor event throughput. */
  private SinkCounter sinkCounter;

  /** Actual connection timeout value in use. */
  private int connectTimeout = DEFAULT_CONNECT_TIMEOUT;

  /** Actual request timeout value in use. */
  private int requestTimeout = DEFAULT_REQUEST_TIMEOUT;

  /** Actual content type header value in use. */
  private String contentTypeHeader = DEFAULT_CONTENT_TYPE;

  /** Actual accept header value in use. */
  private String acceptHeader = DEFAULT_ACCEPT_HEADER;

  /** Backoff value to use if a specific override is not defined. */
  private boolean defaultBackoff;

  /** Rollback value to use if a specific override is not defined. */
  private boolean defaultRollback;

  /** Increment metrics value to use if a specific override is not defined. */
  private boolean defaultIncrementMetrics;

  /**
   * Holds all overrides for backoff. The key is a string of the format "500" or
   * "5XX", and the value is the backoff value to use for the individual code,
   * or code range.
   */
  private HashMap<String, Boolean> backoffOverrides = new HashMap<>();

  /**
   * Holds all overrides for rollback. The key is a string of the format "500"
   * or "5XX", and the value is the rollback value to use for the individual
   * code, or code range.
   */
  private HashMap<String, Boolean> rollbackOverrides = new HashMap<>();

  /**
   * Holds all overrides for increment metrics. The key is a string of the
   * format "500" or "5XX", and the value is the increment metrics value to use
   * for the individual code, or code range.
   */
  private HashMap<String, Boolean> incrementMetricsOverrides = new HashMap<>();

  /** Used to create HTTP connections to the endpoint. */
  private ConnectionBuilder connectionBuilder;

  /**
   * Reads and validates the sink configuration. Throws
   * {@link IllegalArgumentException} on a malformed endpoint URL or a
   * non-positive timeout.
   */
  @Override
  public final void configure(final Context context) {
    String configuredEndpoint = context.getString("endpoint", "");
    LOG.info("Read endpoint URL from configuration : {}", configuredEndpoint);

    try {
      endpointUrl = new URL(configuredEndpoint);
    } catch (MalformedURLException e) {
      throw new IllegalArgumentException("Endpoint URL invalid", e);
    }

    connectTimeout = context.getInteger("connectTimeout",
        DEFAULT_CONNECT_TIMEOUT);
    if (connectTimeout <= 0) {
      throw new IllegalArgumentException(
          "Connect timeout must be a non-zero and positive");
    }
    LOG.info("Using connect timeout : {}", connectTimeout);

    requestTimeout = context.getInteger("requestTimeout",
        DEFAULT_REQUEST_TIMEOUT);
    if (requestTimeout <= 0) {
      throw new IllegalArgumentException(
          "Request timeout must be a non-zero and positive");
    }
    LOG.info("Using request timeout : {}", requestTimeout);

    acceptHeader = context.getString("acceptHeader", DEFAULT_ACCEPT_HEADER);
    LOG.info("Using Accept header value : {}", acceptHeader);

    contentTypeHeader = context.getString("contentTypeHeader",
        DEFAULT_CONTENT_TYPE);
    LOG.info("Using Content-Type header value : {}", contentTypeHeader);

    defaultBackoff = context.getBoolean("defaultBackoff", true);
    LOG.info("Channel backoff by default is {}", defaultBackoff);

    defaultRollback = context.getBoolean("defaultRollback", true);
    LOG.info("Transaction rollback by default is {}", defaultRollback);

    defaultIncrementMetrics = context.getBoolean("defaultIncrementMetrics",
        false);
    LOG.info("Incrementing metrics by default is {}", defaultIncrementMetrics);

    parseConfigOverrides("backoff", context, backoffOverrides);
    parseConfigOverrides("rollback", context, rollbackOverrides);
    parseConfigOverrides("incrementMetrics", context,
        incrementMetricsOverrides);

    if (this.sinkCounter == null) {
      this.sinkCounter = new SinkCounter(this.getName());
    }

    connectionBuilder = new ConnectionBuilder();
  }

  @Override
  public final void start() {
    LOG.info("Starting HttpSink");
    sinkCounter.start();
  }

  @Override
  public final void stop() {
    LOG.info("Stopping HttpSink");
    sinkCounter.stop();
  }

  /**
   * Takes one event from the channel and POSTs its body to the endpoint.
   * The HTTP status code (with any configured overrides) determines whether
   * the transaction is committed or rolled back, whether BACKOFF or READY is
   * returned, and whether the success counter is incremented.
   *
   * @return READY or BACKOFF, per the status-code handling configuration
   */
  @Override
  public final Status process() throws EventDeliveryException {
    Status status = null;
    OutputStream outputStream = null;

    Channel ch = getChannel();
    Transaction txn = ch.getTransaction();
    txn.begin();

    try {
      Event event = ch.take();

      byte[] eventBody = null;
      if (event != null) {
        eventBody = event.getBody();
      }

      if (eventBody != null && eventBody.length > 0) {
        sinkCounter.incrementEventDrainAttemptCount();
        // Guarded: avoids materialising the whole body as a String (and the
        // concatenation) when DEBUG logging is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Sending request : " + new String(eventBody));
        }

        try {
          HttpURLConnection connection = connectionBuilder.getConnection();

          // Write the body, then close the stream to complete the request.
          outputStream = connection.getOutputStream();
          outputStream.write(eventBody);
          outputStream.flush();
          outputStream.close();

          int httpStatusCode = connection.getResponseCode();
          LOG.debug("Got status code : {}", httpStatusCode);

          // Consume and close the response stream so the underlying
          // connection can be released.
          if (httpStatusCode < HttpURLConnection.HTTP_BAD_REQUEST) {
            connection.getInputStream().close();
          } else {
            LOG.debug("bad request");
            connection.getErrorStream().close();
          }
          LOG.debug("Response processed and closed");

          if (httpStatusCode >= HTTP_STATUS_CONTINUE) {
            String httpStatusString = String.valueOf(httpStatusCode);

            boolean shouldRollback = findOverrideValue(httpStatusString,
                rollbackOverrides, defaultRollback);
            if (shouldRollback) {
              txn.rollback();
            } else {
              txn.commit();
            }

            boolean shouldBackoff = findOverrideValue(httpStatusString,
                backoffOverrides, defaultBackoff);
            if (shouldBackoff) {
              status = Status.BACKOFF;
            } else {
              status = Status.READY;
            }

            boolean shouldIncrementMetrics = findOverrideValue(httpStatusString,
                incrementMetricsOverrides, defaultIncrementMetrics);
            if (shouldIncrementMetrics) {
              sinkCounter.incrementEventDrainSuccessCount();
            }

            if (shouldRollback) {
              if (shouldBackoff) {
                LOG.info("Got status code {} from HTTP server."
                    + " Rolled back event and backed off.", httpStatusCode);
              } else {
                LOG.info("Got status code {} from HTTP server."
                    + " Rolled back event for retry.", httpStatusCode);
              }
            }
          } else {
            // Status codes below 100 are not valid HTTP responses.
            txn.rollback();
            status = Status.BACKOFF;

            LOG.warn("Malformed response returned from server, retrying");
          }
        } catch (IOException e) {
          txn.rollback();
          status = Status.BACKOFF;

          LOG.error("Error opening connection, or request timed out", e);
          sinkCounter.incrementEventWriteFail();
        }
      } else {
        // Nothing to deliver; commit the empty take and back off.
        txn.commit();
        status = Status.BACKOFF;

        LOG.warn("Processed empty event");
      }
    } catch (Throwable t) {
      txn.rollback();
      status = Status.BACKOFF;

      LOG.error("Error sending HTTP request, retrying", t);
      sinkCounter.incrementEventWriteOrChannelFail(t);

      // re-throw all Errors
      if (t instanceof Error) {
        throw (Error) t;
      }
    } finally {
      txn.close();

      // Defensive close in case an exception fired before the in-line close;
      // closing an already-closed stream is a no-op.
      if (outputStream != null) {
        try {
          outputStream.close();
        } catch (IOException e) {
          // ignore errors
        }
      }
    }

    return status;
  }

  /**
   * Reads a set of override values from the context configuration and stores
   * the results in the Map provided.
   *
   * @param propertyName the prefix of the config property names
   * @param context the context to use to read config properties
   * @param override the override Map to store results in
   */
  private void parseConfigOverrides(final String propertyName,
                                    final Context context,
                                    final Map<String, Boolean> override) {

    Map<String, String> config = context.getSubProperties(
        propertyName + ".");

    if (config != null) {
      for (Map.Entry<String, String> value : config.entrySet()) {
        LOG.info("Read {} value for status code {} as {}",
            propertyName, value.getKey(), value.getValue());

        // First configured value for a code wins; duplicates are ignored.
        if (override.containsKey(value.getKey())) {
          LOG.warn("Ignoring duplicate config value for {}.{}",
              propertyName, value.getKey());
        } else {
          override.put(value.getKey(), Boolean.valueOf(value.getValue()));
        }
      }
    }
  }

  /**
   * Queries the specified override map to find the most appropriate value. The
   * most specific match is found: an exact code (e.g. "503") beats a range
   * ("5XX"), which beats the default.
   *
   * @param statusCode the String representation of the HTTP status code
   * @param overrides the map of status code overrides
   * @param defaultValue the default value to use if no override is configured
   *
   * @return the value of the most specific match to the given status code
   */
  private boolean findOverrideValue(final String statusCode,
                                    final HashMap<String, Boolean> overrides,
                                    final boolean defaultValue) {
    Boolean overrideValue = overrides.get(statusCode);
    if (overrideValue == null) {
      overrideValue = overrides.get(statusCode.substring(0, 1) + "XX");
      if (overrideValue == null) {
        overrideValue = defaultValue;
      }
    }
    return overrideValue;
  }

  /**
   * Update the connection builder.
   *
   * @param builder the new value
   */
  final void setConnectionBuilder(final ConnectionBuilder builder) {
    this.connectionBuilder = builder;
  }

  /**
   * Update the sinkCounter.
   *
   * @param newSinkCounter the new value
   */
  final void setSinkCounter(final SinkCounter newSinkCounter) {
    this.sinkCounter = newSinkCounter;
  }

  /**
   * Class used to allow extending the connection building functionality.
   */
  class ConnectionBuilder {

    /**
     * Creates an HTTP connection to the configured endpoint address. This
     * connection is setup for a POST request, and uses the content type and
     * accept header values in the configuration.
     *
     * @return the connection object
     * @throws IOException on any connection error
     */
    public HttpURLConnection getConnection() throws IOException {
      HttpURLConnection connection = (HttpURLConnection)
          endpointUrl.openConnection();
      connection.setRequestMethod("POST");
      connection.setRequestProperty("Content-Type", contentTypeHeader);
      connection.setRequestProperty("Accept", acceptHeader);
      connection.setConnectTimeout(connectTimeout);
      connection.setReadTimeout(requestTimeout);
      connection.setDoOutput(true);
      connection.setDoInput(true);
      connection.connect();

      return connection;
    }
  }
}
| 9,830 |
0 | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-http-sink/src/main/java/org/apache/flume/sink/http/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package provides an HTTP sink for Flume so that events can be sent out
* to a target HTTP endpoint.
*/
package org.apache.flume.sink.http;
| 9,831 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestBlobDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import com.google.common.base.Charsets;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.serialization.EventDeserializer;
import org.apache.flume.serialization.EventDeserializerFactory;
import org.apache.flume.serialization.ResettableInputStream;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
/** Unit tests for {@code BlobDeserializer}: whole-stream slurping, factory
 * construction, mark/reset behaviour, and blob-length truncation. */
public class TestBlobDeserializer extends Assert {

  private String mini;

  @Before
  public void setup() {
    mini = "line 1\nline 2\n";
  }

  @Test
  public void testSimple() throws IOException {
    ResettableInputStream stream = new ResettableTestStringInputStream(mini);
    EventDeserializer deserializer = new BlobDeserializer(new Context(), stream);
    validateMiniParse(deserializer);
  }

  @Test
  public void testSimpleViaBuilder() throws IOException {
    ResettableInputStream stream = new ResettableTestStringInputStream(mini);
    EventDeserializer deserializer =
        new BlobDeserializer.Builder().build(new Context(), stream);
    validateMiniParse(deserializer);
  }

  @Test
  public void testSimpleViaFactory() throws IOException {
    ResettableInputStream stream = new ResettableTestStringInputStream(mini);
    EventDeserializer deserializer = EventDeserializerFactory.getInstance(
        BlobDeserializer.Builder.class.getName(), new Context(), stream);
    validateMiniParse(deserializer);
  }

  @Test
  public void testBatch() throws IOException {
    ResettableInputStream stream = new ResettableTestStringInputStream(mini);
    EventDeserializer deserializer = new BlobDeserializer(new Context(), stream);

    // Ask for more events than exist; the blob deserializer slurps the whole
    // stream into a single event.
    List<Event> events = deserializer.readEvents(10);
    assertEquals(1, events.size());
    assertEventBodyEquals(mini, events.get(0));

    deserializer.mark();
    deserializer.close();
  }

  // truncation occurs at maxLineLength boundaries
  @Test
  public void testMaxLineLength() throws IOException {
    String longLine = "abcdefghijklmnopqrstuvwxyz\n";
    Context ctx = new Context();
    ctx.put(BlobDeserializer.MAX_BLOB_LENGTH_KEY, "10");

    ResettableInputStream stream = new ResettableTestStringInputStream(longLine);
    EventDeserializer deserializer = new BlobDeserializer(ctx, stream);

    assertEventBodyEquals("abcdefghij", deserializer.readEvent());
    assertEventBodyEquals("klmnopqrst", deserializer.readEvent());
    assertEventBodyEquals("uvwxyz\n", deserializer.readEvent());
    assertNull(deserializer.readEvent());
  }

  /** Asserts the event body decodes (as UTF-8) to the expected string. */
  private void assertEventBodyEquals(String expected, Event event) {
    assertEquals(expected, new String(event.getBody(), Charsets.UTF_8));
  }

  /** Exercises one full read / reset / re-read / exhaustion cycle. */
  private void validateMiniParse(EventDeserializer des) throws IOException {
    des.mark();

    Event evt = des.readEvent();
    assertEquals(new String(evt.getBody()), mini);

    des.reset(); // rewind to the mark
    evt = des.readEvent();
    assertEquals("data should be repeated, because we reset() the stream",
        new String(evt.getBody()), mini);

    evt = des.readEvent();
    assertNull("Event should be null because there are no lines left to read",
        evt);

    des.mark();
    des.close();
  }
}
| 9,832 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestUUIDInterceptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.SimpleEvent;
import org.junit.Assert;
import org.junit.Test;
/** Unit tests for {@code UUIDInterceptor}: header assignment, preservation of
 * pre-existing header values, and the configurable prefix. */
public class TestUUIDInterceptor extends Assert {

  private static final String ID = "id";

  @Test
  public void testBasic() throws Exception {
    Context context = new Context();
    context.put(UUIDInterceptor.HEADER_NAME, ID);
    context.put(UUIDInterceptor.PRESERVE_EXISTING_NAME, "true");

    // A fresh event must come back with a non-empty id header.
    String headerValue =
        build(context).intercept(new SimpleEvent()).getHeaders().get(ID);
    assertTrue(headerValue.length() > 0);
  }

  @Test
  public void testPreserveExisting() throws Exception {
    Context context = new Context();
    context.put(UUIDInterceptor.HEADER_NAME, ID);
    context.put(UUIDInterceptor.PRESERVE_EXISTING_NAME, "true");

    // An event that already carries the header keeps its original value.
    Event event = new SimpleEvent();
    event.getHeaders().put(ID, "foo");
    assertEquals("foo", build(context).intercept(event).getHeaders().get(ID));
  }

  @Test
  public void testPrefix() throws Exception {
    Context context = new Context();
    context.put(UUIDInterceptor.HEADER_NAME, ID);
    context.put(UUIDInterceptor.PREFIX_NAME, "bar#");

    // The generated header value is prefixed with the configured string.
    String headerValue =
        build(context).intercept(new SimpleEvent()).getHeaders().get(ID);
    assertTrue(headerValue.startsWith("bar#"));
  }

  /** Builds an interceptor configured from the given context. */
  private UUIDInterceptor build(Context context) {
    UUIDInterceptor.Builder builder = new UUIDInterceptor.Builder();
    builder.configure(context);
    return builder.build();
  }
}
| 9,833 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestBlobHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import org.apache.flume.Event;
import org.apache.flume.source.http.HTTPSourceHandler;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/** Unit tests for {@code BlobHandler}: a request body becomes exactly one
 * header-less event, including the empty-body case. */
public class TestBlobHandler extends Assert {

  private HTTPSourceHandler handler;

  @Before
  public void setUp() {
    handler = new BlobHandler();
  }

  @Test
  public void testSingleEvent() throws Exception {
    byte[] payload = "foo".getBytes("UTF-8");
    HttpServletRequest request = new FlumeHttpServletRequestWrapper(payload);

    List<Event> events = handler.getEvents(request);
    assertEquals(1, events.size());

    Event event = events.get(0);
    assertEquals(0, event.getHeaders().size());
    assertEquals("foo", new String(event.getBody(), "UTF-8"));
  }

  @Test
  public void testEmptyEvent() throws Exception {
    byte[] payload = "".getBytes("UTF-8");
    HttpServletRequest request = new FlumeHttpServletRequestWrapper(payload);

    // Even an empty body yields a single event with an empty byte[] body.
    List<Event> events = handler.getEvents(request);
    assertEquals(1, events.size());

    Event event = events.get(0);
    assertEquals(0, event.getHeaders().size());
    assertEquals("", new String(event.getBody(), "UTF-8"));
  }
}
| 9,834 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/FlumeHttpServletRequestWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.security.Principal;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Locale;
import java.util.Map;
import javax.servlet.AsyncContext;
import javax.servlet.DispatcherType;
import javax.servlet.ReadListener;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletInputStream;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import javax.servlet.http.HttpUpgradeHandler;
import javax.servlet.http.Part;
class FlumeHttpServletRequestWrapper implements HttpServletRequest {
private ServletInputStream stream;
private String charset;
public FlumeHttpServletRequestWrapper(final byte[] data) {
stream = new ServletInputStream() {
@Override
public boolean isFinished() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isReady() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void setReadListener(ReadListener readListener) {
throw new UnsupportedOperationException("Not supported yet.");
}
private final InputStream in = new ByteArrayInputStream(data);
@Override
public int read() throws IOException {
return in.read();
}
};
}
@Override
public String getAuthType() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Cookie[] getCookies() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public long getDateHeader(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getHeader(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Enumeration getHeaders(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Enumeration getHeaderNames() {
return Collections.enumeration(Collections.EMPTY_LIST);
}
@Override
public int getIntHeader(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getMethod() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getPathInfo() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getPathTranslated() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getContextPath() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getQueryString() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRemoteUser() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isUserInRole(String role) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Principal getUserPrincipal() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRequestedSessionId() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRequestURI() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public StringBuffer getRequestURL() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getServletPath() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public HttpSession getSession(boolean create) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public HttpSession getSession() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String changeSessionId() {
return null;
}
@Override
public boolean isRequestedSessionIdValid() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isRequestedSessionIdFromCookie() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isRequestedSessionIdFromURL() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isRequestedSessionIdFromUrl() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean authenticate(HttpServletResponse response) throws IOException, ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void login(String username, String password) throws ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void logout() throws ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Collection<Part> getParts() throws IOException, ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Part getPart(String name) throws IOException, ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public <T extends HttpUpgradeHandler> T upgrade(Class<T> handlerClass)
throws IOException, ServletException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Object getAttribute(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Enumeration<String> getAttributeNames() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getCharacterEncoding() {
return charset;
}
@Override
public void setCharacterEncoding(String env) throws UnsupportedEncodingException {
this.charset = env;
}
@Override
public int getContentLength() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public long getContentLengthLong() {
return 0;
}
@Override
public String getContentType() {
return null;
}
@Override
public ServletInputStream getInputStream() throws IOException {
return stream;
}
@Override
public String getParameter(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Enumeration getParameterNames() {
return Collections.enumeration(Collections.EMPTY_LIST);
}
@Override
public String[] getParameterValues(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Map getParameterMap() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getProtocol() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getScheme() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getServerName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getServerPort() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public BufferedReader getReader() throws IOException {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRemoteAddr() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRemoteHost() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void setAttribute(String name, Object o) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void removeAttribute(String name) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Locale getLocale() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Enumeration<Locale> getLocales() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public boolean isSecure() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public RequestDispatcher getRequestDispatcher(String path) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getRealPath(String path) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getRemotePort() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getLocalName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getLocalAddr() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getLocalPort() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public ServletContext getServletContext() {
throw new UnsupportedOperationException("Not supported yet.");
}
    @Override
    public AsyncContext startAsync() throws IllegalStateException {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse)
        throws IllegalStateException {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public boolean isAsyncStarted() {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public boolean isAsyncSupported() {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public AsyncContext getAsyncContext() {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
    @Override
    public DispatcherType getDispatcherType() {
      // Mock stub: fails fast if the code under test unexpectedly calls it.
      throw new UnsupportedOperationException("Not supported yet.");
    }
}
| 9,835 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.net.UnknownHostException;
import org.junit.Test;
import org.kitesdk.morphline.solr.EnvironmentTest;
/** Prints and verifies some info about the environment in which the unit tests are running. */
public class TestEnvironment extends EnvironmentTest {
  @Test
  public void testEnvironment() throws UnknownHostException {
    // Delegates to the kite-morphlines EnvironmentTest so the checks run on this module's classpath.
    super.testEnvironment();
  }
}
| 9,836 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestMorphlineSolrSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelSelector;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.channel.BasicTransactionSemantics;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.kitesdk.morphline.api.MorphlineContext;
import org.kitesdk.morphline.api.Record;
import org.kitesdk.morphline.base.FaultTolerance;
import org.kitesdk.morphline.base.Fields;
import org.kitesdk.morphline.solr.DocumentLoader;
import org.kitesdk.morphline.solr.SolrLocator;
import org.kitesdk.morphline.solr.SolrMorphlineContext;
import org.kitesdk.morphline.solr.SolrServerDocumentLoader;
import org.kitesdk.morphline.solr.TestEmbeddedSolrServer;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.io.Files;
public class TestMorphlineSolrSink extends SolrTestCaseJ4 {
private EmbeddedSource source;
private SolrServer solrServer;
private MorphlineSink sink;
private Map<String,Integer> expectedRecords;
private File tmpFile;
private static final boolean TEST_WITH_EMBEDDED_SOLR_SERVER = true;
private static final String EXTERNAL_SOLR_SERVER_URL = System.getProperty("externalSolrServer");
//private static final String EXTERNAL_SOLR_SERVER_URL = "http://127.0.0.1:8983/solr";
private static final String RESOURCES_DIR = "target/test-classes";
//private static final String RESOURCES_DIR = "src/test/resources";
private static final AtomicInteger SEQ_NUM = new AtomicInteger();
private static final AtomicInteger SEQ_NUM2 = new AtomicInteger();
private static final Logger LOGGER = LoggerFactory.getLogger(TestMorphlineSolrSink.class);
@BeforeClass
public static void beforeClass() throws Exception {
initCore(
RESOURCES_DIR + "/solr/collection1/conf/solrconfig.xml",
RESOURCES_DIR + "/solr/collection1/conf/schema.xml",
RESOURCES_DIR + "/solr");
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
String path = RESOURCES_DIR + "/test-documents";
expectedRecords = new HashMap();
expectedRecords.put(path + "/sample-statuses-20120906-141433.avro", 2);
expectedRecords.put(path + "/sample-statuses-20120906-141433", 2);
expectedRecords.put(path + "/sample-statuses-20120906-141433.gz", 2);
expectedRecords.put(path + "/sample-statuses-20120906-141433.bz2", 2);
expectedRecords.put(path + "/cars.csv", 5);
expectedRecords.put(path + "/cars.csv.gz", 5);
expectedRecords.put(path + "/cars.tar.gz", 4);
expectedRecords.put(path + "/cars.tsv", 5);
expectedRecords.put(path + "/cars.ssv", 5);
final Map<String, String> context = new HashMap();
if (EXTERNAL_SOLR_SERVER_URL != null) {
throw new UnsupportedOperationException();
//solrServer = new ConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
//solrServer = new SafeConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
//solrServer = new HttpSolrServer(EXTERNAL_SOLR_SERVER_URL);
} else {
if (TEST_WITH_EMBEDDED_SOLR_SERVER) {
solrServer = new TestEmbeddedSolrServer(h.getCoreContainer(), "");
} else {
throw new RuntimeException("Not yet implemented");
//solrServer = new TestSolrServer(getSolrServer());
}
}
Map<String, String> channelContext = new HashMap();
channelContext.put("capacity", "1000000");
channelContext.put("keep-alive", "0"); // for faster tests
Channel channel = new MemoryChannel();
channel.setName(channel.getClass().getName() + SEQ_NUM.getAndIncrement());
Configurables.configure(channel, new Context(channelContext));
class MySolrSink extends MorphlineSolrSink {
public MySolrSink(MorphlineHandlerImpl indexer) {
super(indexer);
}
}
int batchSize = SEQ_NUM2.incrementAndGet() % 2 == 0 ? 100 : 1;
DocumentLoader testServer = new SolrServerDocumentLoader(solrServer, batchSize);
MorphlineContext solrMorphlineContext = new SolrMorphlineContext.Builder()
.setDocumentLoader(testServer)
.setExceptionHandler(new FaultTolerance(false, false, SolrServerException.class.getName()))
.setMetricRegistry(new MetricRegistry()).build();
MorphlineHandlerImpl impl = new MorphlineHandlerImpl();
impl.setMorphlineContext(solrMorphlineContext);
class MySolrLocator extends SolrLocator { // trick to access protected ctor
public MySolrLocator(MorphlineContext indexer) {
super(indexer);
}
}
SolrLocator locator = new MySolrLocator(solrMorphlineContext);
locator.setSolrHomeDir(testSolrHome + "/collection1");
String str1 = "SOLR_LOCATOR : " + locator.toString();
//File solrLocatorFile = new File("target/test-classes/test-morphlines/solrLocator.conf");
//String str1 = Files.toString(solrLocatorFile, Charsets.UTF_8);
File morphlineFile = new File("target/test-classes/test-morphlines/solrCellDocumentTypes.conf");
String str2 = Files.toString(morphlineFile, Charsets.UTF_8);
tmpFile = File.createTempFile("morphline", ".conf");
tmpFile.deleteOnExit();
Files.write(str1 + "\n" + str2, tmpFile, Charsets.UTF_8);
context.put("morphlineFile", tmpFile.getPath());
impl.configure(new Context(context));
sink = new MySolrSink(impl);
sink.setName(sink.getClass().getName() + SEQ_NUM.getAndIncrement());
sink.configure(new Context(context));
sink.setChannel(channel);
sink.start();
source = new EmbeddedSource(sink);
ChannelSelector rcs = new ReplicatingChannelSelector();
rcs.setChannels(Collections.singletonList(channel));
ChannelProcessor chp = new ChannelProcessor(rcs);
Context chpContext = new Context();
chpContext.put("interceptors", "uuidinterceptor");
chpContext.put("interceptors.uuidinterceptor.type", UUIDInterceptor.Builder.class.getName());
chp.configure(chpContext);
source.setChannelProcessor(chp);
deleteAllDocuments();
}
private void deleteAllDocuments() throws SolrServerException, IOException {
SolrServer s = solrServer;
s.deleteByQuery("*:*"); // delete everything!
s.commit();
}
@After
@Override
public void tearDown() throws Exception {
try {
if (source != null) {
source.stop();
source = null;
}
if (sink != null) {
sink.stop();
sink = null;
}
if (tmpFile != null) {
tmpFile.delete();
}
} finally {
solrServer = null;
expectedRecords = null;
super.tearDown();
}
}
@Test
public void testDocumentTypes() throws Exception {
String path = RESOURCES_DIR + "/test-documents";
String[] files = new String[] {
path + "/testBMPfp.txt",
path + "/boilerplate.html",
path + "/NullHeader.docx",
path + "/testWORD_various.doc",
path + "/testPDF.pdf",
path + "/testJPEG_EXIF.jpg",
path + "/testXML.xml",
// path + "/cars.csv",
// path + "/cars.tsv",
// path + "/cars.ssv",
// path + "/cars.csv.gz",
// path + "/cars.tar.gz",
path + "/sample-statuses-20120906-141433.avro",
path + "/sample-statuses-20120906-141433",
path + "/sample-statuses-20120906-141433.gz",
path + "/sample-statuses-20120906-141433.bz2",
};
testDocumentTypesInternal(files);
}
@Test
public void testDocumentTypes2() throws Exception {
String path = RESOURCES_DIR + "/test-documents";
String[] files = new String[] {
path + "/testPPT_various.ppt",
path + "/testPPT_various.pptx",
path + "/testEXCEL.xlsx",
path + "/testEXCEL.xls",
path + "/testPages.pages",
path + "/testNumbers.numbers",
path + "/testKeynote.key",
path + "/testRTFVarious.rtf",
path + "/complex.mbox",
path + "/test-outlook.msg",
path + "/testEMLX.emlx",
// path + "/testRFC822",
path + "/rsstest.rss",
// path + "/testDITA.dita",
path + "/testMP3i18n.mp3",
path + "/testAIFF.aif",
path + "/testFLAC.flac",
// path + "/testFLAC.oga",
// path + "/testVORBIS.ogg",
path + "/testMP4.m4a",
path + "/testWAV.wav",
// path + "/testWMA.wma",
path + "/testFLV.flv",
// path + "/testWMV.wmv",
path + "/testBMP.bmp",
path + "/testPNG.png",
path + "/testPSD.psd",
path + "/testSVG.svg",
path + "/testTIFF.tif",
// path + "/test-documents.7z",
// path + "/test-documents.cpio",
// path + "/test-documents.tar",
// path + "/test-documents.tbz2",
// path + "/test-documents.tgz",
// path + "/test-documents.zip",
// path + "/test-zip-of-zip.zip",
// path + "/testJAR.jar",
// path + "/testKML.kml",
// path + "/testRDF.rdf",
path + "/testTrueType.ttf",
path + "/testVISIO.vsd",
// path + "/testWAR.war",
// path + "/testWindows-x86-32.exe",
// path + "/testWINMAIL.dat",
// path + "/testWMF.wmf",
};
testDocumentTypesInternal(files);
}
@Test
public void testErrorCounters() throws Exception {
Channel channel = Mockito.mock(Channel.class);
Mockito.when(channel.take()).thenThrow(new ChannelException("dummy"));
Transaction transaction = Mockito.mock(BasicTransactionSemantics.class);
Mockito.when(channel.getTransaction()).thenReturn(transaction);
sink.setChannel(channel);
sink.process();
SinkCounter sinkCounter = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
assertEquals(1, sinkCounter.getChannelReadFail());
}
@Test
public void testAvroRoundTrip() throws Exception {
String file = RESOURCES_DIR + "/test-documents" + "/sample-statuses-20120906-141433.avro";
testDocumentTypesInternal(file);
QueryResponse rsp = query("*:*");
Iterator<SolrDocument> iter = rsp.getResults().iterator();
ListMultimap<String, String> expectedFieldValues;
expectedFieldValues = ImmutableListMultimap.of("id", "1234567890", "text", "sample tweet one",
"user_screen_name", "fake_user1");
assertEquals(expectedFieldValues, next(iter));
expectedFieldValues = ImmutableListMultimap.of("id", "2345678901", "text", "sample tweet two",
"user_screen_name", "fake_user2");
assertEquals(expectedFieldValues, next(iter));
assertFalse(iter.hasNext());
}
private ListMultimap<String, Object> next(Iterator<SolrDocument> iter) {
SolrDocument doc = iter.next();
Record record = toRecord(doc);
record.removeAll("_version_"); // the values of this field are unknown and internal to solr
return record.getFields();
}
private Record toRecord(SolrDocument doc) {
Record record = new Record();
for (String key : doc.keySet()) {
record.getFields().replaceValues(key, doc.getFieldValues(key));
}
return record;
}
private void testDocumentTypesInternal(String... files) throws Exception {
int numDocs = 0;
long startTime = System.currentTimeMillis();
assertEquals(numDocs, queryResultSetSize("*:*"));
// assertQ(req("*:*"), "//*[@numFound='0']");
for (int i = 0; i < 1; i++) {
for (String file : files) {
File f = new File(file);
byte[] body = Files.toByteArray(f);
Event event = EventBuilder.withBody(body);
event.getHeaders().put(Fields.ATTACHMENT_NAME, f.getName());
load(event);
Integer count = expectedRecords.get(file);
if (count != null) {
numDocs += count;
} else {
numDocs++;
}
assertEquals(numDocs, queryResultSetSize("*:*"));
}
LOGGER.trace("iter: {}", i);
}
LOGGER.trace("all done with put at {}", System.currentTimeMillis() - startTime);
assertEquals(numDocs, queryResultSetSize("*:*"));
LOGGER.trace("sink: ", sink);
}
// @Test
public void benchmarkDocumentTypes() throws Exception {
int iters = 200;
// LogManager.getLogger(getClass().getPackage().getName()).setLevel(Level.INFO);
assertEquals(0, queryResultSetSize("*:*"));
String path = RESOURCES_DIR + "/test-documents";
String[] files = new String[] {
// path + "/testBMPfp.txt",
// path + "/boilerplate.html",
// path + "/NullHeader.docx",
// path + "/testWORD_various.doc",
// path + "/testPDF.pdf",
// path + "/testJPEG_EXIF.jpg",
// path + "/testXML.xml",
// path + "/cars.csv",
// path + "/cars.csv.gz",
// path + "/cars.tar.gz",
// path + "/sample-statuses-20120906-141433.avro",
path + "/sample-statuses-20120906-141433-medium.avro",
};
List<Event> events = new ArrayList();
for (String file : files) {
File f = new File(file);
byte[] body = Files.toByteArray(f);
Event event = EventBuilder.withBody(body);
// event.getHeaders().put(Metadata.RESOURCE_NAME_KEY, f.getName());
events.add(event);
}
long startTime = System.currentTimeMillis();
for (int i = 0; i < iters; i++) {
if (i % 10000 == 0) {
LOGGER.info("iter: {}", i);
}
for (Event event : events) {
event = EventBuilder.withBody(event.getBody(), new HashMap(event.getHeaders()));
event.getHeaders().put("id", UUID.randomUUID().toString());
load(event);
}
}
float secs = (System.currentTimeMillis() - startTime) / 1000.0f;
long numDocs = queryResultSetSize("*:*");
LOGGER.info("Took secs: " + secs + ", iters/sec: " + (iters / secs));
LOGGER.info("Took secs: " + secs + ", docs/sec: " + (numDocs / secs));
LOGGER.info("Iterations: " + iters + ", numDocs: " + numDocs);
LOGGER.info("sink: ", sink);
}
private void load(Event event) throws EventDeliveryException {
source.load(event);
}
private void commit() throws SolrServerException, IOException {
solrServer.commit(false, true, true);
}
private int queryResultSetSize(String query) throws SolrServerException, IOException {
commit();
QueryResponse rsp = query(query);
LOGGER.debug("rsp: {}", rsp);
int size = rsp.getResults().size();
return size;
}
private QueryResponse query(String query) throws SolrServerException, IOException {
commit();
QueryResponse rsp = solrServer.query(new SolrQuery(query).setRows(Integer.MAX_VALUE));
LOGGER.debug("rsp: {}", rsp);
return rsp;
}
}
| 9,837 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/TestMorphlineInterceptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Test;
import org.kitesdk.morphline.base.Fields;
import java.io.File;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Unit tests for {@link MorphlineInterceptor}, covering pass-through, clob parsing,
 * grok-based record dropping, and MIME-type based routing morphlines.
 */
public class TestMorphlineInterceptor extends Assert {

  private static final String RESOURCES_DIR = "target/test-classes";

  /** A no-op morphline must leave body and headers untouched. */
  @Test
  public void testNoOperation() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/noOperation.conf");
    Event input = EventBuilder.withBody("foo", Charsets.UTF_8);
    input.getHeaders().put("name", "nadja");
    MorphlineInterceptor interceptor = build(context);
    Event actual = interceptor.intercept(input);
    interceptor.close();
    Event expected = EventBuilder.withBody("foo".getBytes(Charsets.UTF_8),
        ImmutableMap.of("name", "nadja"));
    assertEqualsEvent(expected, actual);
    List<Event> actualList = build(context).intercept(Collections.singletonList(input));
    List<Event> expectedList = Collections.singletonList(expected);
    assertEqualsEventList(expectedList, actualList);
  }

  /** readClob must move the body into the message field, preserving existing headers. */
  @Test
  public void testReadClob() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/readClob.conf");
    Event input = EventBuilder.withBody("foo", Charsets.UTF_8);
    input.getHeaders().put("name", "nadja");
    Event actual = build(context).intercept(input);
    Event expected = EventBuilder.withBody(null,
        ImmutableMap.of("name", "nadja", Fields.MESSAGE, "foo"));
    assertEqualsEvent(expected, actual);
    List<Event> actualList = build(context).intercept(Collections.singletonList(input));
    List<Event> expectedList = Collections.singletonList(expected);
    assertEqualsEventList(expectedList, actualList);
  }

  /** A matching syslog line must be parsed into the expected grok fields. */
  @Test
  public void testGrokIfNotMatchDropEventRetain() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");
    String msg = "<164>Feb  4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0 port 22.";
    Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
    Event actual = build(context).intercept(input);
    Map<String, String> expected = new HashMap<>();
    expected.put(Fields.MESSAGE, msg);
    expected.put("syslog_pri", "164");
    expected.put("syslog_timestamp", "Feb  4 10:46:14");
    expected.put("syslog_hostname", "syslog");
    expected.put("syslog_program", "sshd");
    expected.put("syslog_pid", "607");
    expected.put("syslog_message", "Server listening on 0.0.0.0 port 22.");
    Event expectedEvent = EventBuilder.withBody(null, expected);
    assertEqualsEvent(expectedEvent, actual);
  }

  @Test
  /* leading XXXXX does not match regex, thus we expect the event to be dropped */
  public void testGrokIfNotMatchDropEventDrop() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");
    String msg = "<XXXXXXXXXXXXX164>Feb  4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0" +
        " port 22.";
    Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
    Event actual = build(context).intercept(input);
    assertNull(actual);
  }

  @Test
  /** morphline says route to southpole if it's an avro file, otherwise route to northpole */
  public void testIfDetectMimeTypeRouteToSouthPole() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/ifDetectMimeType.conf");
    context.put(MorphlineHandlerImpl.MORPHLINE_VARIABLE_PARAM + ".MY.MIME_TYPE", "avro/binary");
    Event input = EventBuilder.withBody(Files.toByteArray(
        new File(RESOURCES_DIR + "/test-documents/sample-statuses-20120906-141433.avro")));
    Event actual = build(context).intercept(input);
    Map<String, String> expected = new HashMap<>();
    expected.put(Fields.ATTACHMENT_MIME_TYPE, "avro/binary");
    expected.put("flume.selector.header", "goToSouthPole");
    Event expectedEvent = EventBuilder.withBody(input.getBody(), expected);
    assertEqualsEvent(expectedEvent, actual);
  }

  @Test
  /** morphline says route to southpole if it's an avro file, otherwise route to northpole */
  public void testIfDetectMimeTypeRouteToNorthPole() throws Exception {
    Context context = new Context();
    context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
        RESOURCES_DIR + "/test-morphlines/ifDetectMimeType.conf");
    context.put(MorphlineHandlerImpl.MORPHLINE_VARIABLE_PARAM + ".MY.MIME_TYPE", "avro/binary");
    Event input = EventBuilder.withBody(
        Files.toByteArray(new File(RESOURCES_DIR + "/test-documents/testPDF.pdf")));
    Event actual = build(context).intercept(input);
    Map<String, String> expected = new HashMap<>();
    expected.put(Fields.ATTACHMENT_MIME_TYPE, "application/pdf");
    expected.put("flume.selector.header", "goToNorthPole");
    Event expectedEvent = EventBuilder.withBody(input.getBody(), expected);
    assertEqualsEvent(expectedEvent, actual);
  }

  /** Builds a fully configured interceptor from the given context. */
  private MorphlineInterceptor build(Context context) {
    MorphlineInterceptor.Builder builder = new MorphlineInterceptor.Builder();
    builder.configure(context);
    return builder.build();
  }

  // b/c SimpleEvent doesn't implement equals() method :-(
  private void assertEqualsEvent(Event x, Event y) {
    assertEquals(x.getHeaders(), y.getHeaders());
    assertArrayEquals(x.getBody(), y.getBody());
  }

  private void assertEqualsEventList(List<Event> x, List<Event> y) {
    assertEquals(x.size(), y.size());
    for (int i = 0; i < x.size(); i++) {
      assertEqualsEvent(x.get(i), y.get(i));
    }
  }
}
| 9,838 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/ResettableTestStringInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.IOException;
import org.apache.flume.serialization.ResettableInputStream;
/**
 * A trivial in-memory {@link ResettableInputStream} over a String, intended only for unit tests.
 * No character/byte conversion is performed; each char is simply narrowed to a byte.
 */
class ResettableTestStringInputStream extends ResettableInputStream {

  private String data;
  private int markedPosition = 0;
  private int position = 0;

  /**
   * Warning: This test class does not handle character/byte conversion at all!
   * @param str String to use for testing
   */
  public ResettableTestStringInputStream(String str) {
    this.data = str;
  }

  @Override
  public int readChar() throws IOException {
    throw new UnsupportedOperationException("This test class doesn't return strings!");
  }

  @Override
  public void mark() throws IOException {
    // Remember the current offset so reset() can rewind to it later.
    markedPosition = position;
  }

  @Override
  public void reset() throws IOException {
    position = markedPosition;
  }

  @Override
  public void seek(long pos) throws IOException {
    throw new UnsupportedOperationException("Unimplemented in test class");
  }

  @Override
  public long tell() throws IOException {
    throw new UnsupportedOperationException("Unimplemented in test class");
  }

  @Override
  public int read() throws IOException {
    // Next char value (see class note about conversion), or -1 at end of data.
    return position < data.length() ? data.charAt(position++) : -1;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    if (position >= data.length()) {
      return -1;
    }
    int copied = 0;
    while (copied < len && position < data.length()) {
      b[off + copied] = (byte) data.charAt(position);
      position++;
      copied++;
    }
    return copied;
  }

  @Override
  public void close() throws IOException {
    // Nothing to release for an in-memory stream.
  }
}
| 9,839 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/test/java/org/apache/flume/sink/solr/morphline/EmbeddedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.EventDrivenSource;
import org.apache.flume.Sink;
import org.apache.flume.source.AbstractSource;
/**
 * Test helper source that synchronously pushes events through the channel processor and then
 * immediately drives the given sink's process() loop, so tests need no background threads.
 */
class EmbeddedSource extends AbstractSource implements EventDrivenSource {
  // Sink to drive synchronously after each delivery.
  private Sink sink;
  public EmbeddedSource(Sink sink) {
    this.sink = sink;
  }
  /** Delivers a single event to the channel and lets the sink consume it. */
  public void load(Event event) throws EventDeliveryException {
    getChannelProcessor().processEvent(event);
    sink.process();
  }
  /** Delivers a batch of events to the channel and lets the sink consume them. */
  public void load(List<Event> events) throws EventDeliveryException {
    getChannelProcessor().processEventBatch(events);
    sink.process();
  }
}
| 9,840 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/MorphlineHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.IOException;
import org.apache.flume.Event;
import org.apache.flume.conf.Configurable;
/**
 * Handler that transforms Flume events (e.g. via a morphline) and loads the results into Solr.
 * Callers drive it with a begin / process / commit-or-rollback transaction lifecycle.
 */
public interface MorphlineHandler extends Configurable {

  /** Begins a transaction. */
  public void beginTransaction();

  /** Transforms the given Flume event and loads the result into Solr. */
  public void process(Event event);

  /**
   * Sends any outstanding documents to Solr and waits for a positive
   * or negative ack (i.e. exception). Depending on the outcome the caller
   * should then commit or rollback the current flume transaction
   * correspondingly.
   */
  public void commitTransaction();

  /**
   * Performs a rollback of all non-committed documents pending.
   * <p>
   * Note that this is not a true rollback as in databases. Content you have previously added to
   * Solr may have already been committed due to autoCommit, buffer full, other client performing a
   * commit etc. So this is only a best-effort rollback.
   */
  public void rollbackTransaction();

  /** Releases allocated resources. */
  public void stop();
}
| 9,841 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/BlobHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.InputStream;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.conf.LogPrivacyUtil;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.source.http.HTTPSourceHandler;
import org.apache.tika.metadata.Metadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* BlobHandler for HTTPSource that returns event that contains the request
* parameters as well as the Binary Large Object (BLOB) uploaded with this
* request.
* <p>
* Note that this approach is not suitable for very large objects because it
* buffers up the entire BLOB.
* <p>
* Example client usage:
* <pre>
* curl --data-binary @sample-statuses-20120906-141433-medium.avro 'http://127.0.0.1:5140?resourceName=sample-statuses-20120906-141433-medium.avro' --header 'Content-Type:application/octet-stream' --verbose
* </pre>
*/
public class BlobHandler implements HTTPSourceHandler {

  /** Upper bound on buffered body bytes per request; anything beyond is truncated. */
  private int maxBlobLength = MAX_BLOB_LENGTH_DEFAULT;

  public static final String MAX_BLOB_LENGTH_KEY = "maxBlobLength";
  public static final int MAX_BLOB_LENGTH_DEFAULT = 100 * 1000 * 1000;

  private static final int DEFAULT_BUFFER_SIZE = 1024 * 8;
  private static final Logger LOGGER = LoggerFactory.getLogger(BlobHandler.class);

  public BlobHandler() {
  }

  /**
   * Reads the optional {@value #MAX_BLOB_LENGTH_KEY} property from the source
   * context; rejects non-positive values.
   *
   * @throws ConfigurationException if the configured limit is not positive
   */
  @Override
  public void configure(Context context) {
    this.maxBlobLength = context.getInteger(MAX_BLOB_LENGTH_KEY, MAX_BLOB_LENGTH_DEFAULT);
    if (this.maxBlobLength <= 0) {
      throw new ConfigurationException("Configuration parameter " + MAX_BLOB_LENGTH_KEY
          + " must be greater than zero: " + maxBlobLength);
    }
  }

  /**
   * Buffers the entire request body (up to maxBlobLength bytes, truncating with
   * a warning beyond that) and returns a singleton list containing one event
   * whose body is the BLOB and whose headers are built by {@link #getHeaders}.
   * An empty request body yields an event with a zero-length body.
   */
  @SuppressWarnings("resource")
  @Override
  public List<Event> getEvents(HttpServletRequest request) throws Exception {
    Map<String, String> headers = getHeaders(request);
    InputStream in = request.getInputStream();
    try {
      // Allocated lazily so a body-less request never creates a stream.
      ByteArrayOutputStream blob = null;
      byte[] buf = new byte[Math.min(maxBlobLength, DEFAULT_BUFFER_SIZE)];
      int blobLength = 0;
      int n = 0;
      // Cap each read so we never buffer more than maxBlobLength in total.
      while ((n = in.read(buf, 0, Math.min(buf.length, maxBlobLength - blobLength))) != -1) {
        if (blob == null) {
          blob = new ByteArrayOutputStream(n);
        }
        blob.write(buf, 0, n);
        blobLength += n;
        if (blobLength >= maxBlobLength) {
          LOGGER.warn("Request length exceeds maxBlobLength ({}), truncating BLOB event!",
              maxBlobLength);
          break;
        }
      }
      byte[] array = blob != null ? blob.toByteArray() : new byte[0];
      Event event = EventBuilder.withBody(array, headers);
      if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
        LOGGER.debug("blobEvent: {}", event);
      }
      return Collections.singletonList(event);
    } finally {
      in.close();
    }
  }

  /**
   * Builds the event headers from the request: the Content-Type (if present)
   * plus every request parameter. Raw HTTP headers are only logged (when raw
   * data logging is permitted), never attached to the event.
   */
  private Map<String, String> getHeaders(HttpServletRequest request) {
    if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
      Map<String, String> requestHeaders = new HashMap<>();
      // Enumeration<?> keeps this compatible with pre-Servlet-3.0 signatures.
      Enumeration<?> headerNames = request.getHeaderNames();
      while (headerNames.hasMoreElements()) {
        String name = (String) headerNames.nextElement();
        requestHeaders.put(name, request.getHeader(name));
      }
      LOGGER.debug("requestHeaders: {}", requestHeaders);
    }
    Map<String, String> headers = new HashMap<>();
    if (request.getContentType() != null) {
      headers.put(Metadata.CONTENT_TYPE, request.getContentType());
    }
    Enumeration<?> paramNames = request.getParameterNames();
    while (paramNames.hasMoreElements()) {
      String name = (String) paramNames.nextElement();
      headers.put(name, request.getParameter(name));
    }
    return headers;
  }
}
| 9,842 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/MorphlineSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.conf.LogPrivacyUtil;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.kitesdk.morphline.api.Command;
/**
* Flume sink that extracts search documents from Flume events and processes them using a morphline
* {@link Command} chain.
*/
public class MorphlineSink extends AbstractSink implements Configurable, BatchSizeSupported {

  // Maximum number of events taken from the channel per Flume transaction
  // (overridden via the "batchSize" config key).
  private int maxBatchSize = 1000;
  // Maximum wall-clock duration of a single batch in milliseconds
  // (overridden via "batchDurationMillis").
  private long maxBatchDurationMillis = 1000;
  // Fully qualified class name of the MorphlineHandler to instantiate in start().
  private String handlerClass;
  // Processes events through the morphline; created lazily in start() unless
  // injected via the test constructor.
  private MorphlineHandler handler;
  // Saved by configure() so start() can configure a lazily created handler.
  private Context context;
  private SinkCounter sinkCounter;

  // Configuration property names.
  public static final String BATCH_SIZE = "batchSize";
  public static final String BATCH_DURATION_MILLIS = "batchDurationMillis";
  public static final String HANDLER_CLASS = "handlerClass";

  private static final Logger LOGGER = LoggerFactory.getLogger(MorphlineSink.class);

  public MorphlineSink() {
    this(null);
  }

  /** For testing only */
  protected MorphlineSink(MorphlineHandler handler) {
    this.handler = handler;
  }

  /**
   * Reads batch sizing and the handler class name from the sink context.
   * The SinkCounter is only created on the first call, so reconfiguration
   * keeps existing metrics.
   */
  @Override
  public void configure(Context context) {
    this.context = context;
    maxBatchSize = context.getInteger(BATCH_SIZE, maxBatchSize);
    maxBatchDurationMillis = context.getLong(BATCH_DURATION_MILLIS, maxBatchDurationMillis);
    handlerClass = context.getString(HANDLER_CLASS, MorphlineHandlerImpl.class.getName());
    if (sinkCounter == null) {
      sinkCounter = new SinkCounter(getName());
    }
  }

  /**
   * Returns the maximum number of events to take per flume transaction;
   * override to customize
   */
  private int getMaxBatchSize() {
    return maxBatchSize;
  }

  /** Returns the maximum duration per flume transaction; override to customize */
  private long getMaxBatchDurationMillis() {
    return maxBatchDurationMillis;
  }

  /**
   * Instantiates and configures the handler (reflectively, from handlerClass)
   * if one was not injected, then starts the sink.
   */
  @Override
  public synchronized void start() {
    LOGGER.info("Starting Morphline Sink {} ...", this);
    sinkCounter.start();
    if (handler == null) {
      MorphlineHandler tmpHandler;
      try {
        tmpHandler = (MorphlineHandler) Class.forName(handlerClass).newInstance();
      } catch (Exception e) {
        throw new ConfigurationException(e);
      }
      tmpHandler.configure(context);
      handler = tmpHandler;
    }
    super.start();
    LOGGER.info("Morphline Sink {} started.", getName());
  }

  /** Stops the handler and the metrics counter; always calls super.stop(). */
  @Override
  public synchronized void stop() {
    LOGGER.info("Morphline Sink {} stopping...", getName());
    try {
      if (handler != null) {
        handler.stop();
      }
      sinkCounter.stop();
      LOGGER.info("Morphline Sink {} stopped. Metrics: {}", getName(), sinkCounter);
    } finally {
      super.stop();
    }
  }

  /**
   * Takes up to getMaxBatchSize() events from the channel within a single Flume
   * transaction (bounded also by getMaxBatchDurationMillis()), feeds each event
   * to the morphline handler, then commits the morphline transaction followed by
   * the Flume transaction. On any failure both are rolled back (morphline first,
   * Flume second) before rethrowing or backing off.
   *
   * @return BACKOFF when the channel yielded no events (or on ChannelException),
   *         READY otherwise.
   */
  @Override
  public Status process() throws EventDeliveryException {
    int batchSize = getMaxBatchSize();
    long batchEndTime = System.currentTimeMillis() + getMaxBatchDurationMillis();
    Channel myChannel = getChannel();
    Transaction txn = myChannel.getTransaction();
    txn.begin();
    // Tracks whether the morphline-side transaction still needs a rollback on failure.
    boolean isMorphlineTransactionCommitted = true;
    try {
      int numEventsTaken = 0;
      handler.beginTransaction();
      isMorphlineTransactionCommitted = false;

      // repeatedly take and process events from the Flume queue
      for (int i = 0; i < batchSize; i++) {
        Event event = myChannel.take();
        if (event == null) {
          break;
        }
        sinkCounter.incrementEventDrainAttemptCount();
        numEventsTaken++;
        if (LOGGER.isTraceEnabled() && LogPrivacyUtil.allowLogRawData()) {
          LOGGER.trace("Flume event arrived {}", event);
        }
        //StreamEvent streamEvent = createStreamEvent(event);
        handler.process(event);
        // Enforce the time bound after processing so at least one event makes progress.
        if (System.currentTimeMillis() >= batchEndTime) {
          break;
        }
      }

      // update metrics
      if (numEventsTaken == 0) {
        sinkCounter.incrementBatchEmptyCount();
      }
      if (numEventsTaken < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      // Commit morphline side first; only then commit the Flume transaction,
      // so uncommitted morphline work is never acknowledged to the channel.
      handler.commitTransaction();
      isMorphlineTransactionCommitted = true;
      txn.commit();
      sinkCounter.addToEventDrainSuccessCount(numEventsTaken);
      return numEventsTaken == 0 ? Status.BACKOFF : Status.READY;
    } catch (Throwable t) {
      // Ooops - need to rollback and back off
      LOGGER.error("Morphline Sink " + getName() + ": Unable to process event from channel " +
          myChannel.getName() + ". Exception follows.", t);
      sinkCounter.incrementEventWriteOrChannelFail(t);
      try {
        if (!isMorphlineTransactionCommitted) {
          handler.rollbackTransaction();
        }
      } catch (Throwable t2) {
        LOGGER.error("Morphline Sink " + getName() +
            ": Unable to rollback morphline transaction. Exception follows.", t2);
      } finally {
        try {
          txn.rollback();
        } catch (Throwable t4) {
          LOGGER.error("Morphline Sink " + getName() + ": Unable to rollback Flume transaction. " +
              "Exception follows.", t4);
        }
      }
      if (t instanceof Error) {
        throw (Error) t; // rethrow original exception
      } else if (t instanceof ChannelException) {
        return Status.BACKOFF;
      } else {
        throw new EventDeliveryException("Failed to send events", t); // rethrow and backoff
      }
    } finally {
      txn.close();
    }
  }

  @Override
  public long getBatchSize() {
    return getMaxBatchSize();
  }

  /** Returns e.g. "solrSink (MorphlineSink)" for log readability. */
  @Override
  public String toString() {
    int i = getClass().getName().lastIndexOf('.') + 1;
    String shortClassName = getClass().getName().substring(i);
    return getName() + " (" + shortClassName + ")";
  }
}
| 9,843 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/BlobDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.IOException;
import java.util.List;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.serialization.EventDeserializer;
import org.apache.flume.serialization.ResettableInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
/**
* A deserializer that reads a Binary Large Object (BLOB) per event, typically
* one BLOB per file; To be used in conjunction with Flume SpoolDirectorySource.
* <p>
* Note that this approach is not suitable for very large objects because it
* buffers up the entire BLOB.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlobDeserializer implements EventDeserializer {

  private ResettableInputStream in;
  private final int maxBlobLength;
  private volatile boolean isOpen;

  public static final String MAX_BLOB_LENGTH_KEY = "maxBlobLength";
  public static final int MAX_BLOB_LENGTH_DEFAULT = 100 * 1000 * 1000;

  private static final int DEFAULT_BUFFER_SIZE = 1024 * 8;
  private static final Logger LOGGER = LoggerFactory.getLogger(BlobDeserializer.class);

  protected BlobDeserializer(Context context, ResettableInputStream in) {
    this.in = in;
    this.maxBlobLength = context.getInteger(MAX_BLOB_LENGTH_KEY, MAX_BLOB_LENGTH_DEFAULT);
    if (this.maxBlobLength <= 0) {
      throw new ConfigurationException("Configuration parameter " + MAX_BLOB_LENGTH_KEY
          + " must be greater than zero: " + maxBlobLength);
    }
    this.isOpen = true;
  }

  /**
   * Reads the remainder of the stream (capped at maxBlobLength bytes, with a
   * warning on truncation) and wraps it in a single event.
   *
   * @return the BLOB event, or null if the stream was already exhausted
   * @throws IOException on read failure
   */
  @SuppressWarnings("resource")
  @Override
  public Event readEvent() throws IOException {
    ensureOpen();
    ByteArrayOutputStream accumulated = null;
    byte[] chunk = new byte[Math.min(maxBlobLength, DEFAULT_BUFFER_SIZE)];
    int totalBytes = 0;
    while (true) {
      // Never request more than the remaining budget toward maxBlobLength.
      int budget = Math.min(chunk.length, maxBlobLength - totalBytes);
      int count = in.read(chunk, 0, budget);
      if (count == -1) {
        break;
      }
      if (accumulated == null) {
        // Allocated lazily so an exhausted stream costs nothing.
        accumulated = new ByteArrayOutputStream(count);
      }
      accumulated.write(chunk, 0, count);
      totalBytes += count;
      if (totalBytes >= maxBlobLength) {
        LOGGER.warn("File length exceeds maxBlobLength ({}), truncating BLOB event!",
            maxBlobLength);
        break;
      }
    }
    return accumulated == null ? null : EventBuilder.withBody(accumulated.toByteArray());
  }

  /**
   * Reads up to {@code numEvents} BLOB events, stopping early at end of stream.
   *
   * @param numEvents Maximum number of events to return.
   * @return the events read; possibly empty, never null
   * @throws IOException on read failure
   */
  @Override
  public List<Event> readEvents(int numEvents) throws IOException {
    ensureOpen();
    List<Event> events = Lists.newLinkedList();
    while (events.size() < numEvents) {
      Event next = readEvent();
      if (next == null) {
        break;
      }
      events.add(next);
    }
    return events;
  }

  /** Records the current stream position as the new mark. */
  @Override
  public void mark() throws IOException {
    ensureOpen();
    in.mark();
  }

  /** Rewinds the stream to the last mark. */
  @Override
  public void reset() throws IOException {
    ensureOpen();
    in.reset();
  }

  /** Rewinds to the last mark, then closes the stream; idempotent. */
  @Override
  public void close() throws IOException {
    if (!isOpen) {
      return;
    }
    reset();
    in.close();
    isOpen = false;
  }

  /** @throws IllegalStateException if this deserializer was already closed */
  private void ensureOpen() {
    if (!isOpen) {
      throw new IllegalStateException("Serializer has been closed");
    }
  }

  ///////////////////////////////////////////////////////////////////////////////
  // Nested classes:
  ///////////////////////////////////////////////////////////////////////////////
  /** Builder implementations MUST have a public no-arg constructor */
  public static class Builder implements EventDeserializer.Builder {

    @Override
    public BlobDeserializer build(Context context, ResettableInputStream in) {
      return new BlobDeserializer(context, in);
    }

  }

}
| 9,844 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/UUIDInterceptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;
/**
* Flume Interceptor that sets a universally unique identifier on all events
* that are intercepted. By default this event header is named "id".
*/
public class UUIDInterceptor implements Interceptor {

  // Name of the event header that receives the UUID (default "id").
  private String headerName;
  // When true, an already-present header value is left untouched.
  private boolean preserveExisting;
  // String prepended to every generated UUID (default empty).
  private String prefix;

  public static final String HEADER_NAME = "headerName";
  public static final String PRESERVE_EXISTING_NAME = "preserveExisting";
  public static final String PREFIX_NAME = "prefix";

  protected UUIDInterceptor(Context context) {
    headerName = context.getString(HEADER_NAME, "id");
    preserveExisting = context.getBoolean(PRESERVE_EXISTING_NAME, true);
    prefix = context.getString(PREFIX_NAME, "");
  }

  @Override
  public void initialize() {
  }

  /** Returns the string prepended to every generated UUID. */
  protected String getPrefix() {
    return prefix;
  }

  /** Generates a fresh random (type 4) UUID, prefixed with {@link #getPrefix()}. */
  protected String generateUUID() {
    return getPrefix() + UUID.randomUUID().toString();
  }

  /** Hook for subclasses to select which events get an id; matches all by default. */
  protected boolean isMatch(Event event) {
    return true;
  }

  /**
   * Adds a UUID header to the event unless preserveExisting is set and the
   * header is already present; mutates and returns the same event instance.
   */
  @Override
  public Event intercept(Event event) {
    Map<String, String> headers = event.getHeaders();
    if (preserveExisting && headers.containsKey(headerName)) {
      // we must preserve the existing id
    } else if (isMatch(event)) {
      headers.put(headerName, generateUUID());
    }
    return event;
  }

  /** Applies {@link #intercept(Event)} to each event, dropping null results. */
  @Override
  public List<Event> intercept(List<Event> events) {
    List<Event> results = new ArrayList<>(events.size());
    for (Event event : events) {
      event = intercept(event);
      if (event != null) {
        results.add(event);
      }
    }
    return results;
  }

  @Override
  public void close() {
  }

  ///////////////////////////////////////////////////////////////////////////////
  // Nested classes:
  ///////////////////////////////////////////////////////////////////////////////
  /** Builder implementations MUST have a public no-arg constructor */
  public static class Builder implements Interceptor.Builder {

    private Context context;

    public Builder() {
    }

    @Override
    public UUIDInterceptor build() {
      return new UUIDInterceptor(context);
    }

    @Override
    public void configure(Context context) {
      this.context = context;
    }
  }

}
| 9,845 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/MorphlineHandlerImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.File;
import java.util.Map.Entry;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.kitesdk.morphline.api.Command;
import org.kitesdk.morphline.api.MorphlineCompilationException;
import org.kitesdk.morphline.api.MorphlineContext;
import org.kitesdk.morphline.api.Record;
import org.kitesdk.morphline.base.Compiler;
import org.kitesdk.morphline.base.FaultTolerance;
import org.kitesdk.morphline.base.Fields;
import org.kitesdk.morphline.base.Metrics;
import org.kitesdk.morphline.base.Notifications;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.Timer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
/**
* A {@link MorphlineHandler} that processes it's events using a morphline {@link Command} chain.
*/
public class MorphlineHandlerImpl implements MorphlineHandler {

  // Shared services (metrics, exception handler) for the compiled morphline;
  // built in configure() unless injected for tests.
  private MorphlineContext morphlineContext;
  // Head of the compiled morphline command chain.
  private Command morphline;
  // Optional last command appended to the chain (used by the interceptor to
  // collect output records).
  private Command finalChild;
  // "file@id" label used for metric registry naming and log messages.
  private String morphlineFileAndId;

  // App-level metrics, registered in configure().
  private Timer mappingTimer;
  private Meter numRecords;
  private Meter numFailedRecords;
  private Meter numExceptionRecords;

  public static final String MORPHLINE_FILE_PARAM = "morphlineFile";
  public static final String MORPHLINE_ID_PARAM = "morphlineId";

  /**
   * Morphline variables can be passed from flume.conf to the morphline, e.g.:
   * agent.sinks.solrSink.morphlineVariable.zkHost=127.0.0.1:2181/solr
   */
  public static final String MORPHLINE_VARIABLE_PARAM = "morphlineVariable";

  private static final Logger LOG = LoggerFactory.getLogger(MorphlineHandlerImpl.class);

  // For test injection
  void setMorphlineContext(MorphlineContext morphlineContext) {
    this.morphlineContext = morphlineContext;
  }

  // for interceptor
  void setFinalChild(Command finalChild) {
    this.finalChild = finalChild;
  }

  /**
   * Compiles the morphline named by the "morphlineFile"/"morphlineId" context
   * properties (with "morphlineVariable.*" entries as config overrides) and
   * registers the app-level metrics. Must be called before process().
   *
   * @throws MorphlineCompilationException if "morphlineFile" is missing/blank
   *         or compilation fails
   */
  @Override
  public void configure(Context context) {
    String morphlineFile = context.getString(MORPHLINE_FILE_PARAM);
    String morphlineId = context.getString(MORPHLINE_ID_PARAM);
    if (morphlineFile == null || morphlineFile.trim().length() == 0) {
      throw new MorphlineCompilationException("Missing parameter: " + MORPHLINE_FILE_PARAM, null);
    }
    morphlineFileAndId = morphlineFile + "@" + morphlineId;

    if (morphlineContext == null) {
      // Fault-tolerance knobs come from the same flume context.
      FaultTolerance faultTolerance = new FaultTolerance(
          context.getBoolean(FaultTolerance.IS_PRODUCTION_MODE, false),
          context.getBoolean(FaultTolerance.IS_IGNORING_RECOVERABLE_EXCEPTIONS, false),
          context.getString(FaultTolerance.RECOVERABLE_EXCEPTION_CLASSES));

      morphlineContext = new MorphlineContext.Builder()
          .setExceptionHandler(faultTolerance)
          .setMetricRegistry(SharedMetricRegistries.getOrCreate(morphlineFileAndId))
          .build();
    }

    Config override = ConfigFactory.parseMap(
        context.getSubProperties(MORPHLINE_VARIABLE_PARAM + "."));

    morphline = new Compiler().compile(
        new File(morphlineFile), morphlineId, morphlineContext, finalChild, override);

    this.mappingTimer = morphlineContext.getMetricRegistry().timer(
        MetricRegistry.name("morphline.app", Metrics.ELAPSED_TIME));
    this.numRecords = morphlineContext.getMetricRegistry().meter(
        MetricRegistry.name("morphline.app", Metrics.NUM_RECORDS));
    this.numFailedRecords = morphlineContext.getMetricRegistry().meter(
        MetricRegistry.name("morphline.app", "numFailedRecords"));
    this.numExceptionRecords = morphlineContext.getMetricRegistry().meter(
        MetricRegistry.name("morphline.app", "numExceptionRecords"));
  }

  /**
   * Converts the Flume event into a morphline Record (headers become fields,
   * a non-empty body becomes the ATTACHMENT_BODY field) and pushes it through
   * the command chain, timing the call and updating failure/exception meters.
   * Runtime exceptions are delegated to the context's exception handler rather
   * than propagated directly.
   */
  @Override
  public void process(Event event) {
    numRecords.mark();
    Timer.Context timerContext = mappingTimer.time();
    try {
      Record record = new Record();
      for (Entry<String, String> entry : event.getHeaders().entrySet()) {
        record.put(entry.getKey(), entry.getValue());
      }
      byte[] bytes = event.getBody();
      if (bytes != null && bytes.length > 0) {
        record.put(Fields.ATTACHMENT_BODY, bytes);
      }
      try {
        Notifications.notifyStartSession(morphline);
        if (!morphline.process(record)) {
          numFailedRecords.mark();
          LOG.warn("Morphline {} failed to process record: {}", morphlineFileAndId, record);
        }
      } catch (RuntimeException t) {
        numExceptionRecords.mark();
        // Handler may swallow, record, or rethrow depending on FaultTolerance config.
        morphlineContext.getExceptionHandler().handleException(t, record);
      }
    } finally {
      timerContext.stop();
    }
  }

  @Override
  public void beginTransaction() {
    Notifications.notifyBeginTransaction(morphline);
  }

  @Override
  public void commitTransaction() {
    Notifications.notifyCommitTransaction(morphline);
  }

  @Override
  public void rollbackTransaction() {
    Notifications.notifyRollbackTransaction(morphline);
  }

  @Override
  public void stop() {
    Notifications.notifyShutdown(morphline);
  }

}
| 9,846 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/MorphlineSolrSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import org.apache.flume.Context;
import org.kitesdk.morphline.api.Command;
import org.kitesdk.morphline.base.FaultTolerance;
/**
* Flume sink that extracts search documents from Flume events, processes them using a morphline
* {@link Command} chain, and loads them into Apache Solr.
*/
public class MorphlineSolrSink extends MorphlineSink {

  public MorphlineSolrSink() {
    super();
  }

  /** For testing only */
  protected MorphlineSolrSink(MorphlineHandler handler) {
    super(handler);
  }

  /**
   * Same as the parent configuration, except that SolrServerException is
   * registered as a recoverable exception class when the user has not
   * configured that property explicitly.
   */
  @Override
  public void configure(Context context) {
    String recoverableClasses = context.getString(FaultTolerance.RECOVERABLE_EXCEPTION_CLASSES);
    if (recoverableClasses == null) {
      context.put(FaultTolerance.RECOVERABLE_EXCEPTION_CLASSES,
          "org.apache.solr.client.solrj.SolrServerException");
    }
    super.configure(context);
  }

}
| 9,847 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr | Create_ds/flume/flume-ng-sinks/flume-ng-morphline-solr-sink/src/main/java/org/apache/flume/sink/solr/morphline/MorphlineInterceptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.solr.morphline;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.interceptor.Interceptor;
import org.kitesdk.morphline.api.Command;
import org.kitesdk.morphline.api.Record;
import org.kitesdk.morphline.base.Fields;
import com.google.common.base.Preconditions;
import com.google.common.io.ByteStreams;
/**
* Flume Interceptor that executes a morphline on events that are intercepted.
*
* Currently, there is a restriction in that the morphline must not generate more than one output
* record for each input event.
*/
public class MorphlineInterceptor implements Interceptor {
private final Context context;
private final Queue<LocalMorphlineInterceptor> pool = new ConcurrentLinkedQueue<>();
protected MorphlineInterceptor(Context context) {
Preconditions.checkNotNull(context);
this.context = context;
// fail fast on morphline compilation exception
returnToPool(new LocalMorphlineInterceptor(context));
}
@Override
public void initialize() {
}
@Override
public void close() {
LocalMorphlineInterceptor interceptor;
while ((interceptor = pool.poll()) != null) {
interceptor.close();
}
}
@Override
public List<Event> intercept(List<Event> events) {
LocalMorphlineInterceptor interceptor = borrowFromPool();
List<Event> results = interceptor.intercept(events);
returnToPool(interceptor);
return results;
}
@Override
public Event intercept(Event event) {
LocalMorphlineInterceptor interceptor = borrowFromPool();
Event result = interceptor.intercept(event);
returnToPool(interceptor);
return result;
}
private void returnToPool(LocalMorphlineInterceptor interceptor) {
pool.add(interceptor);
}
private LocalMorphlineInterceptor borrowFromPool() {
LocalMorphlineInterceptor interceptor = pool.poll();
if (interceptor == null) {
interceptor = new LocalMorphlineInterceptor(context);
}
return interceptor;
}
///////////////////////////////////////////////////////////////////////////////
// Nested classes:
///////////////////////////////////////////////////////////////////////////////
/** Builder implementations MUST have a public no-arg constructor */
public static class Builder implements Interceptor.Builder {
private Context context;
public Builder() {
}
@Override
public MorphlineInterceptor build() {
return new MorphlineInterceptor(context);
}
@Override
public void configure(Context context) {
this.context = context;
}
}
///////////////////////////////////////////////////////////////////////////////
// Nested classes:
///////////////////////////////////////////////////////////////////////////////
private static final class LocalMorphlineInterceptor implements Interceptor {
private final MorphlineHandlerImpl morphline;
private final Collector collector;
protected LocalMorphlineInterceptor(Context context) {
this.morphline = new MorphlineHandlerImpl();
this.collector = new Collector();
this.morphline.setFinalChild(collector);
this.morphline.configure(context);
}
@Override
public void initialize() {
}
@Override
public void close() {
morphline.stop();
}
@Override
public List<Event> intercept(List<Event> events) {
List results = new ArrayList(events.size());
for (Event event : events) {
event = intercept(event);
if (event != null) {
results.add(event);
}
}
return results;
}
@Override
public Event intercept(Event event) {
collector.reset();
morphline.process(event);
List<Record> results = collector.getRecords();
if (results.size() == 0) {
return null;
}
if (results.size() > 1) {
throw new FlumeException(getClass().getName() +
" must not generate more than one output record per input event");
}
Event result = toEvent(results.get(0));
return result;
}
private Event toEvent(Record record) {
Map<String, String> headers = new HashMap();
Map<String, Collection<Object>> recordMap = record.getFields().asMap();
byte[] body = null;
for (Map.Entry<String, Collection<Object>> entry : recordMap.entrySet()) {
if (entry.getValue().size() > 1) {
throw new FlumeException(getClass().getName()
+ " must not generate more than one output value per record field");
}
assert entry.getValue().size() != 0; // guava guarantees that
Object firstValue = entry.getValue().iterator().next();
if (Fields.ATTACHMENT_BODY.equals(entry.getKey())) {
if (firstValue instanceof byte[]) {
body = (byte[]) firstValue;
} else if (firstValue instanceof InputStream) {
try {
body = ByteStreams.toByteArray((InputStream) firstValue);
} catch (IOException e) {
throw new FlumeException(e);
}
} else {
throw new FlumeException(getClass().getName()
+ " must non generate attachments that are not a byte[] or InputStream");
}
} else {
headers.put(entry.getKey(), firstValue.toString());
}
}
return EventBuilder.withBody(body, headers);
}
}
///////////////////////////////////////////////////////////////////////////////
// Nested classes:
///////////////////////////////////////////////////////////////////////////////
private static final class Collector implements Command {
private final List<Record> results = new ArrayList();
public List<Record> getRecords() {
return results;
}
public void reset() {
results.clear();
}
@Override
public Command getParent() {
return null;
}
@Override
public void notify(Record notification) {
}
@Override
public boolean process(Record record) {
Preconditions.checkNotNull(record);
results.add(record);
return true;
}
}
}
| 9,848 |
0 | Create_ds/flume/flume-ng-sinks/flume-irc-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-irc-sink/src/test/java/org/apache/flume/sink/irc/TestIRCSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.irc;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.List;
import java.util.UUID;
import static org.junit.Assert.fail;
public class TestIRCSink {
private File eventFile;
int ircServerPort;
DumbIRCServer dumbIRCServer;
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private static int findFreePort() throws IOException {
ServerSocket socket = new ServerSocket(0);
int port = socket.getLocalPort();
socket.close();
return port;
}
@Before
public void setUp() throws IOException {
ircServerPort = findFreePort();
dumbIRCServer = new DumbIRCServer(ircServerPort);
dumbIRCServer.start();
eventFile = folder.newFile("eventFile.txt");
}
@After
public void tearDown() throws Exception {
dumbIRCServer.shutdownServer();
}
@Test
public void testIRCSinkMissingSplitLineProperty() {
Sink ircSink = new IRCSink();
ircSink.setName("IRC Sink - " + UUID.randomUUID().toString());
Context context = new Context();
context.put("hostname", "localhost");
context.put("port", String.valueOf(ircServerPort));
context.put("nick", "flume");
context.put("password", "flume");
context.put("user", "flume");
context.put("name", "flume-dev");
context.put("chan", "flume");
context.put("splitchars", "false");
Configurables.configure(ircSink, context);
Channel memoryChannel = new MemoryChannel();
Configurables.configure(memoryChannel, context);
ircSink.setChannel(memoryChannel);
ircSink.start();
Transaction txn = memoryChannel.getTransaction();
txn.begin();
Event event = EventBuilder.withBody("Dummy Event".getBytes());
memoryChannel.put(event);
txn.commit();
txn.close();
try {
Sink.Status status = ircSink.process();
if (status == Sink.Status.BACKOFF) {
fail("Error occured");
}
} catch (EventDeliveryException eDelExcp) {
// noop
}
}
class DumbIRCServer extends Thread {
int port;
ServerSocket ss;
public DumbIRCServer(int port) {
this.port = port;
}
public void run() {
try {
ss = new ServerSocket(port);
while (true) {
try {
Socket socket = ss.accept();
process(socket);
} catch (Exception ex) {
/* noop */
}
}
} catch (IOException e) {
// noop
}
}
public void shutdownServer() throws Exception {
ss.close();
}
/**
* Process the incoming request from IRC client
*
* @param socket IRC client connection socket
* @throws IOException
*/
private void process(Socket socket) throws IOException {
FileOutputStream fileOutputStream = FileUtils.openOutputStream(eventFile);
List<String> input = IOUtils.readLines(socket.getInputStream());
for (String next : input) {
if (isPrivMessage(next)) {
fileOutputStream.write(next.getBytes());
fileOutputStream.write("\n".getBytes());
}
}
fileOutputStream.close();
socket.close();
}
/**
* Checks if the message is Priv message
*
* @param input command received from IRC client
* @return true, if command received is PrivMessage
*/
private boolean isPrivMessage(String input) {
return input.startsWith("PRIVMSG");
}
}
} | 9,849 |
0 | Create_ds/flume/flume-ng-sinks/flume-irc-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-irc-sink/src/main/java/org/apache/flume/sink/irc/IRCSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.irc;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.CounterGroup;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;

import org.schwering.irc.lib.IRCConnection;
import org.schwering.irc.lib.IRCEventListener;
import org.schwering.irc.lib.IRCModeParser;
import org.schwering.irc.lib.IRCUser;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;
/**
 * A Flume sink that delivers each event's body to an IRC channel as a
 * PRIVMSG, optionally splitting the body into multiple messages on a
 * configurable separator.
 *
 * <p>Required context parameters: {@code hostname}, {@code nick},
 * {@code chan}. Optional: {@code port} (default 6667), {@code password},
 * {@code user}, {@code name}, {@code splitlines} (default false),
 * {@code splitchars} (default "\n").
 *
 * <p>Not thread safe; Flume drives {@link #process()} from a single sink
 * runner thread.
 */
public class IRCSink extends AbstractSink implements Configurable {

  private static final Logger logger = LoggerFactory.getLogger(IRCSink.class);

  private static final int DEFAULT_PORT = 6667;
  private static final String DEFAULT_SPLIT_CHARS = "\n";
  private static final String IRC_CHANNEL_PREFIX = "#";

  // Lazily created in createConnection(); null while disconnected.
  private IRCConnection connection = null;

  // Connection parameters populated by configure().
  private String hostname;
  private Integer port;
  private String nick;
  private String password;
  private String user;
  private String name;
  private String chan;
  private Boolean splitLines;
  private String splitChars;

  private CounterGroup counterGroup;

  /**
   * IRC event listener that logs disconnects and errors; every other
   * callback is intentionally a no-op.
   */
  public static class IRCConnectionListener implements IRCEventListener {

    public void onRegistered() {
    }

    public void onDisconnected() {
      logger.error("IRC sink disconnected");
    }

    public void onError(String msg) {
      logger.error("IRC sink error: {}", msg);
    }

    public void onError(int num, String msg) {
      logger.error("IRC sink error: {} - {}", num, msg);
    }

    public void onInvite(String chan, IRCUser u, String nickPass) {
    }

    public void onJoin(String chan, IRCUser u) {
    }

    public void onKick(String chan, IRCUser u, String nickPass, String msg) {
    }

    public void onMode(IRCUser u, String nickPass, String mode) {
    }

    public void onMode(String chan, IRCUser u, IRCModeParser mp) {
    }

    public void onNick(IRCUser u, String nickNew) {
    }

    public void onNotice(String target, IRCUser u, String msg) {
    }

    public void onPart(String chan, IRCUser u, String msg) {
    }

    public void onPrivmsg(String chan, IRCUser u, String msg) {
    }

    public void onQuit(IRCUser u, String msg) {
    }

    public void onReply(int num, String value, String msg) {
    }

    public void onTopic(String chan, IRCUser u, String topic) {
    }

    public void onPing(String p) {
    }

    public void unknown(String a, String b, String c, String d) {
    }
  }

  public IRCSink() {
    counterGroup = new CounterGroup();
  }

  /**
   * Reads connection settings from the Flume context and applies defaults.
   *
   * @throws IllegalStateException if hostname, nick or chan is missing
   */
  public void configure(Context context) {
    hostname = context.getString("hostname");
    String portStr = context.getString("port");
    nick = context.getString("nick");
    password = context.getString("password");
    user = context.getString("user");
    name = context.getString("name");
    chan = context.getString("chan");
    splitLines = context.getBoolean("splitlines", false);
    splitChars = context.getString("splitchars");

    if (portStr != null) {
      port = Integer.parseInt(portStr);
    } else {
      port = DEFAULT_PORT;
    }

    if (splitChars == null) {
      splitChars = DEFAULT_SPLIT_CHARS;
    }

    Preconditions.checkState(hostname != null, "No hostname specified");
    Preconditions.checkState(nick != null, "No nick specified");
    Preconditions.checkState(chan != null, "No chan specified");
  }

  /**
   * Lazily connects to the IRC server (UTF-8 encoding) and joins the
   * configured channel. No-op if already connected.
   */
  private void createConnection() throws IOException {
    if (connection == null) {
      logger.debug(
          "Creating new connection to hostname:{} port:{}",
          hostname, port);
      connection = new IRCConnection(hostname, new int[] { port },
          password, nick, user, name);
      connection.addIRCEventListener(new IRCConnectionListener());
      connection.setEncoding("UTF-8");
      connection.setPong(true);
      connection.setDaemon(false);
      connection.setColors(false);
      connection.connect();
      connection.send("join " + IRC_CHANNEL_PREFIX + chan);
    }
  }

  /** Closes and discards the current connection, if any. */
  private void destroyConnection() {
    if (connection != null) {
      logger.debug("Destroying connection to: {}:{}", hostname, port);
      connection.close();
    }
    connection = null;
  }

  @Override
  public void start() {
    logger.info("IRC sink starting");

    try {
      createConnection();
    } catch (Exception e) {
      logger.error("Unable to create irc client using hostname:"
          + hostname + " port:" + port + ". Exception follows.", e);

      /* Try to prevent leaking resources. */
      destroyConnection();

      /* FIXME: Mark ourselves as failed. */
      return;
    }

    super.start();
    logger.debug("IRC sink {} started", this.getName());
  }

  @Override
  public void stop() {
    logger.info("IRC sink {} stopping", this.getName());

    destroyConnection();

    super.stop();
    logger.debug("IRC sink {} stopped. Metrics:{}", this.getName(), counterGroup);
  }

  /**
   * Writes the event body to the channel as one or more PRIVMSGs.
   *
   * <p>The body is decoded as UTF-8 to match the connection encoding set in
   * {@link #createConnection()}; the original used the platform default
   * charset, which corrupted non-ASCII payloads on non-UTF-8 platforms.
   */
  private void sendLine(Event event) {
    String body = new String(event.getBody(), StandardCharsets.UTF_8);

    if (splitLines) {
      String[] lines = body.split(splitChars);
      for (String line: lines) {
        connection.doPrivmsg(IRC_CHANNEL_PREFIX + this.chan, line);
      }
    } else {
      connection.doPrivmsg(IRC_CHANNEL_PREFIX + this.chan, body);
    }
  }

  /**
   * Takes a single event from the channel and sends it to IRC. Returns
   * BACKOFF when the channel is empty or on any failure; a communication
   * failure additionally tears down the connection so the next call
   * reconnects.
   */
  @Override
  public Status process() throws EventDeliveryException {
    Status status = Status.READY;

    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();

    try {
      transaction.begin();
      createConnection();

      Event event = channel.take();

      if (event == null) {
        counterGroup.incrementAndGet("event.empty");
        status = Status.BACKOFF;
      } else {
        sendLine(event);
        counterGroup.incrementAndGet("event.irc");
      }

      transaction.commit();
    } catch (ChannelException e) {
      transaction.rollback();
      logger.error(
          "Unable to get event from channel. Exception follows.", e);
      status = Status.BACKOFF;
    } catch (Exception e) {
      transaction.rollback();
      logger.error(
          "Unable to communicate with IRC server. Exception follows.",
          e);
      status = Status.BACKOFF;
      destroyConnection();
    } finally {
      transaction.close();
    }

    return status;
  }
}
| 9,850 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/util/SyslogAgent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.util;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
/**
* Syslog Flume Agent.
* A Syslog source of some kind is configured and a client is available to write
* messages to the agent. The Flume agents port is randomly assigned (not in use).
*
*/
public class SyslogAgent {
  private static final Logger LOGGER = LoggerFactory.getLogger(SyslogAgent.class);
  // Temp dirs created by configure(); deleted in stop(). NOTE(review): static
  // and never cleared after deletion, so entries accumulate across instances
  // — presumably harmless because deleteDirectory tolerates missing dirs;
  // confirm.
  private static final Collection<File> tempResources = new ArrayList<File>();
  // Polling policy used when waiting for the source socket / sink output.
  private static final int DEFAULT_ATTEMPTS = 20;
  private static final long DEFAULT_TIMEOUT = 500L;
  // Flume syslog source types this harness knows how to configure.
  public enum SyslogSourceType {
    TCP("syslogtcp"),
    MULTIPORTTCP("multiport_syslogtcp");
    // Flume source "type" string for this enum constant.
    private final String syslogSourceType;
    private SyslogSourceType(String syslogSourceType) {
      this.syslogSourceType = syslogSourceType;
    }
    public String toString() {
      return syslogSourceType;
    }
  }
  private Properties agentProps;   // agent configuration assembled in configure()
  private File sinkOutputDir;      // output dir of the FILE_ROLL sink
  private String keepFields;       // keepFields setting passed to start()
  private int port;                // randomly assigned syslog source port
  private String hostname;
  BufferedOutputStream client;     // stream used to write syslog messages to the source
  public SyslogAgent() throws IOException {
    hostname = "localhost";
    setRandomPort();
  }
  // Picks a currently free TCP port by binding an ephemeral server socket.
  // NOTE(review): the port could be taken by another process before the
  // agent binds it; acceptable for tests.
  public void setRandomPort() throws IOException {
    ServerSocket s = new ServerSocket(0);
    port = s.getLocalPort();
    s.close();
  }
  // Builds the agent properties: one syslog source (type given by the
  // caller), a memory channel, and a FILE_ROLL sink with rolling disabled.
  public void configure(SyslogSourceType sourceType) throws IOException {
    /* Create 3 temp dirs, each used as value within agentProps */
    sinkOutputDir = Files.createTempDir();
    tempResources.add(sinkOutputDir);
    final String sinkOutputDirPath = sinkOutputDir.getCanonicalPath();
    LOGGER.info("Created rolling file sink's output dir: "
        + sinkOutputDirPath);
    /* Build props to pass to flume agent */
    agentProps = new Properties();
    // Active sets
    agentProps.put("a1.channels", "c1");
    agentProps.put("a1.sources", "r1");
    agentProps.put("a1.sinks", "k1");
    // c1
    agentProps.put("a1.channels.c1.type", "memory");
    agentProps.put("a1.channels.c1.capacity", "1000");
    agentProps.put("a1.channels.c1.transactionCapacity", "100");
    // r1
    agentProps.put("a1.sources.r1.channels", "c1");
    agentProps.put("a1.sources.r1.type", sourceType.toString());
    agentProps.put("a1.sources.r1.host", hostname);
    // The multiport source uses "ports" (plural); the plain TCP source "port".
    if (sourceType.equals(SyslogSourceType.MULTIPORTTCP)) {
      agentProps.put("a1.sources.r1.ports", Integer.toString(port));
    } else {
      agentProps.put("a1.sources.r1.port", Integer.toString(port));
    }
    // k1
    agentProps.put("a1.sinks.k1.channel", "c1");
    agentProps.put("a1.sinks.k1.sink.directory", sinkOutputDirPath);
    agentProps.put("a1.sinks.k1.type", "FILE_ROLL");
    agentProps.put("a1.sinks.k1.sink.rollInterval", "0");
  }
  // Blocks until flume agent boots up.
  // Starts the staged agent with the given keepFields setting and polls the
  // source port until a client connection succeeds (or attempts run out).
  public void start(String keepFields) throws Exception {
    this.keepFields = keepFields;
    // Set properties that should be different per agent start and stop.
    agentProps.put("a1.sources.r1.keepFields", keepFields);
    // Recreate temporary directory.
    sinkOutputDir.mkdir();
    /* Start flume agent */
    StagedInstall.getInstance().startAgent("a1", agentProps);
    LOGGER.info("Started flume agent with syslog source on port " + port);
    // Wait for source, channel, sink to start and create client.
    int numberOfAttempts = 0;
    while (client == null) {
      try {
        client = new BufferedOutputStream(new Socket(hostname, port).getOutputStream());
      } catch (IOException e) {
        if (++numberOfAttempts >= DEFAULT_ATTEMPTS) {
          throw new AssertionError("Could not connect to source after "
              + DEFAULT_ATTEMPTS + " attempts with " + DEFAULT_TIMEOUT + " ms timeout.");
        }
        TimeUnit.MILLISECONDS.sleep(DEFAULT_TIMEOUT);
      }
    }
  }
  public boolean isRunning() throws Exception {
    return StagedInstall.getInstance().isRunning();
  }
  // Closes the client, stops the agent and deletes all temp directories
  // recorded in tempResources.
  public void stop() throws Exception {
    if (client != null) {
      client.close();
    }
    client = null;
    StagedInstall.getInstance().stopAgent();
    for (File tempResource : tempResources) {
      // Should always be a directory.
      FileUtils.deleteDirectory(tempResource);
    }
  }
  // Sends one syslog message and verifies that the FILE_ROLL sink's single
  // output file eventually contains exactly the expected transformation of
  // that message for the configured keepFields value.
  public void runKeepFieldsTest() throws Exception {
    /* Create expected output and log message */
    String logMessage = "<34>1 Oct 11 22:14:15 mymachine su: Test\n";
    String expectedOutput = "su: Test\n";
    // keepFields may be a boolean-like value ("true"/"all"/"false"/"none")
    // or a list of individual field names; rebuild the expected prefix
    // field-by-field, innermost first.
    if (keepFields.equals("true") || keepFields.equals("all")) {
      expectedOutput = logMessage;
    } else if (!keepFields.equals("false") && !keepFields.equals("none")) {
      if (keepFields.indexOf("hostname") != -1) {
        expectedOutput = "mymachine " + expectedOutput;
      }
      if (keepFields.indexOf("timestamp") != -1) {
        expectedOutput = "Oct 11 22:14:15 " + expectedOutput;
      }
      if (keepFields.indexOf("version") != -1) {
        expectedOutput = "1 " + expectedOutput;
      }
      if (keepFields.indexOf("priority") != -1) {
        expectedOutput = "<34>" + expectedOutput;
      }
    }
    LOGGER.info("Created expected output: " + expectedOutput);
    /* Send test message to agent */
    sendMessage(logMessage);
    /* Wait for output file */
    int numberOfListDirAttempts = 0;
    while (sinkOutputDir.listFiles().length == 0) {
      if (++numberOfListDirAttempts >= DEFAULT_ATTEMPTS) {
        throw new AssertionError("FILE_ROLL sink hasn't written any files after "
            + DEFAULT_ATTEMPTS + " attempts with " + DEFAULT_TIMEOUT + " ms timeout.");
      }
      TimeUnit.MILLISECONDS.sleep(DEFAULT_TIMEOUT);
    }
    // Only 1 file should be in FILE_ROLL sink's dir (rolling is disabled)
    File[] sinkOutputDirChildren = sinkOutputDir.listFiles();
    Assert.assertEquals("Expected FILE_ROLL sink's dir to have only 1 child," +
        " but found " + sinkOutputDirChildren.length + " children.",
        1, sinkOutputDirChildren.length);
    /* Wait for output file stats to be as expected. */
    // Compare byte lengths first; NOTE(review): this assumes the expected
    // output is pure ASCII (one byte per char), which holds for the fixed
    // test message above.
    File outputDirChild = sinkOutputDirChildren[0];
    int numberOfStatsAttempts = 0;
    while (outputDirChild.length() != expectedOutput.length()) {
      if (++numberOfStatsAttempts >= DEFAULT_ATTEMPTS) {
        throw new AssertionError("Expected output and FILE_ROLL sink's"
            + " lengths did not match after " + DEFAULT_ATTEMPTS
            + " attempts with " + DEFAULT_TIMEOUT + " ms timeout.");
      }
      TimeUnit.MILLISECONDS.sleep(DEFAULT_TIMEOUT);
    }
    File actualOutput = sinkOutputDirChildren[0];
    if (!Files.toString(actualOutput, Charsets.UTF_8).equals(expectedOutput)) {
      LOGGER.error("Actual output doesn't match expected output.\n");
      LOGGER.debug("Output: " + Files.toString(actualOutput, Charsets.UTF_8));
      throw new AssertionError("FILE_ROLL sink's actual output doesn't " +
          "match expected output.");
    }
  }
  // Writes a raw syslog message to the source socket.
  // NOTE(review): getBytes() uses the platform default charset; fine for the
  // ASCII test message, but consider an explicit charset for wider use.
  private void sendMessage(String message) throws IOException {
    client.write(message.getBytes());
    client.flush();
  }
}
| 9,851 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/util/StagedInstall.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.util;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Files;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.zip.GZIPInputStream;
/**
* Attempts to setup a staged install using explicitly specified tar-ball
* distribution or by using relative path into the flume-ng-dist module.
*/
public class StagedInstall {
private static final Logger LOGGER = LoggerFactory.getLogger(StagedInstall.class);
public static final String PROP_PATH_TO_DIST_TARBALL =
"flume.dist.tarball";
public static final String ENV_FLUME_LOG_DIR = "flume.log.dir";
public static final String ENV_FLUME_ROOT_LOGGER = "flume.root.logger";
public static final String ENV_FLUME_ROOT_LOGGER_VALUE = "DEBUG,LOGFILE";
public static final String ENV_FLUME_LOG_FILE = "flume.log.file";
private final File stageDir;
private final File baseDir;
private final String launchScriptPath;
private final String confDirPath;
private final String logDirPath;
// State per invocation - config file, process, shutdown hook
private String agentName;
private String configFilePath;
private Process process;
private ProcessShutdownHook shutdownHook;
private ProcessInputStreamConsumer consumer;
private static StagedInstall INSTANCE;
public static synchronized StagedInstall getInstance() throws Exception {
if (INSTANCE == null) {
INSTANCE = new StagedInstall();
}
return INSTANCE;
}
public static int findFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
return socket.getLocalPort();
}
}
public synchronized boolean isRunning() {
return process != null;
}
public synchronized void stopAgent() throws Exception {
if (process == null) {
throw new Exception("Process not found");
}
LOGGER.info("Shutting down agent process");
process.destroy();
process.waitFor();
process = null;
consumer.interrupt();
consumer = null;
configFilePath = null;
Runtime.getRuntime().removeShutdownHook(shutdownHook);
shutdownHook = null;
Thread.sleep(3000); // sleep for 3s to let system shutdown
}
public synchronized int startAgent(String name, String configResource)
throws Exception {
if (process != null) {
throw new Exception("A process is already running");
}
int port = findFreePort();
Properties props = new Properties();
props.load(ClassLoader.getSystemResourceAsStream(configResource));
props.put("rpccagent.sources.src1.port", String.valueOf(port));
startAgent(name, props);
return port;
}
public synchronized void startAgent(String name, Properties properties) throws Exception {
startAgent(name, properties, new HashMap<>(), new HashMap<>());
}
public synchronized void startAgent(
String name, Properties properties, Map<String, String> environmentVariables,
Map<String, String> commandOptions)
throws Exception {
Preconditions.checkArgument(!name.isEmpty(), "agent name must not be empty");
Preconditions.checkNotNull(properties, "properties object must not be null");
agentName = name;
if (process != null) {
throw new Exception("A process is already running");
}
LOGGER.info("Starting process for agent: " + agentName + " using config: " + properties);
File configFile = createConfigurationFile(agentName, properties);
configFilePath = configFile.getCanonicalPath();
String configFileName = configFile.getName();
String logFileName = "flume-" + agentName + "-"
+ configFileName.substring(0, configFileName.indexOf('.')) + ".log";
LOGGER.info("Created configuration file: " + configFilePath);
ImmutableList.Builder<String> builder = new ImmutableList.Builder<String>();
builder.add(launchScriptPath);
builder.add("agent");
builder.add("--conf", confDirPath);
builder.add("--conf-file", configFilePath);
builder.add("--name", agentName);
builder.add("-D" + ENV_FLUME_LOG_DIR + "=" + logDirPath);
builder.add("-D" + ENV_FLUME_ROOT_LOGGER + "="
+ ENV_FLUME_ROOT_LOGGER_VALUE);
builder.add("-D" + ENV_FLUME_LOG_FILE + "=" + logFileName);
commandOptions.forEach((key, value) -> builder.add(key, value));
List<String> cmdArgs = builder.build();
LOGGER.info("Using command: " + Joiner.on(" ").join(cmdArgs));
ProcessBuilder pb = new ProcessBuilder(cmdArgs);
Map<String, String> env = pb.environment();
env.putAll(environmentVariables);
LOGGER.debug("process environment: " + env);
pb.directory(baseDir);
pb.redirectErrorStream(true);
process = pb.start();
consumer = new ProcessInputStreamConsumer(process.getInputStream());
consumer.start();
shutdownHook = new ProcessShutdownHook();
Runtime.getRuntime().addShutdownHook(shutdownHook);
Thread.sleep(3000); // sleep for 3s to let system initialize
}
public synchronized void reconfigure(Properties properties) throws Exception {
File configFile = createConfigurationFile(agentName, properties);
Files.copy(configFile, new File(configFilePath));
configFile.delete();
LOGGER.info("Updated agent config file: " + configFilePath);
}
public synchronized File getStageDir() {
return stageDir;
}
private File createConfigurationFile(String agentName, Properties properties)
throws Exception {
Preconditions.checkNotNull(properties, "properties object must not be null");
File file = File.createTempFile("agent", "config.properties", stageDir);
OutputStream os = null;
try {
os = new FileOutputStream(file);
properties.store(os, "Config file for agent: " + agentName);
} catch (Exception ex) {
LOGGER.error("Failed to create config file: " + file, ex);
throw ex;
} finally {
if (os != null) {
try {
os.close();
} catch (Exception ex) {
LOGGER.warn("Unable to close config file stream", ex);
}
}
}
return file;
}
private StagedInstall() throws Exception {
String tarballPath = System.getProperty(PROP_PATH_TO_DIST_TARBALL);
if (tarballPath == null || tarballPath.trim().length() == 0) {
LOGGER.info("No value specified for system property: "
+ PROP_PATH_TO_DIST_TARBALL
+ ". Will attempt to use relative path to locate dist tarball.");
tarballPath = getRelativeTarballPath();
}
if (tarballPath == null || tarballPath.trim().length() == 0) {
throw new Exception("Failed to locate tar-ball distribution. "
+ "Please specify explicitly via system property: "
+ PROP_PATH_TO_DIST_TARBALL);
}
// Validate
File tarballFile = new File(tarballPath);
if (!tarballFile.isFile() || !tarballFile.canRead()) {
throw new Exception("The tarball distribution file is invalid: "
+ tarballPath + ". You can override this by explicitly setting the "
+ "system property: " + PROP_PATH_TO_DIST_TARBALL);
}
LOGGER.info("Dist tarball to use: " + tarballPath);
// Now set up a staging directory for this distribution
stageDir = getStagingDirectory();
// Deflate the gzip compressed archive
File tarFile = gunzipDistTarball(tarballFile, stageDir);
// Untar the deflated file
untarTarFile(tarFile, stageDir);
// Delete the tarfile
tarFile.delete();
LOGGER.info("Dist tarball staged to: " + stageDir);
File rootDir = stageDir;
File[] listBaseDirs = stageDir.listFiles();
if (listBaseDirs != null && listBaseDirs.length == 1
&& listBaseDirs[0].isDirectory()) {
rootDir = listBaseDirs[0];
}
baseDir = rootDir;
// Give execute permissions to the bin/flume-ng script
File launchScript = new File(baseDir, "bin/flume-ng");
giveExecutePermissions(launchScript);
launchScriptPath = launchScript.getCanonicalPath();
File confDir = new File(baseDir, "conf");
confDirPath = confDir.getCanonicalPath();
File logDir = new File(baseDir, "logs");
logDir.mkdirs();
logDirPath = logDir.getCanonicalPath();
LOGGER.info("Staged install root directory: " + rootDir.getCanonicalPath());
}
private void giveExecutePermissions(File file) throws Exception {
String[] args = {
"chmod", "+x", file.getCanonicalPath()
};
Runtime.getRuntime().exec(args);
LOGGER.info("Set execute permissions on " + file);
}
private void untarTarFile(File tarFile, File destDir) throws Exception {
TarArchiveInputStream tarInputStream = null;
try {
tarInputStream = new TarArchiveInputStream(new FileInputStream(tarFile));
TarArchiveEntry entry = null;
while ((entry = tarInputStream.getNextTarEntry()) != null) {
String name = entry.getName();
LOGGER.debug("Next file: " + name);
File destFile = new File(destDir, entry.getName());
if (entry.isDirectory()) {
destFile.mkdirs();
continue;
}
File destParent = destFile.getParentFile();
destParent.mkdirs();
OutputStream entryOutputStream = null;
try {
entryOutputStream = new FileOutputStream(destFile);
byte[] buffer = new byte[2048];
int length = 0;
while ((length = tarInputStream.read(buffer, 0, 2048)) != -1) {
entryOutputStream.write(buffer, 0, length);
}
} catch (Exception ex) {
LOGGER.error("Exception while expanding tar file", ex);
throw ex;
} finally {
if (entryOutputStream != null) {
try {
entryOutputStream.close();
} catch (Exception ex) {
LOGGER.warn("Failed to close entry output stream", ex);
}
}
}
}
} catch (Exception ex) {
LOGGER.error("Exception caught while untarring tar file: "
+ tarFile.getAbsolutePath(), ex);
throw ex;
} finally {
if (tarInputStream != null) {
try {
tarInputStream.close();
} catch (Exception ex) {
LOGGER.warn("Unable to close tar input stream: "
+ tarFile.getCanonicalPath(), ex);
}
}
}
}
private File gunzipDistTarball(File tarballFile, File destDir)
throws Exception {
File tarFile = null;
InputStream tarballInputStream = null;
OutputStream tarFileOutputStream = null;
try {
tarballInputStream = new GZIPInputStream(
new FileInputStream(tarballFile));
File temp2File = File.createTempFile("flume", "-bin", destDir);
String temp2FilePath = temp2File.getCanonicalPath();
temp2File.delete();
tarFile = new File(temp2FilePath + ".tar");
LOGGER.info("Tarball being unzipped to: " + tarFile.getCanonicalPath());
tarFileOutputStream = new FileOutputStream(tarFile);
int length = 0;
byte[] buffer = new byte[10240];
while ((length = tarballInputStream.read(buffer, 0, 10240)) != -1) {
tarFileOutputStream.write(buffer, 0, length);
}
} catch (Exception ex) {
LOGGER.error("Exception caught while unpacking the tarball", ex);
throw ex;
} finally {
if (tarballInputStream != null) {
try {
tarballInputStream.close();
} catch (Exception ex) {
LOGGER.warn("Unable to close input stream to tarball", ex);
}
}
if (tarFileOutputStream != null) {
try {
tarFileOutputStream.close();
} catch (Exception ex) {
LOGGER.warn("Unable to close tarfile output stream", ex);
}
}
}
return tarFile;
}
private File getStagingDirectory() throws Exception {
File targetDir = new File("target");
if (!targetDir.exists() || !targetDir.isDirectory()) {
// Probably operating from command line. Use temp dir as target
targetDir = new File(System.getProperty("java.io.tmpdir"));
}
File testDir = new File(targetDir, "test");
testDir.mkdirs();
File tempFile = File.createTempFile("flume", "_stage", testDir);
String absFileName = tempFile.getCanonicalPath();
tempFile.delete();
File stageDir = new File(absFileName + "_dir");
if (stageDir.exists()) {
throw new Exception("Stage directory exists: " +
stageDir.getCanonicalPath());
}
stageDir.mkdirs();
LOGGER.info("Staging Directory: " + stageDir.getCanonicalPath());
return stageDir;
}
  /**
   * Searches ancestor directories for a built Flume distribution tarball in a
   * flume-ng-dist/target directory.
   *
   * @return canonical path of the tarball, or null if none was found
   * @throws Exception if canonical path resolution fails
   */
  private String getRelativeTarballPath() throws Exception {
    String tarballPath = null;
    // NOTE(review): new File("..") is a relative path, and getParentFile()
    // on it returns null, so this loop appears to inspect only the immediate
    // parent directory before exiting -- confirm whether walking further up
    // the tree was intended.
    File dir = new File("..");
    while (dir != null && dir.isDirectory()) {
      File testFile = new File(dir, "flume-ng-dist/target");
      if (testFile.exists() && testFile.isDirectory()) {
        LOGGER.info("Found candidate dir: " + testFile.getCanonicalPath());
        // Accept only files named like apache-flume-*-bin.tar.gz.
        File[] candidateFiles = testFile.listFiles(new FileFilter() {
          @Override
          public boolean accept(File pathname) {
            String name = pathname.getName();
            if (name != null && name.startsWith("apache-flume-")
                && name.endsWith("-bin.tar.gz")) {
              return true;
            }
            return false;
          }
        });
        // There should be at most one
        if (candidateFiles != null && candidateFiles.length > 0) {
          if (candidateFiles.length == 1) {
            // Found it
            File file = candidateFiles[0];
            if (file.isFile() && file.canRead()) {
              tarballPath = file.getCanonicalPath();
              LOGGER.info("Found file: " + tarballPath);
              break;
            } else {
              LOGGER.warn("Invalid file: " + file.getCanonicalPath());
            }
          } else {
            // Ambiguous build output: log every candidate and use none.
            StringBuilder sb = new StringBuilder("Multiple candidate tarballs");
            sb.append(" found in directory ");
            sb.append(testFile.getCanonicalPath()).append(": ");
            boolean first = true;
            for (File file : candidateFiles) {
              if (first) {
                first = false;
                sb.append(" ");
              } else {
                sb.append(", ");
              }
              sb.append(file.getCanonicalPath());
            }
            sb.append(". All these files will be ignored.");
            LOGGER.warn(sb.toString());
          }
        }
      }
      dir = dir.getParentFile();
    }
    return tarballPath;
  }
public static void waitUntilPortOpens(String host, int port, long timeout)
throws IOException, InterruptedException {
long startTime = System.currentTimeMillis();
Socket socket;
boolean connected = false;
//See if port has opened for timeout.
while (System.currentTimeMillis() - startTime < timeout) {
try {
socket = new Socket(host, port);
socket.close();
connected = true;
break;
} catch (IOException e) {
Thread.sleep(2000);
}
}
if (!connected) {
throw new IOException("Port not opened within specified timeout.");
}
}
  /**
   * JVM shutdown hook that destroys the managed agent process (if any) so an
   * aborted test run does not leave a stray Flume agent process behind.
   */
  private class ProcessShutdownHook extends Thread {
    public void run() {
      // Synchronize on the enclosing install so we do not race with another
      // thread starting or stopping the agent (which updates 'process').
      synchronized (StagedInstall.this) {
        if (StagedInstall.this.process != null) {
          process.destroy();
        }
      }
    }
  }
private static class ProcessInputStreamConsumer extends Thread {
private final InputStream is;
private ProcessInputStreamConsumer(InputStream is) {
this.is = is;
this.setDaemon(true);
}
public void run() {
try {
byte[] buffer = new byte[1024];
int length = 0;
while ((length = is.read(buffer, 0, 1024)) != -1) {
LOGGER.info("[process-out] " + new String(buffer, 0, length));
}
} catch (Exception ex) {
LOGGER.warn("Error while reading process stream", ex);
}
}
}
}
| 9,852 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestFileChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import org.apache.flume.test.util.StagedInstall;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class TestFileChannel {

  private static final Logger LOGGER = LoggerFactory.getLogger(TestFileChannel.class);

  // Temp directories created in setUp(); removed recursively in tearDown().
  private static final Collection<File> tempResources = new ArrayList<File>();

  private Properties agentProps;
  private File sinkOutputDir;

  /**
   * Creates the sink output, channel checkpoint and channel data directories
   * and assembles the agent configuration (EXEC source -> FILE channel ->
   * FILE_ROLL sink) around them.
   */
  @Before
  public void setUp() throws Exception {
    /* Create 3 temp dirs, each used as value within agentProps */

    final File sinkOutputDir = Files.createTempDir();
    tempResources.add(sinkOutputDir);
    final String sinkOutputDirPath = sinkOutputDir.getCanonicalPath();
    LOGGER.info("Created rolling file sink's output dir: " + sinkOutputDirPath);

    final File channelCheckpointDir = Files.createTempDir();
    tempResources.add(channelCheckpointDir);
    final String channelCheckpointDirPath = channelCheckpointDir.getCanonicalPath();
    LOGGER.info("Created file channel's checkpoint dir: " + channelCheckpointDirPath);

    final File channelDataDir = Files.createTempDir();
    tempResources.add(channelDataDir);
    final String channelDataDirPath = channelDataDir.getCanonicalPath();
    LOGGER.info("Created file channel's data dir: " + channelDataDirPath);

    /* Build props to pass to flume agent */
    Properties agentProps = new Properties();

    // Active sets
    agentProps.put("a1.channels", "c1");
    agentProps.put("a1.sources", "r1");
    agentProps.put("a1.sinks", "k1");

    // c1
    agentProps.put("a1.channels.c1.type", "FILE");
    agentProps.put("a1.channels.c1.checkpointDir", channelCheckpointDirPath);
    agentProps.put("a1.channels.c1.dataDirs", channelDataDirPath);

    // r1
    agentProps.put("a1.sources.r1.channels", "c1");
    agentProps.put("a1.sources.r1.type", "EXEC");
    agentProps.put("a1.sources.r1.command", "seq 1 100");

    // k1
    agentProps.put("a1.sinks.k1.channel", "c1");
    agentProps.put("a1.sinks.k1.type", "FILE_ROLL");
    agentProps.put("a1.sinks.k1.sink.directory", sinkOutputDirPath);
    agentProps.put("a1.sinks.k1.sink.rollInterval", "0");

    this.agentProps = agentProps;
    this.sinkOutputDir = sinkOutputDir;
  }

  /**
   * Stops the agent and deletes the temp directories. Cleanup now runs even
   * when stopping the agent fails, and removes directory contents too: the
   * previous plain File.delete() silently failed on non-empty directories,
   * leaking them on every run.
   */
  @After
  public void tearDown() throws Exception {
    try {
      StagedInstall.getInstance().stopAgent();
    } finally {
      for (File tempResource : tempResources) {
        deleteRecursively(tempResource);
      }
      // Clear the static list so entries do not accumulate across tests.
      tempResources.clear();
      agentProps = null;
    }
  }

  /** Deletes a file, or a directory together with everything beneath it. */
  private static void deleteRecursively(File file) {
    File[] children = file.listFiles();
    if (children != null) {
      for (File child : children) {
        deleteRecursively(child);
      }
    }
    if (!file.delete()) {
      LOGGER.warn("Unable to delete temp resource: " + file.getAbsolutePath());
    }
  }

  /**
   * File channel in/out test. Verifies that all events inserted into the
   * file channel are received by the sink in order.
   * <p>
   * The EXEC source creates 100 events where the event bodies have
   * sequential numbers. The source puts those events into the file channel,
   * and the FILE_ROLL sink is expected to take all 100 events in FIFO
   * order.
   *
   * @throws Exception on any failure starting the agent or reading output
   */
  @Test
  public void testInOut() throws Exception {
    LOGGER.debug("testInOut() started.");

    StagedInstall.getInstance().startAgent("a1", agentProps);
    TimeUnit.SECONDS.sleep(10); // Wait for source and sink to finish
    // TODO make this more deterministic

    /* Create expected output */
    StringBuilder sb = new StringBuilder();
    for (int i = 1; i <= 100; i++) {
      sb.append(i).append("\n");
    }
    String expectedOutput = sb.toString();
    LOGGER.info("Created expected output: " + expectedOutput);

    /* Create actual output file */
    File[] sinkOutputDirChildren = sinkOutputDir.listFiles();
    Assert.assertNotNull("Expected FILE_ROLL sink's dir to be listable",
        sinkOutputDirChildren);
    // Only 1 file should be in FILE_ROLL sink's dir (rolling is disabled)
    Assert.assertEquals("Expected FILE_ROLL sink's dir to have only 1 child," +
        " but found " + sinkOutputDirChildren.length + " children.",
        1, sinkOutputDirChildren.length);
    File actualOutput = sinkOutputDirChildren[0];

    if (!Files.toString(actualOutput, Charsets.UTF_8).equals(expectedOutput)) {
      LOGGER.error("Actual output doesn't match expected output.\n");
      throw new AssertionError("FILE_ROLL sink's actual output doesn't " +
          "match expected output.");
    }

    LOGGER.debug("testInOut() ended.");
  }
}
| 9,853 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestSyslogSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import org.apache.flume.test.util.SyslogAgent;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * Parameterized end-to-end test of the syslog sources: each parameterized run
 * exercises one syslog source type with several keepFields configurations.
 */
@RunWith(Parameterized.class)
public class TestSyslogSource {

  private static final Logger LOGGER = LoggerFactory.getLogger(TestSyslogSource.class);

  private SyslogAgent agent;
  // Syslog source flavor under test; fixed per parameterized instance.
  private final SyslogAgent.SyslogSourceType sourceType;

  public TestSyslogSource(SyslogAgent.SyslogSourceType sourceType) {
    this.sourceType = sourceType;
  }

  /**
   * Supplies one parameter row per syslog source type. Previously declared as
   * a raw {@code Collection}; now properly parameterized (erasure-compatible,
   * so the JUnit runner contract is unchanged).
   */
  @Parameterized.Parameters
  public static Collection<Object[]> syslogSourceTypes() {
    List<Object[]> sourceTypes = new ArrayList<Object[]>();
    for (SyslogAgent.SyslogSourceType sourceType : SyslogAgent.SyslogSourceType.values()) {
      sourceTypes.add(new Object[]{sourceType});
    }
    return sourceTypes;
  }

  /** Configures a fresh agent for the source type under test. */
  @Before
  public void setUp() throws Exception {
    agent = new SyslogAgent();
    agent.configure(sourceType);
  }

  /** Stops the agent if one was started. */
  @After
  public void tearDown() throws Exception {
    if (agent != null) {
      agent.stop();
      agent = null;
    }
  }

  /** Agent configured to keep all syslog fields in the event body. */
  @Test
  public void testKeepFields() throws Exception {
    LOGGER.debug("testKeepFields() started.");
    agent.start("all");
    agent.runKeepFieldsTest();
    LOGGER.debug("testKeepFields() ended.");
  }

  /** Agent configured to strip all syslog fields from the event body. */
  @Test
  public void testRemoveFields() throws Exception {
    LOGGER.debug("testRemoveFields() started.");
    agent.start("none");
    agent.runKeepFieldsTest();
    LOGGER.debug("testRemoveFields() ended.");
  }

  /** Agent configured to keep only the timestamp and hostname fields. */
  @Test
  public void testKeepTimestampAndHostname() throws Exception {
    LOGGER.debug("testKeepTimestampAndHostname() started.");
    agent.start("timestamp hostname");
    agent.runKeepFieldsTest();
    LOGGER.debug("testKeepTimestampAndHostname() ended.");
  }
}
| 9,854 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestSpooldirSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.flume.test.util.StagedInstall;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * This test creates NUM_SOURCES directories with NUM_FILES_PER_DIR files in each
* directory. It then starts the agent and waits 10 seconds to check that all
* the files have been consumed. Depending on the speed of the system either
* the wait interval may need to be increased or the number of files reduced.
* This would be better if the test could be signaled when the files have been
* processed.
*/
public class TestSpooldirSource {

  private static final Logger LOGGER =
      LoggerFactory.getLogger(TestSpooldirSource.class);

  private Properties agentProps;
  private File sinkOutputDir;
  private final List<File> spoolDirs = Lists.newArrayList();

  /**
   * Creates NUM_SOURCES spool directories and builds an agent configuration
   * with one SPOOLDIR source per directory, one shared memory channel and a
   * single FILE_ROLL sink writing into {@link #sinkOutputDir}.
   */
  @Before
  public void setup() throws Exception {
    File agentDir = StagedInstall.getInstance().getStageDir();
    LOGGER.debug("Using agent stage dir: {}", agentDir);

    File testDir = new File(agentDir, TestSpooldirSource.class.getName());
    assertTrue(testDir.mkdirs());

    File spoolParentDir = new File(testDir, "spools");
    assertTrue("Unable to create sink output dir: " + spoolParentDir.getPath(),
        spoolParentDir.mkdir());

    final int NUM_SOURCES = 30;
    agentProps = new Properties();
    List<String> spooldirSrcNames = Lists.newArrayList();
    String channelName = "mem-01";

    // Create source dirs and property file chunks
    for (int i = 0; i < NUM_SOURCES; i++) {
      String srcName = String.format("spooldir-%03d", i);
      File spoolDir = new File(spoolParentDir, srcName);
      assertTrue(spoolDir.mkdir());
      spooldirSrcNames.add(srcName);
      spoolDirs.add(spoolDir);
      agentProps.put(String.format("agent.sources.%s.type", srcName),
          "SPOOLDIR");
      agentProps.put(String.format("agent.sources.%s.spoolDir", srcName),
          spoolDir.getPath());
      agentProps.put(String.format("agent.sources.%s.channels", srcName),
          channelName);
    }

    // Create the rest of the properties file
    agentProps.put("agent.channels.mem-01.type", "MEMORY");
    agentProps.put("agent.channels.mem-01.capacity", String.valueOf(100000));

    sinkOutputDir = new File(testDir, "out");
    assertTrue("Unable to create sink output dir: " + sinkOutputDir.getPath(),
        sinkOutputDir.mkdir());

    agentProps.put("agent.sinks.roll-01.channel", channelName);
    agentProps.put("agent.sinks.roll-01.type", "FILE_ROLL");
    agentProps.put("agent.sinks.roll-01.sink.directory", sinkOutputDir.getPath());
    agentProps.put("agent.sinks.roll-01.sink.rollInterval", "0");

    agentProps.put("agent.sources", Joiner.on(" ").join(spooldirSrcNames));
    agentProps.put("agent.channels", channelName);
    agentProps.put("agent.sinks", "roll-01");
  }

  @After
  public void teardown() throws Exception {
    StagedInstall.getInstance().stopAgent();
  }

  /** Returns the one-line payload written for the given dir/file pair. */
  private String getTestString(int dirNum, int fileNum) {
    return String.format("Test dir %03d, test file %03d.\n", dirNum, fileNum);
  }

  /**
   * Creates test files numbered {@code startNum} (inclusive) through
   * {@code numFiles} (exclusive) in every spool directory. Files are staged
   * next to the spool dir and renamed in, so each one appears atomically and
   * the source never observes a partially written file.
   */
  private void createInputTestFiles(List<File> spoolDirs, int numFiles, int startNum)
      throws IOException {
    int numSpoolDirs = spoolDirs.size();
    for (int dirNum = 0; dirNum < numSpoolDirs; dirNum++) {
      File spoolDir = spoolDirs.get(dirNum);
      for (int fileNum = startNum; fileNum < numFiles; fileNum++) {
        // Stage the files on what is almost certainly the same FS partition.
        File tmp = new File(spoolDir.getParent(), UUID.randomUUID().toString());
        Files.append(getTestString(dirNum, fileNum), tmp, Charsets.UTF_8);
        File dst = new File(spoolDir, String.format("test-file-%03d", fileNum));
        // Ensure we move them into the spool directory atomically, if possible.
        assertTrue(String.format("Failed to rename %s to %s", tmp, dst),
            tmp.renameTo(dst));
      }
    }
  }

  /**
   * Asserts the output dir holds exactly {@code outFiles} files whose combined
   * lines cover every event from {@code dirs} directories x {@code events}
   * files each.
   */
  private void validateSeenEvents(File outDir, int outFiles, int dirs, int events)
      throws IOException {
    File[] sinkOutputDirChildren = outDir.listFiles();
    // Fail with a clear assertion (not an NPE) if the dir cannot be listed.
    assertNotNull("Output dir is not listable: " + outDir, sinkOutputDirChildren);
    assertEquals("Unexpected number of files in output dir",
        outFiles, sinkOutputDirChildren.length);
    Set<String> seenEvents = Sets.newHashSet();
    for (File outFile : sinkOutputDirChildren) {
      List<String> lines = Files.readLines(outFile, Charsets.UTF_8);
      for (String line : lines) {
        seenEvents.add(line);
      }
    }
    for (int dirNum = 0; dirNum < dirs; dirNum++) {
      for (int fileNum = 0; fileNum < events; fileNum++) {
        String event = getTestString(dirNum, fileNum).trim();
        assertTrue("Missing event: {" + event + "}", seenEvents.contains(event));
      }
    }
  }

  /** Starts the agent, drops files into all 30 spools and checks delivery. */
  @Test
  public void testManySpooldirs() throws Exception {
    LOGGER.debug("testManySpooldirs() started.");
    StagedInstall.getInstance().startAgent("agent", agentProps);

    final int NUM_FILES_PER_DIR = 10;
    createInputTestFiles(spoolDirs, NUM_FILES_PER_DIR, 0);
    TimeUnit.SECONDS.sleep(10); // Wait for sources and sink to process files

    // Ensure we received all events.
    validateSeenEvents(sinkOutputDir, 1, spoolDirs.size(), NUM_FILES_PER_DIR);
    LOGGER.debug("Processed all the events!");

    LOGGER.debug("testManySpooldirs() ended.");
  }
}
| 9,855 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import org.apache.commons.io.FileUtils;
import org.apache.flume.test.util.StagedInstall;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialShell;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.EnvironmentVariables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Scanner;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestConfig {

  private static final Logger LOGGER =
      LoggerFactory.getLogger(TestConfig.class);

  @ClassRule
  public static final EnvironmentVariables environmentVariables
      = new EnvironmentVariables();

  private Properties agentProps;
  private Map<String, String> agentEnv;
  private Map<String, String> agentOptions;
  private File sinkOutputDir1;
  private File sinkOutputDir2;
  private File sinkOutputDir3;
  private File hadoopCredStore;

  /**
   * Builds an agent configuration whose three FILE_ROLL sink directories are
   * resolved through three different config filters: environment variable
   * (filter-01), external command (filter-02) and Hadoop credential store
   * (filter-03).
   */
  @Before
  public void setup() throws Exception {
    File agentDir = StagedInstall.getInstance().getStageDir();
    LOGGER.debug("Using agent stage dir: {}", agentDir);

    File testDir = new File(agentDir, TestConfig.class.getName());
    if (testDir.exists()) {
      FileUtils.deleteDirectory(testDir);
    }
    assertTrue(testDir.mkdirs());

    agentProps = new Properties();
    agentEnv = new HashMap<>();
    agentOptions = new HashMap<>();
    agentOptions.put("-C", getAdditionalClassPath());

    // Create the rest of the properties file
    agentProps.put("agent.sources.seq-01.type", "seq");
    agentProps.put("agent.sources.seq-01.totalEvents", "100");
    agentProps.put("agent.sources.seq-01.channels", "mem-01 mem-02 mem-03");

    agentProps.put("agent.channels.mem-01.type", "MEMORY");
    agentProps.put("agent.channels.mem-01.capacity", String.valueOf(100000));
    agentProps.put("agent.channels.mem-02.type", "MEMORY");
    agentProps.put("agent.channels.mem-02.capacity", String.valueOf(100000));
    agentProps.put("agent.channels.mem-03.type", "MEMORY");
    // BUG FIX: this previously set "mem-04" -- a channel that does not exist
    // -- leaving mem-03 at its default capacity.
    agentProps.put("agent.channels.mem-03.capacity", String.valueOf(100000));

    sinkOutputDir1 = new File(testDir, "out1");
    assertTrue("Unable to create sink output dir: " + sinkOutputDir1.getPath(),
        sinkOutputDir1.mkdir());
    sinkOutputDir2 = new File(testDir, "out2");
    assertTrue("Unable to create sink output dir: " + sinkOutputDir2.getPath(),
        sinkOutputDir2.mkdir());
    sinkOutputDir3 = new File(testDir, "out3");
    assertTrue("Unable to create sink output dir: " + sinkOutputDir3.getPath(),
        sinkOutputDir3.mkdir());

    // filter-01 resolves ${filter-01["dirname_env"]} from the environment.
    environmentVariables.set("HADOOP_CREDSTORE_PASSWORD", "envSecret");
    agentEnv.put("dirname_env", sinkOutputDir1.getAbsolutePath());
    agentEnv.put("HADOOP_CREDSTORE_PASSWORD", "envSecret");

    // filter-03 resolves dirname_hadoop from this JCEKS credential store.
    hadoopCredStore = new File(testDir, "credstore.jceks");
    String providerPath = "jceks://file/" + hadoopCredStore.getAbsolutePath();
    ToolRunner.run(
        new Configuration(), new CredentialShell(),
        ("create dirname_hadoop -value " + sinkOutputDir3.getAbsolutePath()
            + " -provider " + providerPath).split(" "));

    agentProps.put("agent.sinks.roll-01.channel", "mem-01");
    agentProps.put("agent.sinks.roll-01.type", "FILE_ROLL");
    agentProps.put("agent.sinks.roll-01.sink.directory", "${filter-01[\"dirname_env\"]}");
    agentProps.put("agent.sinks.roll-01.sink.rollInterval", "0");

    agentProps.put("agent.sinks.roll-02.channel", "mem-02");
    agentProps.put("agent.sinks.roll-02.type", "FILE_ROLL");
    agentProps.put("agent.sinks.roll-02.sink.directory",
        sinkOutputDir2.getParentFile().getAbsolutePath() + "/${filter-02['out2']}");
    agentProps.put("agent.sinks.roll-02.sink.rollInterval", "0");

    agentProps.put("agent.sinks.roll-03.channel", "mem-03");
    agentProps.put("agent.sinks.roll-03.type", "FILE_ROLL");
    agentProps.put("agent.sinks.roll-03.sink.directory", "${filter-03[dirname_hadoop]}");
    agentProps.put("agent.sinks.roll-03.sink.rollInterval", "0");

    agentProps.put("agent.configfilters.filter-01.type", "env");
    agentProps.put("agent.configfilters.filter-02.type", "external");
    agentProps.put("agent.configfilters.filter-02.command", "echo");
    agentProps.put("agent.configfilters.filter-03.type", "hadoop");
    agentProps.put("agent.configfilters.filter-03.credential.provider.path", providerPath);

    agentProps.put("agent.sources", "seq-01");
    agentProps.put("agent.channels", "mem-01 mem-02 mem-03");
    agentProps.put("agent.sinks", "roll-01 roll-02 roll-03");
    agentProps.put("agent.configfilters", "filter-01 filter-02 filter-03");
  }

  /** Reads the extra classpath (first line) from the classpath.txt resource. */
  private String getAdditionalClassPath() throws Exception {
    // NOTE(review): URL.getPath() is not URL-decoded; a path with spaces or
    // non-ASCII characters would break here. Acceptable for a test resource.
    URL resource = this.getClass().getClassLoader().getResource("classpath.txt");
    Path path = Paths.get(Objects.requireNonNull(resource).getPath());
    return Files.readAllLines(path).stream().findFirst().orElse("");
  }

  @After
  public void teardown() throws Exception {
    StagedInstall.getInstance().stopAgent();
  }

  /**
   * Asserts the output dir holds exactly {@code outFiles} files that together
   * contain every sequence number in [0, {@code events}).
   */
  private void validateSeenEvents(File outDir, int outFiles, int events)
      throws IOException {
    File[] sinkOutputDirChildren = outDir.listFiles();
    // Fail with a clear assertion (not an NPE) if the dir cannot be listed.
    assertTrue("Output dir is not listable: " + outDir,
        sinkOutputDirChildren != null);
    assertEquals("Unexpected number of files in output dir",
        outFiles, sinkOutputDirChildren.length);
    Set<String> seenEvents = new HashSet<>();
    for (File outFile : sinkOutputDirChildren) {
      // try-with-resources: the Scanner was previously never closed.
      try (Scanner scanner = new Scanner(outFile)) {
        while (scanner.hasNext()) {
          seenEvents.add(scanner.nextLine());
        }
      }
    }
    for (int event = 0; event < events; event++) {
      assertTrue(
          "Missing event: {" + event + "}",
          seenEvents.contains(String.valueOf(event))
      );
    }
  }

  /** All three filters resolve correctly and each sink receives all events. */
  @Test
  public void testConfigReplacement() throws Exception {
    LOGGER.debug("testConfigReplacement() started.");

    StagedInstall.getInstance().startAgent("agent", agentProps, agentEnv, agentOptions);
    TimeUnit.SECONDS.sleep(10); // Wait for sources and sink to process files

    // Ensure we received all events.
    validateSeenEvents(sinkOutputDir1, 1, 100);
    validateSeenEvents(sinkOutputDir2, 1, 100);
    validateSeenEvents(sinkOutputDir3, 1, 100);
    LOGGER.debug("Processed all the events!");

    LOGGER.debug("testConfigReplacement() ended.");
  }

  /**
   * Starts with a deliberately broken mem-01 (transactionCapacity below the
   * sink batch size), verifies only that path stalls, then reconfigures the
   * running agent and verifies it recovers.
   */
  @Test
  public void testConfigReload() throws Exception {
    // BUG FIX: these debug messages previously said "testConfigReplacement()".
    LOGGER.debug("testConfigReload() started.");
    agentProps.put("agent.channels.mem-01.transactionCapacity", "10");
    agentProps.put("agent.sinks.roll-01.sink.batchSize", "20");

    StagedInstall.getInstance().startAgent("agent", agentProps, agentEnv, agentOptions);
    TimeUnit.SECONDS.sleep(10); // Wait for sources and sink to process files

    // This directory is empty due to misconfiguration
    validateSeenEvents(sinkOutputDir1, 0, 0);
    // These are well configured
    validateSeenEvents(sinkOutputDir2, 1, 100);
    validateSeenEvents(sinkOutputDir3, 1, 100);
    LOGGER.debug("Processed all the events!");

    // Repair the config and push it to the running agent.
    agentProps.put("agent.channels.mem-01.transactionCapacity", "20");
    StagedInstall.getInstance().reconfigure(agentProps);
    TimeUnit.SECONDS.sleep(40); // Wait for sources and sink to process files
    // Ensure we received all events.
    validateSeenEvents(sinkOutputDir1, 1, 100);

    LOGGER.debug("testConfigReload() ended.");
  }
}
| 9,856 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestRpcClientCommunicationFailure.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import junit.framework.Assert;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.test.util.StagedInstall;
import org.junit.Test;
public class TestRpcClientCommunicationFailure {

  public static final String CONFIG_FILE_PRCCLIENT_TEST =
      "rpc-client-test.properties";

  /**
   * Verifies that an RPC client whose remote agent has been stopped raises
   * {@link EventDeliveryException} on the next append. The client is now
   * always closed in a finally block; previously it leaked whenever one of
   * the initial appends (or the fail assertion) threw.
   */
  @Test
  public void testFailure() throws Exception {
    RpcClient client = null;
    try {
      int port = StagedInstall.getInstance().startAgent(
          "rpccagent", CONFIG_FILE_PRCCLIENT_TEST);
      StagedInstall.waitUntilPortOpens("localhost", port, 20000);
      client = RpcClientFactory.getDefaultInstance("localhost", port);
      String[] text = {"foo", "bar", "xyz", "abc"};
      for (String str : text) {
        client.append(EventBuilder.withBody(str.getBytes()));
      }

      // Stop the agent
      StagedInstall.getInstance().stopAgent();

      // Try sending the event which should fail
      try {
        client.append(EventBuilder.withBody("test".getBytes()));
        Assert.fail("EventDeliveryException expected but not raised");
      } catch (EventDeliveryException ex) {
        // Expected: the agent is down, so delivery must fail.
      }
    } finally {
      if (client != null) {
        System.out.println("Attempting to close client");
        try {
          client.close();
        } catch (Exception ex) {
          // Best-effort cleanup only; nothing useful to do here.
        }
      }
      if (StagedInstall.getInstance().isRunning()) {
        StagedInstall.getInstance().stopAgent();
      }
    }
  }
}
| 9,857 |
0 | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test | Create_ds/flume/flume-ng-tests/src/test/java/org/apache/flume/test/agent/TestRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.test.agent;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.test.util.StagedInstall;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Smoke test for the Flume RPC client: starts a staged agent and verifies
 * that a handful of events can be appended over the default RPC transport.
 */
public class TestRpcClient {

  public static final String CONFIG_FILE_PRCCLIENT_TEST =
      "rpc-client-test.properties";

  // Port the staged agent's RPC source is listening on.
  private int port;

  /** Starts the staged agent and records its RPC port. */
  @Before
  public void setUp() throws Exception {
    port = StagedInstall.getInstance().startAgent(
        "rpccagent", CONFIG_FILE_PRCCLIENT_TEST);
  }

  /** Shuts the staged agent down again. */
  @After
  public void tearDown() throws Exception {
    StagedInstall.getInstance().stopAgent();
  }

  /** Sends a few events through the default RPC client once the port is up. */
  @Test
  public void testRpcClient() throws Exception {
    StagedInstall.waitUntilPortOpens("localhost", port, 20000);
    RpcClient client = RpcClientFactory.getDefaultInstance("localhost", port);
    String[] payloads = {"foo", "bar", "xyz", "abc"};
    for (String payload : payloads) {
      client.append(EventBuilder.withBody(payload.getBytes()));
    }
  }
}
| 9,858 |
0 | Create_ds/flume/flume-ng-tests/src/main/java/org/apache | Create_ds/flume/flume-ng-tests/src/main/java/org/apache/flume/Dummy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume;
/**
 * Placeholder class so the git mirror does not leave out the src/main/java
 * dir. Not instantiable: the constructor always throws.
 */
public class Dummy {
  /**
   * Always throws; this class exists only to keep the directory non-empty.
   *
   * @throws UnsupportedOperationException always
   */
  public Dummy() {
    // BUG FIX: the message previously read "meant for use", contradicting the
    // class's documented purpose as a non-usable placeholder.
    throw new UnsupportedOperationException("Dummy class not meant for use");
  }
}
| 9,859 |
0 | Create_ds/flume/flume-ng-auth/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/test/java/org/apache/flume/auth/TestFlumeAuthenticator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import org.apache.hadoop.minikdc.MiniKdc;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Tests for {@link FlumeAuthenticationUtil} and the authenticators it
 * returns, exercised against an in-process MiniKdc Kerberos KDC.
 */
public class TestFlumeAuthenticator {
  // Shared in-process KDC; started once for the whole class.
  private static MiniKdc kdc;
  private static File workDir;
  private static File flumeKeytab;
  // NOTE: mutated in startMiniKdc() — the KDC realm is appended once known.
  private static String flumePrincipal = "flume/localhost";
  private static File aliceKeytab;
  private static String alicePrincipal = "alice";
  private static Properties conf;
  /**
   * Starts the MiniKdc under the test work directory and creates keytab
   * files for the "flume" and "alice" principals used by the tests below.
   */
  @BeforeClass
  public static void startMiniKdc() throws Exception {
    workDir = new File(System.getProperty("test.dir", "target"),
        TestFlumeAuthenticator.class.getSimpleName());
    flumeKeytab = new File(workDir, "flume.keytab");
    aliceKeytab = new File(workDir, "alice.keytab");
    conf = MiniKdc.createConf();
    kdc = new MiniKdc(conf, workDir);
    kdc.start();
    kdc.createPrincipal(flumeKeytab, flumePrincipal);
    flumePrincipal = flumePrincipal + "@" + kdc.getRealm();
    kdc.createPrincipal(aliceKeytab, alicePrincipal);
    alicePrincipal = alicePrincipal + "@" + kdc.getRealm();
  }
  @AfterClass
  public static void stopMiniKdc() {
    if (kdc != null) {
      kdc.stop();
    }
  }
  @After
  public void tearDown() {
    // Clear the previous statically stored logged in credentials
    FlumeAuthenticationUtil.clearCredentials();
  }
  /** Null principal and keytab must yield an unauthenticated (simple) authenticator. */
  @Test
  public void testNullLogin() throws IOException {
    String principal = null;
    String keytab = null;
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(
        principal, keytab);
    assertFalse(authenticator.isAuthenticated());
  }
  /**
   * Verifies initial kerberos login, re-login with the same principal, and
   * that switching to a different principal in the same JVM is rejected.
   */
  @Test
  public void testFlumeLogin() throws IOException {
    String principal = flumePrincipal;
    String keytab = flumeKeytab.getAbsolutePath();
    String expResult = principal;
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(
        principal, keytab);
    assertTrue(authenticator.isAuthenticated());
    String result = ((KerberosAuthenticator)authenticator).getUserName();
    assertEquals("Initial login failed", expResult, result);
    authenticator = FlumeAuthenticationUtil.getAuthenticator(
        principal, keytab);
    result = ((KerberosAuthenticator)authenticator).getUserName();
    assertEquals("Re-login failed", expResult, result);
    // A second, different principal must be refused within the same agent JVM.
    principal = alicePrincipal;
    keytab = aliceKeytab.getAbsolutePath();
    try {
      authenticator = FlumeAuthenticationUtil.getAuthenticator(
          principal, keytab);
      result = ((KerberosAuthenticator)authenticator).getUserName();
      fail("Login should have failed with a new principal: " + result);
    } catch (Exception ex) {
      assertTrue("Login with a new principal failed, but for an unexpected "
          + "reason: " + ex.getMessage(),
          ex.getMessage().contains("Cannot use multiple kerberos principals"));
    }
  }
  /**
   * Test whether the exception raised in the <code>PrivilegedExceptionAction</code> gets
   * propagated as-is from {@link KerberosAuthenticator#execute(PrivilegedExceptionAction)}.
   */
  @Test(expected = IOException.class)
  public void testKerberosAuthenticatorExceptionInExecute() throws Exception {
    String principal = flumePrincipal;
    String keytab = flumeKeytab.getAbsolutePath();
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(principal, keytab);
    assertTrue(authenticator instanceof KerberosAuthenticator);
    authenticator.execute(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        throw new IOException();
      }
    });
  }
  /**
   * Test whether the exception raised in the <code>PrivilegedExceptionAction</code> gets
   * propagated as-is from {@link SimpleAuthenticator#execute(PrivilegedExceptionAction)}.
   */
  @Test(expected = IOException.class)
  public void testSimpleAuthenticatorExceptionInExecute() throws Exception {
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(null, null);
    assertTrue(authenticator instanceof SimpleAuthenticator);
    authenticator.execute(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        throw new IOException();
      }
    });
  }
  /** Proxying must work both without (simple) and with (kerberos) a prior login. */
  @Test
  public void testProxyAs() throws IOException {
    String username = "alice";
    String expResult = username;
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(
        null, null);
    String result = ((UGIExecutor)(authenticator.proxyAs(username))).getUserName();
    assertEquals("Proxy as didn't generate the expected username", expResult, result);
    authenticator = FlumeAuthenticationUtil.getAuthenticator(
        flumePrincipal, flumeKeytab.getAbsolutePath());
    String login = ((KerberosAuthenticator)authenticator).getUserName();
    assertEquals("Login succeeded, but the principal doesn't match",
        flumePrincipal, login);
    result = ((UGIExecutor)(authenticator.proxyAs(username))).getUserName();
    assertEquals("Proxy as didn't generate the expected username", expResult, result);
  }
  /**
   * A principal supplied without an explicit realm must be resolved against
   * the KDC's realm before login; re-login and principal switching behave as
   * in {@link #testFlumeLogin()}.
   */
  @Test
  public void testFlumeLoginPrincipalWithoutRealm() throws Exception {
    String principal = "flume";
    File keytab = new File(workDir, "flume2.keytab");
    kdc.createPrincipal(keytab, principal);
    // The realm is expected to be filled in during principal resolution.
    String expResult = principal + "@" + kdc.getRealm();
    FlumeAuthenticator authenticator = FlumeAuthenticationUtil.getAuthenticator(
        principal, keytab.getAbsolutePath());
    assertTrue(authenticator.isAuthenticated());
    String result = ((KerberosAuthenticator)authenticator).getUserName();
    assertEquals("Initial login failed", expResult, result);
    authenticator = FlumeAuthenticationUtil.getAuthenticator(
        principal, keytab.getAbsolutePath());
    result = ((KerberosAuthenticator)authenticator).getUserName();
    assertEquals("Re-login failed", expResult, result);
    principal = "alice";
    keytab = aliceKeytab;
    try {
      authenticator = FlumeAuthenticationUtil.getAuthenticator(
          principal, keytab.getAbsolutePath());
      result = ((KerberosAuthenticator)authenticator).getUserName();
      fail("Login should have failed with a new principal: " + result);
    } catch (Exception ex) {
      assertTrue("Login with a new principal failed, but for an unexpected "
          + "reason: " + ex.getMessage(),
          ex.getMessage().contains("Cannot use multiple kerberos principals"));
    }
  }
}
| 9,860 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/FlumeAuthenticationUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import javax.security.auth.callback.CallbackHandler;
import java.io.IOException;
/**
 * Helper for obtaining a {@link FlumeAuthenticator}. Callers supplying a
 * kerberos principal and keytab receive the JVM-wide shared
 * {@link KerberosAuthenticator} bound to that identity; callers supplying
 * neither receive a {@link SimpleAuthenticator} that executes without any
 * authentication.
 */
public class FlumeAuthenticationUtil {
  /** Single kerberos authenticator for the JVM; lazily created. */
  private static KerberosAuthenticator kerbAuthenticator;
  private FlumeAuthenticationUtil() {
    // static utility class; never instantiated
  }
  /**
   * Returns a SimpleAuthenticator when both arguments are null; otherwise
   * authenticates the supplied credentials and returns the shared
   * KerberosAuthenticator. Partially supplied credentials raise
   * IllegalArgumentException; a failed login raises SecurityException.
   *
   * @param principal kerberos principal, or null for unauthenticated mode
   * @param keytab path to the keytab file, or null for unauthenticated mode
   * @return FlumeAuthenticator for the requested mode
   *
   * @throws org.apache.flume.auth.SecurityException on authentication failure
   */
  public static synchronized FlumeAuthenticator getAuthenticator(
      String principal, String keytab) throws SecurityException {
    boolean unauthenticated = (principal == null) && (keytab == null);
    if (unauthenticated) {
      return SimpleAuthenticator.getSimpleAuthenticator();
    }
    Preconditions.checkArgument(principal != null,
        "Principal can not be null when keytab is provided");
    Preconditions.checkArgument(keytab != null,
        "Keytab can not be null when Principal is provided");
    if (kerbAuthenticator == null) {
      kerbAuthenticator = new KerberosAuthenticator();
    }
    kerbAuthenticator.authenticate(principal, keytab);
    return kerbAuthenticator;
  }
  /**
   * Returns the standard SaslGssCallbackHandler from the hadoop common module.
   *
   * @return CallbackHandler
   */
  public static CallbackHandler getSaslGssCallbackHandler() {
    return new SaslRpcServer.SaslGssCallbackHandler();
  }
  /**
   * Resolves the principal (expanding any _HOST pattern) via hadoop-common's
   * SecurityUtil, then splits it into user name, host and kerberos realm.
   *
   * @param principal kerberos principal to resolve and split
   * @return String[] of username, hostname and kerberos realm
   * @throws IOException if host resolution fails
   */
  public static String[] splitKerberosName(String principal) throws IOException {
    String resolved = SecurityUtil.getServerPrincipal(principal, "");
    return SaslRpcServer.splitKerberosName(resolved);
  }
  @VisibleForTesting
  static void clearCredentials() {
    kerbAuthenticator = null;
  }
}
| 9,861 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/SecurityException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
/**
 * SecurityException thrown in the Flume security module.
 * Unchecked so authentication failures can propagate out of code paths that
 * cannot declare checked exceptions (e.g. privileged executors).
 */
public class SecurityException extends RuntimeException {
  /** @param message description of the security failure */
  public SecurityException(String message) {
    super(message);
  }
  /**
   * @param message description of the security failure
   * @param cause underlying exception that triggered the failure
   */
  public SecurityException(String message, Throwable cause) {
    super(message, cause);
  }
  /** @param cause underlying exception that triggered the failure */
  public SecurityException(Throwable cause) {
    super(cause);
  }
}
| 9,862 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/KerberosAuthenticator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
/**
 * A kerberos authenticator, which authenticates using the supplied principal
 * and keytab and executes with authenticated privileges.
 * A single instance is bound to exactly one (principal, keytab) identity for
 * the lifetime of the JVM; see {@link #authenticate(String, String)}.
 */
class KerberosAuthenticator implements FlumeAuthenticator {
  private static final Logger LOG = LoggerFactory
      .getLogger(KerberosAuthenticator.class);
  // Populated on the first successful login; volatile for visibility across threads.
  private volatile UserGroupInformation ugi;
  // The (principal, keytab) pair this authenticator is bound to; used to
  // reject attempts to login as a different principal later.
  private volatile KerberosUser prevUser;
  // Executor that runs actions under the logged-in UGI.
  private volatile PrivilegedExecutor privilegedExecutor;
  // Cache of proxy-user executors keyed by proxy user name; access is
  // confined to the synchronized proxyAs() method.
  private Map<String, PrivilegedExecutor> proxyCache = new HashMap<String, PrivilegedExecutor>();
  /** Runs the action as the authenticated kerberos user. */
  @Override
  public <T> T execute(PrivilegedAction<T> action) {
    return privilegedExecutor.execute(action);
  }
  /** Runs the action as the authenticated kerberos user, propagating exceptions as-is. */
  @Override
  public <T> T execute(PrivilegedExceptionAction<T> action) throws Exception {
    return privilegedExecutor.execute(action);
  }
  /**
   * Returns this instance for a null/empty proxy name; otherwise returns a
   * (cached) executor that runs actions as the proxy user on top of the
   * kerberos login.
   */
  @Override
  public synchronized PrivilegedExecutor proxyAs(String proxyUserName) {
    if (proxyUserName == null || proxyUserName.isEmpty()) {
      return this;
    }
    if (proxyCache.get(proxyUserName) == null) {
      UserGroupInformation proxyUgi;
      proxyUgi = UserGroupInformation.createProxyUser(proxyUserName, ugi);
      printUGI(proxyUgi);
      proxyCache.put(proxyUserName, new UGIExecutor(proxyUgi));
    }
    return proxyCache.get(proxyUserName);
  }
  /** Always true: this implementation is only handed out after a kerberos login. */
  @Override
  public boolean isAuthenticated() {
    return true;
  }
  /**
   * When valid principal and keytab are provided and if authentication has
   * not yet been done for this object, this method authenticates the
   * credentials and populates the ugi. In case of null or invalid credentials
   * IllegalArgumentException is thrown. In case of failure to authenticate,
   * SecurityException is thrown. If authentication has already happened on
   * this KerberosAuthenticator object, then this method checks to see if the current
   * credentials passed are same as the validated credentials. If not, it throws
   * an exception as this authenticator can represent only one Principal.
   *
   * @param principal kerberos principal (may contain the _HOST pattern)
   * @param keytab path to a readable keytab file for the principal
   */
  public synchronized void authenticate(String principal, String keytab) {
    // sanity checking
    Preconditions.checkArgument(principal != null && !principal.isEmpty(),
        "Invalid Kerberos principal: " + String.valueOf(principal));
    Preconditions.checkArgument(keytab != null && !keytab.isEmpty(),
        "Invalid Kerberos keytab: " + String.valueOf(keytab));
    File keytabFile = new File(keytab);
    Preconditions.checkArgument(keytabFile.isFile() && keytabFile.canRead(),
        "Keytab is not a readable file: " + String.valueOf(keytab));
    // resolve the requested principal
    String resolvedPrincipal;
    try {
      // resolves _HOST pattern using standard Hadoop search/replace
      // via DNS lookup when 2nd argument is empty
      resolvedPrincipal = SecurityUtil.getServerPrincipal(principal, "");
    } catch (IOException e) {
      throw new IllegalArgumentException("Host lookup error resolving kerberos principal ("
          + principal + "). Exception follows.", e);
    }
    Preconditions.checkNotNull(resolvedPrincipal,
        "Resolved Principal must not be null");
    // be cruel and unusual when user tries to login as multiple principals
    // this isn't really valid with a reconfigure but this should be rare
    // enough to warrant a restart of the agent JVM
    // TODO: find a way to interrogate the entire current config state,
    // since we don't have to be unnecessarily protective if they switch all
    // HDFS sinks to use a different principal all at once.
    KerberosUser newUser = new KerberosUser(resolvedPrincipal, keytab);
    Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
        "Cannot use multiple kerberos principals in the same agent. " +
        " Must restart agent to use new principal or keytab. " +
        "Previous = %s, New = %s", prevUser, newUser);
    // enable the kerberos mode of UGI, before doing anything else
    if (!UserGroupInformation.isSecurityEnabled()) {
      Configuration conf = new Configuration(false);
      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      UserGroupInformation.setConfiguration(conf);
    }
    // We are interested in currently logged in user with kerberos creds
    UserGroupInformation curUser = null;
    try {
      curUser = UserGroupInformation.getLoginUser();
      if (curUser != null && !curUser.hasKerberosCredentials()) {
        curUser = null;
      }
    } catch (IOException e) {
      LOG.warn("User unexpectedly had no active login. Continuing with " +
          "authentication", e);
    }
    /*
     *  if ugi is not null,
     *     if ugi matches currently logged in kerberos user, we are good
     *     else we are logged out, so relogin our ugi
     *  else if ugi is null, login and populate state
     */
    try {
      if (ugi != null) {
        if (curUser != null && curUser.getUserName().equals(ugi.getUserName())) {
          LOG.debug("Using existing principal login: {}", ugi);
        } else {
          LOG.info("Attempting kerberos Re-login as principal ({}) ",
              new Object[] { ugi.getUserName() } );
          ugi.reloginFromKeytab();
        }
      } else {
        LOG.info("Attempting kerberos login as principal ({}) from keytab " +
            "file ({})", new Object[] { resolvedPrincipal, keytab } );
        UserGroupInformation.loginUserFromKeytab(resolvedPrincipal, keytab);
        this.ugi = UserGroupInformation.getLoginUser();
        this.prevUser = new KerberosUser(resolvedPrincipal, keytab);
        this.privilegedExecutor = new UGIExecutor(this.ugi);
      }
    } catch (IOException e) {
      throw new SecurityException("Authentication error while attempting to "
          + "login as kerberos principal (" + resolvedPrincipal + ") using "
          + "keytab (" + keytab + "). Exception follows.", e);
    }
    printUGI(this.ugi);
  }
  /** Logs who we are logged in (or proxying) as; no-op for a null UGI. */
  private void printUGI(UserGroupInformation ugi) {
    if (ugi != null) {
      // dump login information
      AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
      LOG.info("\n{} \nUser: {} \nAuth method: {} \nKeytab: {} \n",
          new Object[] {
              authMethod.equals(AuthenticationMethod.PROXY) ? "Proxy as: " : "Logged as: ",
              ugi.getUserName(), authMethod, ugi.isFromKeytab()
          }
      );
    }
  }
  /**
   * startCredentialRefresher should be used only for long running
   * methods like Thrift source. For all privileged methods that use a UGI, the
   * credentials are checked automatically and refreshed before the
   * privileged method is executed in the UGIExecutor
   */
  // NOTE(review): the scheduler created here is never shut down and uses a
  // non-daemon thread; presumably acceptable for agent-lifetime sources, but
  // worth confirming it does not block JVM exit.
  @Override
  public void startCredentialRefresher() {
    int CHECK_TGT_INTERVAL = 120; // seconds
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    scheduler.scheduleWithFixedDelay(new Runnable() {
      @Override
      public void run() {
        try {
          ugi.checkTGTAndReloginFromKeytab();
        } catch (IOException e) {
          LOG.warn("Error occured during checkTGTAndReloginFromKeytab() for user " +
              ugi.getUserName(), e);
        }
      }
    }, CHECK_TGT_INTERVAL, CHECK_TGT_INTERVAL, TimeUnit.SECONDS);
  }
  /** Visible for tests: name of the logged-in user, or null before login. */
  @VisibleForTesting
  String getUserName() {
    if (ugi != null) {
      return ugi.getUserName();
    } else {
      return null;
    }
  }
}
| 9,863 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/KerberosUser.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.flume.auth;
/**
* Simple Pair class used to define a unique (principal, keyTab) combination.
*/
public class KerberosUser {
private final String principal;
private final String keyTab;
public KerberosUser(String principal, String keyTab) {
this.principal = principal;
this.keyTab = keyTab;
}
public String getPrincipal() {
return principal;
}
public String getKeyTab() {
return keyTab;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final KerberosUser other = (KerberosUser) obj;
if ((this.principal == null) ?
(other.principal != null) :
!this.principal.equals(other.principal)) {
return false;
}
if ((this.keyTab == null) ? (other.keyTab != null) : !this.keyTab.equals(other.keyTab)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = 7;
hash = 41 * hash + (this.principal != null ? this.principal.hashCode() : 0);
hash = 41 * hash + (this.keyTab != null ? this.keyTab.hashCode() : 0);
return hash;
}
@Override
public String toString() {
return "{ principal: " + principal + ", keytab: " + keyTab + " }";
}
}
| 9,864 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/UGIExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
/**
 * PrivilegedExecutor backed by a Hadoop UserGroupInformation instance; every
 * action runs inside {@code ugi.doAs(...)} after the kerberos TGT has been
 * opportunistically refreshed.
 */
class UGIExecutor implements PrivilegedExecutor {
  private static final long MIN_TIME_BEFORE_RELOGIN = 5 * 60 * 1000L;
  private UserGroupInformation ugi;
  // Timestamp of the most recent relogin attempt. Deliberately volatile and
  // unsynchronized; see the comment on reloginUGI().
  private volatile long lastReloginAttempt = 0;
  UGIExecutor(UserGroupInformation ugi) {
    this.ugi = ugi;
  }
  /** Runs the action as this executor's user. */
  @Override
  public <T> T execute(PrivilegedAction<T> action) {
    ensureValidAuth();
    return ugi.doAs(action);
  }
  /** Runs the action as this executor's user, propagating exceptions as-is. */
  @Override
  public <T> T execute(PrivilegedExceptionAction<T> action) throws Exception {
    ensureValidAuth();
    return ugi.doAs(action);
  }
  /** Refreshes credentials for this user and, when proxying, for the real user too. */
  private void ensureValidAuth() {
    reloginUGI(ugi);
    boolean proxied = ugi.getAuthenticationMethod().equals(AuthenticationMethod.PROXY);
    if (proxied) {
      reloginUGI(ugi.getRealUser());
    }
  }
  /*
   * lastReloginAttempt exists to avoid calling the synchronized
   * ugi.checkTGTAndReloginFromKeytab() on every action. This method is
   * intentionally NOT synchronized: several threads may race on reading
   * lastReloginAttempt, find it stale, and all attempt the TGT check at
   * once — which is harmless — while the common path stays lock-free.
   */
  private void reloginUGI(UserGroupInformation user) {
    if (!user.hasKerberosCredentials()) {
      return;
    }
    long currentTime = System.currentTimeMillis();
    if (currentTime - lastReloginAttempt < MIN_TIME_BEFORE_RELOGIN) {
      return;
    }
    lastReloginAttempt = currentTime;
    try {
      user.checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
      throw new SecurityException("Error trying to relogin from keytab for user "
          + user.getUserName(), e);
    }
  }
  @VisibleForTesting
  String getUserName() {
    return (ugi == null) ? null : ugi.getUserName();
  }
}
| 9,865 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/FlumeAuthenticator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
/**
 * FlumeAuthenticator extends on a PrivilegedExecutor providing capabilities to
 * proxy as a different user
 */
public interface FlumeAuthenticator extends PrivilegedExecutor {
  /**
   * Returns the current instance if proxyUserName is null or
   * returns the proxied Executor if proxyUserName is valid
   *
   * @param proxyUserName name of the user to proxy as; may be null or empty
   * @return PrivilegedExecutor that executes as the (possibly proxied) user
   */
  PrivilegedExecutor proxyAs(String proxyUserName);
  /**
   * Returns true, if the underlying Authenticator was obtained by
   * successful kerberos authentication
   *
   * @return boolean
   */
  boolean isAuthenticated();
  /**
   * For Authenticators backed by credentials, this method refreshes the
   * credentials periodically
   */
  void startCredentialRefresher();
}
| 9,866 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/SimpleAuthenticator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import org.apache.hadoop.security.UserGroupInformation;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
/**
 * A no-op authenticator, which does not authenticate and executes
 * without any authenticated privileges
 */
class SimpleAuthenticator implements FlumeAuthenticator {
  /** Lazy, thread-safe singleton via the initialization-on-demand holder idiom. */
  private static class SimpleAuthenticatorHolder {
    public static SimpleAuthenticator authenticator = new SimpleAuthenticator();
  }
  public static SimpleAuthenticator getSimpleAuthenticator() {
    return SimpleAuthenticatorHolder.authenticator;
  }
  // Executors for previously requested proxy users; access confined to proxyAs().
  private Map<String, PrivilegedExecutor> proxyCache = new HashMap<String, PrivilegedExecutor>();
  private SimpleAuthenticator() {
  }
  /** Runs the action directly in the calling context — no doAs() wrapping. */
  @Override
  public <T> T execute(PrivilegedExceptionAction<T> action)
      throws Exception {
    return action.run();
  }
  /** Runs the action directly in the calling context — no doAs() wrapping. */
  @Override
  public <T> T execute(PrivilegedAction<T> action) {
    return action.run();
  }
  /**
   * Returns this instance for a null/empty name; otherwise returns a cached
   * UGI-backed executor that proxies as the named user on top of the current
   * (unauthenticated) user.
   */
  @Override
  public synchronized PrivilegedExecutor proxyAs(String proxyUserName) {
    if (proxyUserName == null || proxyUserName.isEmpty()) {
      return this;
    }
    PrivilegedExecutor cached = proxyCache.get(proxyUserName);
    if (cached != null) {
      return cached;
    }
    UserGroupInformation proxyUgi;
    try {
      proxyUgi = UserGroupInformation.createProxyUser(proxyUserName,
          UserGroupInformation.getCurrentUser());
    } catch (IOException e) {
      throw new SecurityException("Unable to create proxy User", e);
    }
    PrivilegedExecutor executor = new UGIExecutor(proxyUgi);
    proxyCache.put(proxyUserName, executor);
    return executor;
  }
  /** Always false: this authenticator never performs a login. */
  @Override
  public boolean isAuthenticated() {
    return false;
  }
  @Override
  public void startCredentialRefresher() {
    // nothing to refresh — there are no credentials
  }
}
| 9,867 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/auth/PrivilegedExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.auth;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
/**
 * PrivilegedExecutor provides the ability to execute a PrivilegedAction
 * or a PrivilegedExceptionAction. Implementors of this class, can chose to execute
 * in normal mode or secure authenticated mode
 */
public interface PrivilegedExecutor {
  /**
   * This method is used to execute a privileged action, the implementor can
   * chose to execute the action using the appropriate privileges
   *
   * @param action A PrivilegedExceptionAction to perform as the desired user
   * @param <T> The return type of the action
   * @return T the T value returned by action.run()
   * @throws Exception any exception raised by action.run(), propagated as-is
   */
  <T> T execute(PrivilegedExceptionAction<T> action) throws Exception;
  /**
   * This method is used to execute a privileged action, the implementor can
   * chose to execute the action using the appropriate privileges
   *
   * @param action A PrivilegedAction to perform as the desired user
   * @param <T> The return type of the action
   * @return T the T value returned by action.run()
   */
  <T> T execute(PrivilegedAction<T> action);
}
| 9,868 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/api/SecureThriftRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import org.apache.flume.FlumeException;
import org.apache.flume.auth.FlumeAuthenticationUtil;
import org.apache.flume.auth.FlumeAuthenticator;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.thrift.transport.TSaslClientTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.Sasl;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
 * A Thrift RPC client that authenticates to the server over SASL/GSSAPI
 * (kerberos). Client credentials are read from the "client-principal" /
 * "client-keytab" properties and the server identity from "server-principal".
 */
public class SecureThriftRpcClient extends ThriftRpcClient {
  private static final String CLIENT_PRINCIPAL = "client-principal";
  private static final String CLIENT_KEYTAB = "client-keytab";
  private static final String SERVER_PRINCIPAL = "server-principal";
  // Kerberos principal the remote Thrift server runs as.
  private String serverPrincipal;
  // Authenticated executor used to run the SASL handshake under the client login.
  private FlumeAuthenticator privilegedExecutor;
  /**
   * Reads and validates the kerberos-related properties, then performs the
   * client kerberos login.
   *
   * @throws IllegalArgumentException if the server principal is missing
   * @throws FlumeException if kerberos authentication fails
   */
  @Override
  protected void configure(Properties properties) throws FlumeException {
    super.configure(properties);
    serverPrincipal = properties.getProperty(SERVER_PRINCIPAL);
    if (serverPrincipal == null || serverPrincipal.isEmpty()) {
      throw new IllegalArgumentException("Flume in secure mode, but Flume config doesn't "
          + "specify a server principal to use for Kerberos auth.");
    }
    String clientPrincipal = properties.getProperty(CLIENT_PRINCIPAL);
    String keytab = properties.getProperty(CLIENT_KEYTAB);
    this.privilegedExecutor = FlumeAuthenticationUtil.getAuthenticator(clientPrincipal, keytab);
    if (!privilegedExecutor.isAuthenticated()) {
      throw new FlumeException("Authentication failed in Kerberos mode for " +
          "principal " + clientPrincipal + " keytab " + keytab);
    }
  }
  /**
   * Wraps the plain socket in a GSSAPI SASL transport targeting the
   * configured server principal (split into service name and host).
   */
  @Override
  protected TTransport getTransport(TSocket tsocket) throws Exception {
    Map<String, String> saslProperties = new HashMap<String, String>();
    // "auth" = authentication only; no integrity/confidentiality protection.
    saslProperties.put(Sasl.QOP, "auth");
    String[] names;
    try {
      names = FlumeAuthenticationUtil.splitKerberosName(serverPrincipal);
    } catch (IOException e) {
      throw new FlumeException(
          "Error while trying to resolve Principal name - " + serverPrincipal, e);
    }
    return new UgiSaslClientTransport(
        "GSSAPI", null, names[0], names[1], saslProperties, null, tsocket, privilegedExecutor);
  }
  /**
   * This transport wraps the Sasl transports to set up the right UGI context for open().
   */
  public static class UgiSaslClientTransport extends TSaslClientTransport {
    // Executor providing the login context the SASL handshake must run under.
    PrivilegedExecutor privilegedExecutor;
    public UgiSaslClientTransport(String mechanism, String authorizationId,
        String protocol, String serverName, Map<String, String> props,
        CallbackHandler cbh, TTransport transport, PrivilegedExecutor privilegedExecutor)
        throws IOException, TTransportException {
      super(mechanism, authorizationId, protocol, serverName, props, cbh, transport);
      this.privilegedExecutor = privilegedExecutor;
    }
    /**
     * Open the SASL transport with using the current UserGroupInformation.
     * This is needed to get the current login context stored
     */
    // NOTE(review): the InterruptedException branch does not restore the
    // thread's interrupt status — consider Thread.currentThread().interrupt().
    @Override
    public void open() throws FlumeException {
      try {
        this.privilegedExecutor.execute(
            new PrivilegedExceptionAction<Void>() {
              public Void run() throws FlumeException {
                // this is a workaround to using UgiSaslClientTransport.super.open()
                // which results in IllegalAccessError
                callSuperClassOpen();
                return null;
              }
            });
      } catch (InterruptedException e) {
        throw new FlumeException("Interrupted while opening underlying transport", e);
      } catch (Exception e) {
        throw new FlumeException("Failed to open SASL transport", e);
      }
    }
    // Invoked from inside the privileged action; see the workaround note above.
    private void callSuperClassOpen() throws FlumeException {
      try {
        super.open();
      } catch (TTransportException e) {
        throw new FlumeException("Failed to open SASL transport", e);
      }
    }
  }
}
| 9,869 |
0 | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-auth/src/main/java/org/apache/flume/api/SecureRpcClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.Properties;
/**
 * Factory for building secure (Kerberos-enabled) Flume
 * {@link org.apache.flume.api.RpcClient} implementations.
 */
public class SecureRpcClientFactory {
  /**
   * Builds a secure Thrift-based {@linkplain org.apache.flume.api.RpcClient}
   * for communicating with the next hop.
   *
   * @param props client configuration properties
   * @return an {@linkplain org.apache.flume.api.RpcClient} backed by Thrift,
   *         configured with the given properties
   */
  public static RpcClient getThriftInstance(Properties props) {
    SecureThriftRpcClient secureClient = new SecureThriftRpcClient();
    secureClient.configure(props);
    return secureClient;
  }
}
| 9,870 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent/embedded/TestEmbeddedAgentConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.Map;
import junit.framework.Assert;
import org.apache.flume.FlumeException;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Maps;
public class TestEmbeddedAgentConfiguration {
private Map<String, String> properties;
@Before
public void setUp() throws Exception {
properties = Maps.newHashMap();
properties.put("source.type", EmbeddedAgentConfiguration.SOURCE_TYPE_EMBEDDED);
properties.put("channel.type", "memory");
properties.put("channel.capacity", "200");
properties.put("sinks", "sink1 sink2");
properties.put("sink1.type", "avro");
properties.put("sink2.type", "avro");
properties.put("sink1.hostname", "sink1.host");
properties.put("sink1.port", "2");
properties.put("sink2.hostname", "sink2.host");
properties.put("sink2.port", "2");
properties.put("processor.type", "load_balance");
properties.put("source.interceptors", "i1");
properties.put("source.interceptors.i1.type", "timestamp");
}
@Test
public void testFullSourceType() throws Exception {
doTestExcepted(EmbeddedAgentConfiguration.configure("test1", properties));
}
@Test
public void testMissingSourceType() throws Exception {
Assert.assertNotNull(properties.remove("source.type"));
doTestExcepted(EmbeddedAgentConfiguration.configure("test1", properties));
}
@Test
public void testShortSourceType() throws Exception {
properties.put("source.type", "EMBEDDED");
doTestExcepted(EmbeddedAgentConfiguration.configure("test1", properties));
}
public void doTestExcepted(Map<String, String> actual) throws Exception {
Map<String, String> expected = Maps.newHashMap();
expected.put("test1.channels", "channel-test1");
expected.put("test1.channels.channel-test1.capacity", "200");
expected.put("test1.channels.channel-test1.type", "memory");
expected.put("test1.sinkgroups", "sink-group-test1");
expected.put("test1.sinkgroups.sink-group-test1.processor.type", "load_balance");
expected.put("test1.sinkgroups.sink-group-test1.sinks", "sink1 sink2");
expected.put("test1.sinks", "sink1 sink2");
expected.put("test1.sinks.sink1.channel", "channel-test1");
expected.put("test1.sinks.sink1.hostname", "sink1.host");
expected.put("test1.sinks.sink1.port", "2");
expected.put("test1.sinks.sink1.type", "avro");
expected.put("test1.sinks.sink2.channel", "channel-test1");
expected.put("test1.sinks.sink2.hostname", "sink2.host");
expected.put("test1.sinks.sink2.port", "2");
expected.put("test1.sinks.sink2.type", "avro");
expected.put("test1.sources", "source-test1");
expected.put("test1.sources.source-test1.channels", "channel-test1");
expected.put("test1.sources.source-test1.type",
EmbeddedAgentConfiguration.SOURCE_TYPE_EMBEDDED);
expected.put("test1.sources.source-test1.interceptors", "i1");
expected.put("test1.sources.source-test1.interceptors.i1.type", "timestamp");
Assert.assertEquals(expected, actual);
}
@Test(expected = FlumeException.class)
public void testBadSource() throws Exception {
properties.put("source.type", "exec");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testBadChannel() throws Exception {
properties.put("channel.type", "jdbc");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testBadSink() throws Exception {
properties.put("sink1.type", "hbase");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testBadSinkProcessor() throws Exception {
properties.put("processor.type", "bad");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testNoChannel() throws Exception {
properties.remove("channel.type");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testNoSink() throws Exception {
properties.remove("sink2.type");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testNoSinkProcessor() throws Exception {
properties.remove("processor.type");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testBadKey() throws Exception {
properties.put("bad.key.name", "bad");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testSinkNamedLikeSource() throws Exception {
properties.put("sinks", "source");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testSinkNamedLikeChannel() throws Exception {
properties.put("sinks", "channel");
EmbeddedAgentConfiguration.configure("test1", properties);
}
@Test(expected = FlumeException.class)
public void testSinkNamedLikeProcessor() throws Exception {
properties.put("sinks", "processor");
EmbeddedAgentConfiguration.configure("test1", properties);
}
} | 9,871 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent/embedded/TestEmbeddedAgentEmbeddedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import static org.mockito.Mockito.*;
import java.util.List;
import java.util.Map;
import junit.framework.Assert;
import org.apache.flume.Channel;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.SinkRunner;
import org.apache.flume.SourceRunner;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.lifecycle.LifecycleState;
import org.apache.flume.node.MaterializedConfiguration;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * Unit tests for {@link EmbeddedAgent} lifecycle and put/putAll delegation,
 * using a mocked {@link MaterializedConfiguration} so no real source, channel
 * or sink is started.
 */
public class TestEmbeddedAgentEmbeddedSource {
  // Agent under test, wired to the mocked configuration below.
  private EmbeddedAgent agent;
  // User-facing embedded-agent properties fed to agent.configure().
  private Map<String, String> properties;
  // Materialized configuration returning the three mocked components.
  private MaterializedConfiguration config;
  // Mocked embedded source; put/putAll delegation is verified against it.
  private EmbeddedSource source;
  private SourceRunner sourceRunner;
  private Channel channel;
  private SinkRunner sinkRunner;

  @Before
  public void setUp() throws Exception {
    properties = Maps.newHashMap();
    properties.put("source.type", EmbeddedAgentConfiguration.SOURCE_TYPE_EMBEDDED);
    properties.put("channel.type", "memory");
    properties.put("sinks", "sink1 sink2");
    properties.put("sink1.type", "avro");
    properties.put("sink2.type", "avro");
    properties.put("processor.type", "load_balance");
    sourceRunner = mock(SourceRunner.class);
    channel = mock(Channel.class);
    sinkRunner = mock(SinkRunner.class);
    source = mock(EmbeddedSource.class);
    // All components report a healthy START state so the agent's lifecycle
    // supervision treats them as successfully started.
    when(sourceRunner.getSource()).thenReturn(source);
    when(sourceRunner.getLifecycleState()).thenReturn(LifecycleState.START);
    when(channel.getLifecycleState()).thenReturn(LifecycleState.START);
    when(sinkRunner.getLifecycleState()).thenReturn(LifecycleState.START);
    // Hand-rolled MaterializedConfiguration exposing exactly one mocked
    // source runner, sink runner and channel; mutation is unsupported.
    config = new MaterializedConfiguration() {
      @Override
      public Map<String, SourceRunner> getSourceRunners() {
        Map<String, SourceRunner> result = Maps.newHashMap();
        result.put("source", sourceRunner);
        return ImmutableMap.copyOf(result);
      }
      @Override
      public Map<String, SinkRunner> getSinkRunners() {
        Map<String, SinkRunner> result = Maps.newHashMap();
        result.put("sink", sinkRunner);
        return ImmutableMap.copyOf(result);
      }
      @Override
      public Map<String, Channel> getChannels() {
        Map<String, Channel> result = Maps.newHashMap();
        result.put("channel", channel);
        return ImmutableMap.copyOf(result);
      }
      @Override
      public void addSourceRunner(String name, SourceRunner sourceRunner) {
        throw new UnsupportedOperationException();
      }
      @Override
      public void addSinkRunner(String name, SinkRunner sinkRunner) {
        throw new UnsupportedOperationException();
      }
      @Override
      public void addChannel(String name, Channel channel) {
        throw new UnsupportedOperationException();
      }
    };
    // Provider that ignores its arguments (the 'properties' parameter shadows
    // the field) and always returns the mocked configuration above.
    agent = new EmbeddedAgent(new MaterializedConfigurationProvider() {
      public MaterializedConfiguration get(String name, Map<String, String> properties) {
        return config;
      }
    }, "dummy");
  }

  /** start() must start source runner, channel and sink runner exactly once. */
  @Test
  public void testStart() {
    agent.configure(properties);
    agent.start();
    verify(sourceRunner, times(1)).start();
    verify(channel, times(1)).start();
    verify(sinkRunner, times(1)).start();
  }

  /** stop() must stop source runner, channel and sink runner exactly once. */
  @Test
  public void testStop() {
    agent.configure(properties);
    agent.start();
    agent.stop();
    verify(sourceRunner, times(1)).stop();
    verify(channel, times(1)).stop();
    verify(sinkRunner, times(1)).stop();
  }

  /** A failing source start propagates and still stops every component. */
  @Test
  public void testStartSourceThrowsException() {
    doThrow(new LocalRuntimeException()).when(sourceRunner).start();
    startExpectingLocalRuntimeException();
  }

  /** A failing channel start propagates and still stops every component. */
  @Test
  public void testStartChannelThrowsException() {
    doThrow(new LocalRuntimeException()).when(channel).start();
    startExpectingLocalRuntimeException();
  }

  /** A failing sink start propagates and still stops every component. */
  @Test
  public void testStartSinkThrowsException() {
    doThrow(new LocalRuntimeException()).when(sinkRunner).start();
    startExpectingLocalRuntimeException();
  }

  // Shared body for the three failure tests above: start() must rethrow the
  // injected exception and the agent must stop all components on the way out.
  private void startExpectingLocalRuntimeException() {
    agent.configure(properties);
    try {
      agent.start();
      Assert.fail();
    } catch (LocalRuntimeException e) {
      // expected
    }
    verify(sourceRunner, times(1)).stop();
    verify(channel, times(1)).stop();
    verify(sinkRunner, times(1)).stop();
  }

  // Marker exception type so the tests catch exactly the injected failure and
  // nothing else.
  private static class LocalRuntimeException extends RuntimeException {
    private static final long serialVersionUID = 116546244849853151L;
  }

  /** put() delegates the event to the embedded source. */
  @Test
  public void testPut() throws EventDeliveryException {
    Event event = new SimpleEvent();
    agent.configure(properties);
    agent.start();
    agent.put(event);
    verify(source, times(1)).put(event);
  }

  /** putAll() delegates the event batch to the embedded source. */
  @Test
  public void testPutAll() throws EventDeliveryException {
    Event event = new SimpleEvent();
    List<Event> events = Lists.newArrayList();
    events.add(event);
    agent.configure(properties);
    agent.start();
    agent.putAll(events);
    verify(source, times(1)).putAll(events);
  }

  /** put() before start() must fail fast. */
  @Test(expected = IllegalStateException.class)
  public void testPutNotStarted() throws EventDeliveryException {
    Event event = new SimpleEvent();
    agent.configure(properties);
    agent.put(event);
  }

  /** putAll() before start() must fail fast. */
  @Test(expected = IllegalStateException.class)
  public void testPutAllNotStarted() throws EventDeliveryException {
    Event event = new SimpleEvent();
    List<Event> events = Lists.newArrayList();
    events.add(event);
    agent.configure(properties);
    agent.putAll(events);
  }
}
| 9,872 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent/embedded/TestEmbeddedAgent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.avro.ipc.netty.NettyServer;
import org.apache.avro.ipc.Responder;
import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.source.avro.AvroFlumeEvent;
import org.apache.flume.source.avro.AvroSourceProtocol;
import org.apache.flume.source.avro.Status;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * End-to-end tests for {@link EmbeddedAgent}: events put into the agent are
 * delivered over Avro RPC to a local Netty-hosted collector and compared
 * against what was sent.
 */
public class TestEmbeddedAgent {
  private static final Logger LOGGER = LoggerFactory
      .getLogger(TestEmbeddedAgent.class);
  private static final String HOSTNAME = "localhost";
  // Makes each test's agent name unique so repeated runs in one JVM do not
  // collide on registered component names.
  private static AtomicInteger serialNumber = new AtomicInteger(0);

  private EmbeddedAgent agent;
  private Map<String, String> properties;
  // Receives events on the server side of the Avro RPC connection.
  private EventCollector eventCollector;
  private NettyServer nettyServer;
  // Sample event payload shared by all tests.
  private Map<String, String> headers;
  private byte[] body;

  @Before
  public void setUp() throws Exception {
    headers = Maps.newHashMap();
    headers.put("key1", "value1");
    body = "body".getBytes(Charsets.UTF_8);

    int port = findFreePort();
    eventCollector = new EventCollector();
    Responder responder = new SpecificResponder(AvroSourceProtocol.class,
        eventCollector);
    nettyServer = new NettyServer(responder,
        new InetSocketAddress(HOSTNAME, port));
    nettyServer.start();
    // give the server a second to start
    Thread.sleep(1000L);

    // Both sinks point at the same collector so either leg of the
    // load-balancing processor delivers to it.
    properties = Maps.newHashMap();
    properties.put("channel.type", "memory");
    properties.put("channel.capacity", "200");
    properties.put("sinks", "sink1 sink2");
    properties.put("sink1.type", "avro");
    properties.put("sink2.type", "avro");
    properties.put("sink1.hostname", HOSTNAME);
    properties.put("sink1.port", String.valueOf(port));
    properties.put("sink2.hostname", HOSTNAME);
    properties.put("sink2.port", String.valueOf(port));
    properties.put("processor.type", "load_balance");

    agent = new EmbeddedAgent("test-" + serialNumber.incrementAndGet());
  }

  @After
  public void tearDown() throws Exception {
    // Best-effort shutdown: failures here must not mask a test failure.
    if (agent != null) {
      try {
        agent.stop();
      } catch (Exception e) {
        LOGGER.debug("Error shutting down agent", e);
      }
    }
    if (nettyServer != null) {
      try {
        nettyServer.close();
      } catch (Exception e) {
        LOGGER.debug("Error shutting down server", e);
      }
    }
  }

  /** A single put() arrives at the collector with body and headers intact. */
  @Test(timeout = 30000L)
  public void testPut() throws Exception {
    agent.configure(properties);
    agent.start();
    agent.put(EventBuilder.withBody(body, headers));

    // Poll until delivery; the @Test timeout bounds the wait.
    Event event;
    while ((event = eventCollector.poll()) == null) {
      Thread.sleep(500L);
    }
    Assert.assertNotNull(event);
    Assert.assertArrayEquals(body, event.getBody());
    Assert.assertEquals(headers, event.getHeaders());
  }

  /** A putAll() batch arrives at the collector with body and headers intact. */
  @Test(timeout = 30000L)
  public void testPutAll() throws Exception {
    List<Event> events = Lists.newArrayList();
    events.add(EventBuilder.withBody(body, headers));
    agent.configure(properties);
    agent.start();
    agent.putAll(events);

    Event event;
    while ((event = eventCollector.poll()) == null) {
      Thread.sleep(500L);
    }
    Assert.assertNotNull(event);
    Assert.assertArrayEquals(body, event.getBody());
    Assert.assertEquals(headers, event.getHeaders());
  }

  /** A configured static interceptor adds its header before delivery. */
  @Test(timeout = 30000L)
  public void testPutWithInterceptors() throws Exception {
    properties.put("source.interceptors", "i1");
    properties.put("source.interceptors.i1.type", "static");
    properties.put("source.interceptors.i1.key", "key2");
    properties.put("source.interceptors.i1.value", "value2");

    agent.configure(properties);
    agent.start();
    agent.put(EventBuilder.withBody(body, headers));

    Event event;
    while ((event = eventCollector.poll()) == null) {
      Thread.sleep(500L);
    }
    Assert.assertNotNull(event);
    Assert.assertArrayEquals(body, event.getBody());
    Map<String, String> newHeaders = new HashMap<String, String>(headers);
    newHeaders.put("key2", "value2");
    Assert.assertEquals(newHeaders, event.getHeaders());
  }

  /** Agent names containing whitespace must still configure and deliver. */
  @Test(timeout = 30000L)
  public void testEmbeddedAgentName() throws Exception {
    EmbeddedAgent embedAgent = new EmbeddedAgent("test 1 2" + serialNumber.incrementAndGet());
    List<Event> events = Lists.newArrayList();
    events.add(EventBuilder.withBody(body, headers));
    embedAgent.configure(properties);
    embedAgent.start();
    try {
      embedAgent.putAll(events);

      Event event;
      while ((event = eventCollector.poll()) == null) {
        Thread.sleep(500L);
      }
      Assert.assertNotNull(event);
      Assert.assertArrayEquals(body, event.getBody());
      Assert.assertEquals(headers, event.getHeaders());
    } finally {
      // Previously the agent was stopped only on the success path, leaking it
      // (and its threads) on assertion failure; tearDown only stops the
      // shared 'agent' field, so always stop this local agent here.
      try {
        embedAgent.stop();
      } catch (Exception e) {
        LOGGER.debug("Error shutting down agent", e);
      }
    }
  }

  // Server-side Avro protocol implementation that queues received events for
  // the tests to poll.
  static class EventCollector implements AvroSourceProtocol {
    private final Queue<AvroFlumeEvent> eventQueue =
        new LinkedBlockingQueue<AvroFlumeEvent>();

    /** Returns the next received event converted to a Flume Event, or null. */
    public Event poll() {
      AvroFlumeEvent avroEvent = eventQueue.poll();
      if (avroEvent != null) {
        // NOTE(review): ByteBuffer.array() returns the whole backing array;
        // assumes the event body buffer has zero offset — TODO confirm.
        return EventBuilder.withBody(avroEvent.getBody().array(),
            toStringMap(avroEvent.getHeaders()));
      }
      return null;
    }

    @Override
    public Status append(AvroFlumeEvent event) {
      eventQueue.add(event);
      return Status.OK;
    }

    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      Preconditions.checkState(eventQueue.addAll(events));
      return Status.OK;
    }
  }

  // Avro hands headers over as CharSequence (Utf8) keys/values; convert to
  // plain Strings so map equality against the sent headers works.
  private static Map<String, String> toStringMap(Map<CharSequence, CharSequence> charSeqMap) {
    Map<String, String> stringMap = new HashMap<String, String>();
    for (Map.Entry<CharSequence, CharSequence> entry : charSeqMap.entrySet()) {
      stringMap.put(entry.getKey().toString(), entry.getValue().toString());
    }
    return stringMap;
  }

  // Asks the OS for an ephemeral port. NOTE(review): inherently racy — the
  // port could be grabbed by another process between close and the server
  // bind; acceptable for tests.
  private static int findFreePort() throws IOException {
    try (ServerSocket socket = new ServerSocket(0)) {
      return socket.getLocalPort();
    }
  }
}
| 9,873 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/test/java/org/apache/flume/agent/embedded/TestEmbeddedAgentState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import org.apache.flume.FlumeException;
import org.junit.Before;
import org.junit.Test;
import java.util.Map;
/**
 * State-machine tests for {@link EmbeddedAgent}: configure/start/stop must be
 * called in a legal order, and illegal transitions raise
 * IllegalStateException.
 */
public class TestEmbeddedAgentState {
  private static final String HOSTNAME = "localhost";
  private EmbeddedAgent agent;
  private Map<String, String> properties;

  /** Creates a fresh agent plus a valid configuration before each test. */
  @Before
  public void setUp() throws Exception {
    agent = new EmbeddedAgent("dummy");
    properties = Maps.newHashMap();
    String[][] baseConfig = {
        {"source.type", EmbeddedAgentConfiguration.SOURCE_TYPE_EMBEDDED},
        {"channel.type", "memory"},
        {"sinks", "sink1 sink2"},
        {"sink1.type", "avro"},
        {"sink2.type", "avro"},
        {"sink1.hostname", HOSTNAME},
        {"sink1.port", "0"},
        {"sink2.hostname", HOSTNAME},
        {"sink2.port", "0"},
        {"processor.type", "load_balance"},
    };
    for (String[] entry : baseConfig) {
      properties.put(entry[0], entry[1]);
    }
  }

  // Setup helper: configures the agent, rethrowing any failure unchecked so
  // it cannot be mistaken for the transition under test.
  private void configureQuietly() {
    try {
      agent.configure(properties);
    } catch (Exception e) {
      Throwables.propagate(e);
    }
  }

  // Setup helper: configures and starts the agent, rethrowing any failure
  // unchecked so it cannot be mistaken for the transition under test.
  private void configureAndStartQuietly() {
    try {
      agent.configure(properties);
      agent.start();
    } catch (Exception e) {
      Throwables.propagate(e);
    }
  }

  /** An unsupported source type is rejected at configure time. */
  @Test(expected = FlumeException.class)
  public void testConfigureWithBadSourceType() {
    properties.put(EmbeddedAgentConfiguration.SOURCE_TYPE, "bad");
    agent.configure(properties);
  }

  /** Reconfiguring a running agent is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testConfigureWhileStarted() {
    configureAndStartQuietly();
    agent.configure(properties);
  }

  /** Reconfiguring a stopped agent is allowed any number of times. */
  @Test
  public void testConfigureMultipleTimes() {
    agent.configure(properties);
    agent.configure(properties);
  }

  /** Starting an already-started agent is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStartWhileStarted() {
    configureAndStartQuietly();
    agent.start();
  }

  /** Starting before configure() is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStartUnconfigured() {
    agent.start();
  }

  /** Stopping before configure() is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStopBeforeConfigure() {
    agent.stop();
  }

  /** Stopping a configured-but-never-started agent is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStoppedWhileStopped() {
    configureQuietly();
    agent.stop();
  }

  /** Stopping twice in a row is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStopAfterStop() {
    try {
      agent.configure(properties);
      agent.start();
      agent.stop();
    } catch (Exception e) {
      Throwables.propagate(e);
    }
    agent.stop();
  }

  /** Stopping right after configure() (without start) is illegal. */
  @Test(expected = IllegalStateException.class)
  public void testStopAfterConfigure() {
    configureQuietly();
    agent.stop();
  }
}
| 9,874 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/EmbeddedAgentConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import org.apache.flume.FlumeException;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.BasicConfigurationConstants;
import org.apache.flume.conf.channel.ChannelType;
import org.apache.flume.conf.sink.SinkProcessorType;
import org.apache.flume.conf.sink.SinkType;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
/**
* Stores publicly accessible configuration constants and private
* configuration constants and methods.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class EmbeddedAgentConfiguration {
public static final String SEPERATOR = ".";
private static final Joiner JOINER = Joiner.on(SEPERATOR);
private static final String TYPE = "type";
/**
* Prefix for source properties
*/
public static final String SOURCE = "source";
/**
* Prefix for channel properties
*/
public static final String CHANNEL = "channel";
/**
* Prefix for sink processor properties
*/
public static final String SINK_PROCESSOR = "processor";
/**
* Space delimited list of sink names: e.g. sink1 sink2 sink3
*/
public static final String SINKS = "sinks";
public static final String SINKS_PREFIX = join(SINKS, "");
/**
* Source type, choices are `embedded'
*/
public static final String SOURCE_TYPE = join(SOURCE, TYPE);
/**
* Prefix for passing configuration parameters to the source
*/
public static final String SOURCE_PREFIX = join(SOURCE, "");
/**
* Channel type, choices are `memory' or `file'
*/
public static final String CHANNEL_TYPE = join(CHANNEL, TYPE);
/**
* Prefix for passing configuration parameters to the channel
*/
public static final String CHANNEL_PREFIX = join(CHANNEL, "");
/**
* Sink processor type, choices are `default', `failover' or `load_balance'
*/
public static final String SINK_PROCESSOR_TYPE = join(SINK_PROCESSOR, TYPE);
/**
* Prefix for passing configuration parameters to the sink processor
*/
public static final String SINK_PROCESSOR_PREFIX = join(SINK_PROCESSOR, "");
/**
* Embedded source which provides simple in-memory transfer to channel.
* Use this source via the put,putAll methods on the EmbeddedAgent. This
* is the only supported source to use for Embedded Agents.
*/
public static final String SOURCE_TYPE_EMBEDDED = EmbeddedSource.class.getName();
private static final String SOURCE_TYPE_EMBEDDED_ALIAS = "EMBEDDED";
/**
* Memory channel which stores events in heap. See Flume User Guide for
* configuration information. This is the recommended channel to use for
* Embedded Agents.
*/
public static final String CHANNEL_TYPE_MEMORY = ChannelType.MEMORY.name();
/**
* Spillable Memory channel which stores events in heap. See Flume User Guide for
* configuration information. This is the recommended channel to use for
* Embedded Agents.
*/
public static final String CHANNEL_TYPE_SPILLABLEMEMORY = ChannelType.SPILLABLEMEMORY.name();
/**
* File based channel which stores events in on local disk. See Flume User
* Guide for configuration information.
*/
public static final String CHANNEL_TYPE_FILE = ChannelType.FILE.name();
/**
* Avro sink which can send events to a downstream avro source. This is the
* only supported sink for Embedded Agents.
*/
public static final String SINK_TYPE_AVRO = SinkType.AVRO.name();
/**
* Default sink processors which may be used when there is only a single sink.
*/
public static final String SINK_PROCESSOR_TYPE_DEFAULT = SinkProcessorType.DEFAULT.name();
/**
* Failover sink processor. See Flume User Guide for configuration
* information.
*/
public static final String SINK_PROCESSOR_TYPE_FAILOVER = SinkProcessorType.FAILOVER.name();
/**
* Load balancing sink processor. See Flume User Guide for configuration
* information.
*/
public static final String SINK_PROCESSOR_TYPE_LOAD_BALANCE =
SinkProcessorType.LOAD_BALANCE.name();
private static final String[] ALLOWED_SOURCES = {
SOURCE_TYPE_EMBEDDED_ALIAS,
SOURCE_TYPE_EMBEDDED,
};
private static final String[] ALLOWED_CHANNELS = {
CHANNEL_TYPE_MEMORY,
CHANNEL_TYPE_FILE
};
private static final String[] ALLOWED_SINKS = {
SINK_TYPE_AVRO
};
private static final String[] ALLOWED_SINK_PROCESSORS = {
SINK_PROCESSOR_TYPE_DEFAULT,
SINK_PROCESSOR_TYPE_FAILOVER,
SINK_PROCESSOR_TYPE_LOAD_BALANCE
};
private static final ImmutableList<String> DISALLOWED_SINK_NAMES =
ImmutableList.of("source", "channel", "processor");
private static void validate(String name,
Map<String, String> properties) throws FlumeException {
if (properties.containsKey(SOURCE_TYPE)) {
checkAllowed(ALLOWED_SOURCES, properties.get(SOURCE_TYPE));
}
checkRequired(properties, CHANNEL_TYPE);
checkAllowed(ALLOWED_CHANNELS, properties.get(CHANNEL_TYPE));
checkRequired(properties, SINKS);
String sinkNames = properties.get(SINKS);
for (String sink : sinkNames.split("\\s+")) {
if (DISALLOWED_SINK_NAMES.contains(sink.toLowerCase(Locale.ENGLISH))) {
throw new FlumeException("Sink name " + sink + " is one of the" +
" disallowed sink names: " + DISALLOWED_SINK_NAMES);
}
String key = join(sink, TYPE);
checkRequired(properties, key);
checkAllowed(ALLOWED_SINKS, properties.get(key));
}
checkRequired(properties, SINK_PROCESSOR_TYPE);
checkAllowed(ALLOWED_SINK_PROCESSORS, properties.get(SINK_PROCESSOR_TYPE));
}
  /**
   * Folds embedded configuration structure into an agent configuration.
   * Should only be called after validate returns without error.
   *
   * @param name - agent name
   * @param properties - embedded agent configuration
   * @return configuration applicable to a flume agent
   * @throws FlumeException if validation fails or a property key does not
   *     match any known source/channel/sink-processor prefix
   */
  @InterfaceAudience.Private
  @InterfaceStability.Unstable
  static Map<String, String> configure(String name,
      Map<String, String> properties) throws FlumeException {
    validate(name, properties);
    // we are going to modify the properties as we parse the config
    properties = new HashMap<String, String>(properties);
    // A missing or alias ("EMBEDDED") source type is rewritten to the
    // concrete embedded source type constant.
    if (!properties.containsKey(SOURCE_TYPE) ||
        SOURCE_TYPE_EMBEDDED_ALIAS.equalsIgnoreCase(properties.get(SOURCE_TYPE))) {
      properties.put(SOURCE_TYPE, SOURCE_TYPE_EMBEDDED);
    }
    String sinkNames = properties.remove(SINKS);
    // Component names derive from the agent name with whitespace stripped so
    // they are safe to embed in dotted property keys.
    String strippedName = name.replaceAll("\\s+","");
    String sourceName = "source-" + strippedName;
    String channelName = "channel-" + strippedName;
    String sinkGroupName = "sink-group-" + strippedName;
    /*
     * Now we are going to process the user supplied configuration
     * and generate an agent configuration. This is only to supply
     * a simpler client api than passing in an entire agent configuration.
     */
    // user supplied config -> agent configuration
    Map<String, String> result = Maps.newHashMap();
    /*
     * First we are going to setup all the root level pointers. I.E
     * point the agent at the components, sink group at sinks, and
     * source at the channel.
     */
    // point agent at source
    result.put(join(name, BasicConfigurationConstants.CONFIG_SOURCES),
        sourceName);
    // point agent at channel
    result.put(join(name, BasicConfigurationConstants.CONFIG_CHANNELS),
        channelName);
    // point agent at sinks
    result.put(join(name, BasicConfigurationConstants.CONFIG_SINKS),
        sinkNames);
    // points the agent at the sinkgroup
    result.put(join(name, BasicConfigurationConstants.CONFIG_SINKGROUPS),
        sinkGroupName);
    // points the sinkgroup at the sinks
    result.put(join(name, BasicConfigurationConstants.CONFIG_SINKGROUPS,
        sinkGroupName, SINKS), sinkNames);
    // points the source at the channel
    result.put(join(name,
        BasicConfigurationConstants.CONFIG_SOURCES, sourceName,
        BasicConfigurationConstants.CONFIG_CHANNELS), channelName);
    // Properties will be modified during iteration so we need a
    // copy of the keys.
    Set<String> userProvidedKeys = new HashSet<String>(properties.keySet());
    /*
     * Second process the sink configuration and point the sinks
     * at the channel.
     */
    for (String sink : sinkNames.split("\\s+")) {
      for (String key : userProvidedKeys) {
        String value = properties.get(key);
        // Keys of the form "<sinkName>.<suffix>" belong to this sink; move
        // them under the agent's sinks namespace and consume them so the
        // third pass below does not see them again.
        if (key.startsWith(sink + SEPERATOR)) {
          properties.remove(key);
          result.put(join(name,
              BasicConfigurationConstants.CONFIG_SINKS, key), value);
        }
      }
      // point the sink at the channel
      result.put(join(name,
          BasicConfigurationConstants.CONFIG_SINKS, sink,
          BasicConfigurationConstants.CONFIG_CHANNEL), channelName);
    }
    /*
     * Third, process all remaining configuration items, prefixing them
     * correctly and then passing them on to the agent.
     */
    userProvidedKeys = new HashSet<String>(properties.keySet());
    for (String key : userProvidedKeys) {
      String value = properties.get(key);
      if (key.startsWith(SOURCE_PREFIX)) {
        // users use `source' but agent needs the actual source name
        key = key.replaceFirst(SOURCE, sourceName);
        result.put(join(name,
            BasicConfigurationConstants.CONFIG_SOURCES, key), value);
      } else if (key.startsWith(CHANNEL_PREFIX)) {
        // users use `channel' but agent needs the actual channel name
        key = key.replaceFirst(CHANNEL, channelName);
        result.put(join(name,
            BasicConfigurationConstants.CONFIG_CHANNELS, key), value);
      } else if (key.startsWith(SINK_PROCESSOR_PREFIX)) {
        // agent.sinkgroups.sinkgroup.processor.*
        result.put(join(name, BasicConfigurationConstants.CONFIG_SINKGROUPS,
            sinkGroupName, key), value);
      } else {
        // XXX should we simply ignore this?
        throw new FlumeException("Unknown configuration " + key);
      }
    }
    return result;
  }
private static void checkAllowed(String[] allowedTypes, String type) {
boolean isAllowed = false;
type = type.trim();
for (String allowedType : allowedTypes) {
if (allowedType.equalsIgnoreCase(type)) {
isAllowed = true;
break;
}
}
if (!isAllowed) {
throw new FlumeException("Component type of " + type + " is not in " +
"allowed types of " + Arrays.toString(allowedTypes));
}
}
private static void checkRequired(Map<String, String> properties, String name) {
if (!properties.containsKey(name)) {
throw new FlumeException("Required parameter not found " + name);
}
}
  // Joins key-path parts via the shared JOINER instance (presumably
  // configured with the component SEPERATOR, e.g. "." -- defined earlier in
  // this class, outside this view; confirm there).
  private static String join(String... parts) {
    return JOINER.join(parts);
  }
  // Utility class: all members are static, so instantiation is disallowed.
  private EmbeddedAgentConfiguration() {
  }
} | 9,875 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/EmbeddedAgent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.SinkRunner;
import org.apache.flume.Source;
import org.apache.flume.SourceRunner;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.LogPrivacyUtil;
import org.apache.flume.lifecycle.LifecycleAware;
import org.apache.flume.lifecycle.LifecycleState;
import org.apache.flume.lifecycle.LifecycleSupervisor;
import org.apache.flume.lifecycle.LifecycleSupervisor.SupervisorPolicy;
import org.apache.flume.node.MaterializedConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
 * EmbeddedAgent gives Flume users the ability to embed simple agents in
 * applications. This Agent is meant to be much simpler than a traditional
 * agent and as such it's more restrictive than what can be configured
 * for a traditional agent. For specifics see the Flume Developer Guide.
 *
 * <p>Lifecycle state machine (see {@link State}):
 * NEW --configure()--> STOPPED --start()--> STARTED --stop()--> STOPPED.
 * Events may only be injected via put()/putAll() while STARTED.
 * NOTE(review): lifecycle transitions appear to assume single-threaded
 * callers -- state is not synchronized; confirm before concurrent use.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class EmbeddedAgent {
  private static final Logger LOGGER = LoggerFactory
      .getLogger(EmbeddedAgent.class);
  // Produces the materialized source/channel/sink configuration; injectable
  // so tests can supply mocked components.
  private final MaterializedConfigurationProvider configurationProvider;
  // Agent name, used to derive component names in the generated config.
  private final String name;
  // Monitors started components using an always-restart policy.
  private final LifecycleSupervisor supervisor;
  // Current position in the NEW -> STOPPED -> STARTED state machine.
  private State state;
  // The single source, channel, and sink group this simplified agent
  // supports; populated by doConfigure().
  private SourceRunner sourceRunner;
  private Channel channel;
  private SinkRunner sinkRunner;
  // Set in start(); the entry point through which put()/putAll() inject
  // events. Cleared again in stop().
  private EmbeddedSource embeddedSource;
  @InterfaceAudience.Private
  @InterfaceStability.Unstable
  @VisibleForTesting
  EmbeddedAgent(MaterializedConfigurationProvider configurationProvider,
      String name) {
    this.configurationProvider = configurationProvider;
    this.name = name;
    state = State.NEW;
    supervisor = new LifecycleSupervisor();
  }
  public EmbeddedAgent(String name) {
    this(new MaterializedConfigurationProvider(), name);
  }
  /**
   * Configures the embedded agent. Can only be called after the object
   * is created or after the stop() method is called.
   *
   * @param properties source, channel, and sink group configuration
   * @throws FlumeException if a component is unable to be found or configured
   * @throws IllegalStateException if called while the agent is started
   */
  public void configure(Map<String, String> properties)
      throws FlumeException {
    if (state == State.STARTED) {
      throw new IllegalStateException("Cannot be configured while started");
    }
    doConfigure(properties);
    // Successful configuration moves the agent to STOPPED, making start()
    // legal (both from NEW and after a previous stop()).
    state = State.STOPPED;
  }
  /**
   * Started the agent. Can only be called after a successful call to
   * configure().
   *
   * @throws FlumeException if a component cannot be started
   * @throws IllegalStateException if the agent has not been configured or is
   * already started
   */
  public void start()
      throws FlumeException {
    if (state == State.STARTED) {
      throw new IllegalStateException("Cannot be started while started");
    } else if (state == State.NEW) {
      throw new IllegalStateException("Cannot be started before being " +
          "configured");
    }
    // This check needs to be done before doStart(),
    // as doStart() accesses sourceRunner.getSource()
    Source source = Preconditions.checkNotNull(sourceRunner.getSource(),
        "Source runner returned null source");
    if (source instanceof EmbeddedSource) {
      embeddedSource = (EmbeddedSource)source;
    } else {
      // Only EmbeddedSource supports direct put()/putAll() injection.
      throw new IllegalStateException("Unknown source type: " + source.getClass().getName());
    }
    doStart();
    state = State.STARTED;
  }
  /**
   * Stops the agent. Can only be called after a successful call to start().
   * After a call to stop(), the agent can be re-configured with the
   * configure() method or re-started with the start() method.
   *
   * @throws FlumeException if a component cannot be stopped
   * @throws IllegalStateException if the agent is not started
   */
  public void stop()
      throws FlumeException {
    if (state != State.STARTED) {
      throw new IllegalStateException("Cannot be stopped unless started");
    }
    // The supervisor owns the components once started; stopping it tears
    // them down.
    supervisor.stop();
    embeddedSource = null;
    state = State.STOPPED;
  }
  // Translates the user-facing embedded configuration into a full agent
  // configuration and materializes it, enforcing the one-source /
  // one-channel / one-sink-group restriction of embedded agents.
  private void doConfigure(Map<String, String> properties) {
    properties = EmbeddedAgentConfiguration.configure(name, properties);
    // Config values may contain sensitive data; only log when privacy
    // settings explicitly allow it.
    if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) {
      LOGGER.debug("Agent configuration values");
      for (String key : new TreeSet<String>(properties.keySet())) {
        LOGGER.debug(key + " = " + properties.get(key));
      }
    }
    MaterializedConfiguration conf = configurationProvider.get(name,
        properties);
    Map<String, SourceRunner> sources = conf.getSourceRunners();
    if (sources.size() != 1) {
      throw new FlumeException("Expected one source and got " +
          sources.size());
    }
    Map<String, Channel> channels = conf.getChannels();
    if (channels.size() != 1) {
      throw new FlumeException("Expected one channel and got " +
          channels.size());
    }
    Map<String, SinkRunner> sinks = conf.getSinkRunners();
    if (sinks.size() != 1) {
      throw new FlumeException("Expected one sink group and got " +
          sinks.size());
    }
    this.sourceRunner = sources.values().iterator().next();
    this.channel = channels.values().iterator().next();
    this.sinkRunner = sinks.values().iterator().next();
  }
  /**
   * Adds event to the channel owned by the agent. Note however, that the
   * event is not copied and as such, the byte array and headers cannot
   * be re-used by the caller.
   * @param event
   * @throws EventDeliveryException if unable to add event to channel
   * @throws IllegalStateException if the agent is not started
   */
  public void put(Event event) throws EventDeliveryException {
    if (state != State.STARTED) {
      throw new IllegalStateException("Cannot put events unless started");
    }
    try {
      embeddedSource.put(event);
    } catch (ChannelException ex) {
      // Wrap channel failures (e.g. full channel) in the checked exception
      // callers are expected to handle, preserving the cause.
      throw new EventDeliveryException("Embedded agent " + name +
          ": Unable to process event: " + ex.getMessage(), ex);
    }
  }
  /**
   * Adds events to the channel owned by the agent. Note however, that the
   * event is not copied and as such, the byte array and headers cannot
   * be re-used by the caller.
   * @param events
   * @throws EventDeliveryException if unable to add event to channel
   * @throws IllegalStateException if the agent is not started
   */
  public void putAll(List<Event> events) throws EventDeliveryException {
    if (state != State.STARTED) {
      throw new IllegalStateException("Cannot put events unless started");
    }
    try {
      embeddedSource.putAll(events);
    } catch (ChannelException ex) {
      throw new EventDeliveryException("Embedded agent " + name +
          ": Unable to process event: " + ex.getMessage(), ex);
    }
  }
  // Starts components channel-first, source-last (so the channel is ready
  // before anything feeds it), then hands them to the supervisor. On any
  // failure, best-effort stops whatever was started and shuts the
  // supervisor down.
  private void doStart() {
    boolean error = true;
    try {
      channel.start();
      sinkRunner.start();
      sourceRunner.start();
      supervisor.supervise(channel,
          new SupervisorPolicy.AlwaysRestartPolicy(), LifecycleState.START);
      supervisor.supervise(sinkRunner,
          new SupervisorPolicy.AlwaysRestartPolicy(), LifecycleState.START);
      supervisor.supervise(sourceRunner,
          new SupervisorPolicy.AlwaysRestartPolicy(), LifecycleState.START);
      error = false;
    } finally {
      if (error) {
        stopLogError(sourceRunner);
        stopLogError(channel);
        stopLogError(sinkRunner);
        supervisor.stop();
      }
    }
  }
  // Stops a component if (and only if) it reached the START state, logging
  // rather than propagating any failure -- used on the doStart() error path
  // where the original exception must not be masked.
  private void stopLogError(LifecycleAware lifeCycleAware) {
    try {
      if (LifecycleState.START.equals(lifeCycleAware.getLifecycleState())) {
        lifeCycleAware.stop();
      }
    } catch (Exception e) {
      LOGGER.warn("Exception while stopping " + lifeCycleAware, e);
    }
  }
  // Lifecycle states; see the class javadoc for the transition diagram.
  private static enum State {
    NEW(),
    STOPPED(),
    STARTED();
  }
}
| 9,876 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/MemoryConfigurationProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.Map;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.FlumeConfiguration;
import org.apache.flume.node.AbstractConfigurationProvider;
/**
 * Simplest possible {@link AbstractConfigurationProvider}: turns a given
 * in-memory property map and agent name directly into a
 * {@link FlumeConfiguration} object, with no file or re-load handling.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
class MemoryConfigurationProvider extends AbstractConfigurationProvider {

  /** Raw agent properties held in memory; never re-read from anywhere. */
  private final Map<String, String> agentProperties;

  MemoryConfigurationProvider(String name, Map<String, String> properties) {
    super(name);
    this.agentProperties = properties;
  }

  /** Materializes a {@link FlumeConfiguration} straight from the stored map. */
  @Override
  protected FlumeConfiguration getFlumeConfiguration() {
    return new FlumeConfiguration(agentProperties);
  }
}
| 9,877 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/MaterializedConfigurationProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.Map;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.node.MaterializedConfiguration;
/**
 * Provides a {@link MaterializedConfiguration} for a given agent name and
 * property map. Exists purely as a testability seam: tests can subclass or
 * mock it to supply fake Source, Sink, and Channel components instead of
 * instantiating real ones.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
class MaterializedConfigurationProvider {

  MaterializedConfiguration get(String name, Map<String, String> properties) {
    // Delegate straight to an in-memory provider; no state is kept here.
    return new MemoryConfigurationProvider(name, properties).getConfiguration();
  }
}
| 9,878 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/EmbeddedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.agent.embedded;
import java.util.List;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDrivenSource;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.Configurable;
import org.apache.flume.source.AbstractSource;
/**
 * Pass-through source that gives the Embedded Agent direct programmatic
 * access to the channel: events handed to {@link #put} and {@link #putAll}
 * are forwarded straight through the channel processor.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class EmbeddedSource extends AbstractSource implements EventDrivenSource, Configurable {

  @Override
  public void configure(Context context) {
    // No configuration is needed for this source.
  }

  /** Delivers a single event to the configured channel(s). */
  public void put(Event event) throws ChannelException {
    getChannelProcessor().processEvent(event);
  }

  /** Delivers a batch of events to the configured channel(s). */
  public void putAll(List<Event> events) throws ChannelException {
    getChannelProcessor().processEventBatch(events);
  }
}
| 9,879 |
0 | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent | Create_ds/flume/flume-ng-embedded-agent/src/main/java/org/apache/flume/agent/embedded/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package provides Flume users the ability to embed simple agents
* in applications. For specific and up to date information, please see
* the Flume Developer Guide.
*/
package org.apache.flume.agent.embedded;
| 9,880 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/TestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
/**
 * Tests for {@code Context}: round-trips values of every supported wrapper
 * type through the typed getters, and checks defaults, clear(), sub-property
 * extraction, and bulk put.
 */
public class TestContext {

  private Context context;

  @Before
  public void setUp() {
    context = new Context();
  }

  @Test
  public void testPutGet() {
    assertEquals("Context is empty", 0, context.getParameters().size());

    context.put("test", "value");
    assertEquals("value", context.getString("test"));
    context.clear();
    assertNull(context.getString("test"));
    assertEquals("value", context.getString("test", "value"));

    // valueOf / cached constants replace the deprecated boxing constructors
    // (new Boolean(..), new Integer(..), ...); equality semantics are
    // unchanged since assertEquals compares by equals().
    context.put("test", "true");
    assertEquals(Boolean.TRUE, context.getBoolean("test"));
    context.clear();
    assertNull(context.getBoolean("test"));
    assertEquals(Boolean.TRUE, context.getBoolean("test", true));

    context.put("test", "1");
    assertEquals(Integer.valueOf(1), context.getInteger("test"));
    context.clear();
    assertNull(context.getInteger("test"));
    assertEquals(Integer.valueOf(1), context.getInteger("test", 1));

    context.put("test", String.valueOf(Long.MAX_VALUE));
    assertEquals(Long.valueOf(Long.MAX_VALUE), context.getLong("test"));
    context.clear();
    assertNull(context.getLong("test"));
    assertEquals(Long.valueOf(Long.MAX_VALUE), context.getLong("test", Long.MAX_VALUE));

    context.put("test", "0.1");
    assertEquals(Float.valueOf(0.1f), context.getFloat("test"));
    context.clear();
    assertNull(context.getFloat("test"));
    assertEquals(Float.valueOf(1.1f), context.getFloat("test", 1.1F));

    context.put("test", "0.1");
    assertEquals(Double.valueOf(0.1), context.getDouble("test"));
    context.clear();
    assertNull(context.getDouble("test"));
    assertEquals(Double.valueOf(1.1), context.getDouble("test", 1.1));
  }

  /** Keys matching a prefix are returned with the prefix stripped. */
  @Test
  public void testSubProperties() {
    context.put("my.key", "1");
    context.put("otherKey", "otherValue");
    assertEquals(ImmutableMap.of("key", "1"), context.getSubProperties("my."));
  }

  /** clear() removes previously stored values. */
  @Test
  public void testClear() {
    context.put("test", "1");
    context.clear();
    assertNull(context.getInteger("test"));
  }

  /** putAll() stores every entry of the supplied map. */
  @Test
  public void testPutAll() {
    context.putAll(ImmutableMap.of("test", "1"));
    assertEquals("1", context.getString("test"));
  }
}
| 9,881 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/TestCounterGroup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume;
import java.util.concurrent.atomic.AtomicLong;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
/** Tests basic counter creation, read, increment, and add-and-get behaviour. */
public class TestCounterGroup {

  private CounterGroup counterGroup;

  @Before
  public void setUp() {
    counterGroup = new CounterGroup();
  }

  /** Requesting a counter lazily creates it, initialized to zero. */
  @Test
  public void testGetCounter() {
    AtomicLong testCounter = counterGroup.getCounter("test");
    Assert.assertNotNull(testCounter);
    Assert.assertEquals(0, testCounter.get());
  }

  /** Reading an untouched counter yields zero. */
  @Test
  public void testGet() {
    Assert.assertEquals(0, counterGroup.get("test"));
  }

  /** Incrementing a fresh counter returns one. */
  @Test
  public void testIncrementAndGet() {
    Assert.assertEquals(1, counterGroup.incrementAndGet("test"));
  }

  /** Adding a delta to a fresh counter returns that delta. */
  @Test
  public void testAddAndGet() {
    Assert.assertEquals(13, counterGroup.addAndGet("test", 13L));
  }
}
| 9,882 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/tools/TestFlumeConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.tools;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.flume.Context;
import org.junit.Test;
import junit.framework.Assert;
/**
 * Tests for FlumeBeanConfigurator: verifies that bean properties of every
 * supported primitive type (plus String) can be populated from a Context
 * whose keys carry the "TestBean." prefix, and that private setters are
 * never invoked.
 *
 * <p>NOTE(review): the configurator presumably locates setters reflectively
 * by name, so TestBean's accessor names, signatures, and visibility are part
 * of the test contract -- confirm in FlumeBeanConfigurator before renaming
 * anything here.
 */
public class TestFlumeConfigurator {

  // Key prefix mapping Context entries onto TestBean properties.
  private String testPrefix = "TestBean.";

  /**
   * Test configuring an int. Creates a random int greater than zero
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testIntConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    // Range [1, Integer.MAX_VALUE - 1]: always nonzero so the assertion
    // below cannot pass by accident against the default value 0.
    int intValue = random.nextInt(Integer.MAX_VALUE - 1 ) + 1;
    props.put(testPrefix + "testInt", Integer.toString(intValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0, bean.getTestInt());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(intValue, bean.getTestInt());
  }

  /**
   * Test configuring an short. Creates a random short greater than zero
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testShortConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    short shortValue = (short)(random.nextInt(Short.MAX_VALUE - 1 ) + 1);
    props.put(testPrefix + "testShort", Short.toString(shortValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0, bean.getTestShort());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(shortValue, bean.getTestShort());
  }

  /**
   * Test configuring a long. Creates a random long greater than Integer.MAX_VALUE
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testLongConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    // Deliberately larger than any int so an int-based code path would fail.
    long longValue = ThreadLocalRandom.current().nextLong(Integer.MAX_VALUE, Long.MAX_VALUE);
    props.put(testPrefix + "testLong", Long.toString(longValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0, bean.getTestLong());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(longValue, bean.getTestLong());
  }

  /**
   * Test configuring an byte. Creates a random byte greater than zero
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testByteConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    byte byteValue = (byte)(random.nextInt(Byte.MAX_VALUE - 1 ) + 1);
    props.put(testPrefix + "testByte", Byte.toString(byteValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0, bean.getTestByte());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(byteValue, bean.getTestByte());
  }

  /**
   * Test configuring an boolean.
   */
  @Test
  public void testBooleanConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    props.put(testPrefix + "testBoolean", "true");
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(false, bean.getTestBoolean());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(true, bean.getTestBoolean());
  }

  /**
   * Test configuring an double. Creates a random double
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testDoubleConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    double doubleValue = random.nextDouble();
    props.put(testPrefix + "testDouble", Double.toString(doubleValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0.0d, bean.getTestDouble());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(doubleValue, bean.getTestDouble());
  }

  /**
   * Test configuring an float. Creates a random float
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testFloatConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    float floatValue = random.nextFloat();
    props.put(testPrefix + "testFloat", Float.toString(floatValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0.0f, bean.getTestFloat());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(floatValue, bean.getTestFloat());
  }

  /**
   * Test configuring a String. Creates a random String (UUID in this case)
   * and then uses FlumeBeanConfigurator to test it can be set.
   */
  @Test
  public void testStringConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    String stringValue = UUID.randomUUID().toString();
    props.put(testPrefix + "testString", stringValue);
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals("", bean.getTestString());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    Assert.assertEquals(stringValue, bean.getTestString());
  }

  /**
   * Test that is is not possible to configure using private setters.
   */
  @Test
  public void testPrivateConfiguration() {
    Map<String, String> props = new HashMap<String, String>();
    Random random = new Random();
    int intValue = random.nextInt(Integer.MAX_VALUE - 1 ) + 1;
    props.put(testPrefix + "privateInt", Integer.toString(intValue));
    Context context = new Context(props);
    TestBean bean = new TestBean();
    Assert.assertEquals(0, bean.getPrivateInt());
    FlumeBeanConfigurator.setConfigurationFields(bean, context);
    // The private setter must not be invoked; privateInt stays at its
    // default (0), which is never equal to intValue (>= 1).
    Assert.assertTrue(bean.getPrivateInt() != intValue);
  }

  /**
   * Bean fixture populated by FlumeBeanConfigurator in the tests above.
   * One property per supported type; all default to the Java zero value
   * (or the empty string). NOTE(review): consider making this a static
   * nested class -- as a non-static inner class it carries a hidden
   * reference to the enclosing test; left unchanged here since the
   * configurator's reflective behaviour against inner classes has not
   * been verified.
   */
  public class TestBean {

    private int testInt = 0;
    private short testShort = 0;
    private long testLong = 0;
    private byte testByte = 0;
    private boolean testBoolean = false;
    private float testFloat = 0f;
    private double testDouble = 0d;
    private String testString = "";
    // Only reachable through private accessors; exercised by
    // testPrivateConfiguration() to prove private setters are ignored.
    private int privateInt = 0;

    public int getTestInt() {
      return testInt;
    }

    public void setTestInt(int testInt) {
      this.testInt = testInt;
    }

    public short getTestShort() {
      return testShort;
    }

    public void setTestShort(short testShort) {
      this.testShort = testShort;
    }

    public long getTestLong() {
      return testLong;
    }

    public void setTestLong(long testLong) {
      this.testLong = testLong;
    }

    public byte getTestByte() {
      return testByte;
    }

    public void setTestByte(byte testByte) {
      this.testByte = testByte;
    }

    public boolean getTestBoolean() {
      return testBoolean;
    }

    public void setTestBoolean(boolean testBoolean) {
      this.testBoolean = testBoolean;
    }

    public float getTestFloat() {
      return testFloat;
    }

    public void setTestFloat(float testFloat) {
      this.testFloat = testFloat;
    }

    public double getTestDouble() {
      return testDouble;
    }

    public void setTestDouble(double testDouble) {
      this.testDouble = testDouble;
    }

    public String getTestString() {
      return testString;
    }

    public void setTestString(String testString) {
      this.testString = testString;
    }

    // Intentionally private: see testPrivateConfiguration().
    private int getPrivateInt() {
      return privateInt;
    }

    // Intentionally private: must never be invoked by the configurator.
    private void setPrivateInt(int privateInt) {
      this.privateInt = privateInt;
    }
  }
}
| 9,883 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/tools/TestTimestampRoundDownUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.tools;
import java.util.Calendar;
import java.util.SimpleTimeZone;
import java.util.TimeZone;
import junit.framework.Assert;
import org.junit.Test;
import javax.annotation.Nullable;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertThat;
public class TestTimestampRoundDownUtil {

  // Raw offset of 1 ms: enough to make results differ from the default zone
  // while keeping the arithmetic easy to reason about.
  private static final TimeZone CUSTOM_TIMEZONE = new SimpleTimeZone(1, "custom-timezone");
  private static final Calendar BASE_CALENDAR_WITH_DEFAULT_TIMEZONE =
      createCalendar(2012, 5, 15, 15, 12, 54, 0, null);
  private static final Calendar BASE_CALENDAR_WITH_CUSTOM_TIMEZONE =
      createCalendar(2012, 5, 15, 15, 12, 54, 0, CUSTOM_TIMEZONE);

  /**
   * Tests if the timestamp with the default timezone is properly rounded down
   * to 60 seconds.
   */
  @Test
  public void testRoundDownTimeStampSeconds() {
    Calendar cal = BASE_CALENDAR_WITH_DEFAULT_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 15, 12, 0, 0, null);
    long timeToVerify = cal2.getTimeInMillis();
    long ret = TimestampRoundDownUtil.roundDownTimeStampSeconds(cal.getTimeInMillis(), 60);
    // JUnit convention: expected value first, actual value second.
    Assert.assertEquals(timeToVerify, ret);
  }

  /**
   * Tests if the timestamp with the custom timezone is properly rounded down
   * to 60 seconds. Rounding without passing the timezone must give a
   * different result, rounding with it must match exactly.
   */
  @Test
  public void testRoundDownTimeStampSecondsWithTimeZone() {
    Calendar cal = BASE_CALENDAR_WITH_CUSTOM_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 15, 12, 0, 0, CUSTOM_TIMEZONE);
    long timeToVerify = cal2.getTimeInMillis();
    long withoutTimeZone = TimestampRoundDownUtil.roundDownTimeStampSeconds(
        cal.getTimeInMillis(), 60);
    long withTimeZone = TimestampRoundDownUtil.roundDownTimeStampSeconds(
        cal.getTimeInMillis(), 60, CUSTOM_TIMEZONE);
    assertThat(withoutTimeZone, not(equalTo(timeToVerify)));
    // Was assertEquals(actual, expected) in the original; arguments are now
    // in the documented (expected, actual) order for correct failure messages.
    Assert.assertEquals(timeToVerify, withTimeZone);
  }

  /**
   * Tests if the timestamp with the default timezone is properly rounded down
   * to 5 minutes.
   */
  @Test
  public void testRoundDownTimeStampMinutes() {
    Calendar cal = BASE_CALENDAR_WITH_DEFAULT_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 15, 10, 0, 0, null);
    long timeToVerify = cal2.getTimeInMillis();
    long ret = TimestampRoundDownUtil.roundDownTimeStampMinutes(cal.getTimeInMillis(), 5);
    Assert.assertEquals(timeToVerify, ret);
  }

  /**
   * Tests if the timestamp with the custom timezone is properly rounded down
   * to 5 minutes.
   */
  @Test
  public void testRoundDownTimeStampMinutesWithTimeZone() {
    Calendar cal = BASE_CALENDAR_WITH_CUSTOM_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 15, 10, 0, 0, CUSTOM_TIMEZONE);
    long timeToVerify = cal2.getTimeInMillis();
    long withoutTimeZone = TimestampRoundDownUtil.roundDownTimeStampMinutes(
        cal.getTimeInMillis(), 5);
    long withTimeZone = TimestampRoundDownUtil.roundDownTimeStampMinutes(
        cal.getTimeInMillis(), 5, CUSTOM_TIMEZONE);
    assertThat(withoutTimeZone, not(equalTo(timeToVerify)));
    Assert.assertEquals(timeToVerify, withTimeZone);
  }

  /**
   * Tests if the timestamp with the default timezone is properly rounded down
   * to 2 hours.
   */
  @Test
  public void testRoundDownTimeStampHours() {
    Calendar cal = BASE_CALENDAR_WITH_DEFAULT_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 14, 0, 0, 0, null);
    long timeToVerify = cal2.getTimeInMillis();
    long ret = TimestampRoundDownUtil.roundDownTimeStampHours(cal.getTimeInMillis(), 2);
    Assert.assertEquals(timeToVerify, ret);
  }

  /**
   * Tests if the timestamp with the custom timezone is properly rounded down
   * to 2 hours.
   */
  @Test
  public void testRoundDownTimeStampHoursWithTimeZone() {
    Calendar cal = BASE_CALENDAR_WITH_CUSTOM_TIMEZONE;
    Calendar cal2 = createCalendar(2012, 5, 15, 14, 0, 0, 0, CUSTOM_TIMEZONE);
    long timeToVerify = cal2.getTimeInMillis();
    long withoutTimeZone = TimestampRoundDownUtil.roundDownTimeStampHours(
        cal.getTimeInMillis(), 2);
    long withTimeZone = TimestampRoundDownUtil.roundDownTimeStampHours(
        cal.getTimeInMillis(), 2, CUSTOM_TIMEZONE);
    assertThat(withoutTimeZone, not(equalTo(timeToVerify)));
    Assert.assertEquals(timeToVerify, withTimeZone);
  }

  /**
   * Builds a calendar for the given field values in the given timezone,
   * or in the JVM default zone when {@code timeZone} is null.
   */
  private static Calendar createCalendar(int year, int month, int day,
                                         int hour, int minute, int second, int ms,
                                         @Nullable TimeZone timeZone) {
    Calendar cal = (timeZone == null) ? Calendar.getInstance() : Calendar.getInstance(timeZone);
    cal.set(year, month, day, hour, minute, second);
    cal.set(Calendar.MILLISECOND, ms);
    return cal;
  }
}
| 9,884 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/tools/TestVersionInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.tools;
import static org.junit.Assert.*;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestVersionInfo {

  private static final Logger logger = LoggerFactory
      .getLogger(TestVersionInfo.class);

  /**
   * Verifies that the build injected real version metadata, i.e. none of the
   * queried fields fell back to the "Unknown" placeholder, and that the
   * composite build-version string follows the expected format.
   */
  @Test
  public void testVersionInfoUnknown() {
    logger.debug("Flume " + VersionInfo.getVersion());
    logger.debug("Subversion " + VersionInfo.getUrl() + " -r " + VersionInfo.getRevision());
    logger.debug("Compiled by " + VersionInfo.getUser() + " on " + VersionInfo.getDate());
    logger.debug("From source with checksum " + VersionInfo.getSrcChecksum());
    logger.debug("Flume " + VersionInfo.getBuildVersion());

    // assertFalse reads more directly than assertTrue(!...).
    assertFalse("getVersion returned Unknown",
        VersionInfo.getVersion().equals("Unknown"));
    assertFalse("getUser returned Unknown",
        VersionInfo.getUser().equals("Unknown"));
    assertFalse("getUrl returned Unknown",
        VersionInfo.getUrl().equals("Unknown"));
    assertFalse("getSrcChecksum returned Unknown",
        VersionInfo.getSrcChecksum().equals("Unknown"));

    // check getBuildVersion() return format
    assertTrue("getBuildVersion returned unexpected format",
        VersionInfo.getBuildVersion().matches(".+from.+by.+on.+source checksum.+"));

    // Revision/branch may legitimately be "Unknown" when built without svn
    // or git, but they must never be null.
    assertNotNull("getRevision returned null", VersionInfo.getRevision());
    assertNotNull("getBranch returned null", VersionInfo.getBranch());
  }
}
| 9,885 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TransientPositionTracker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import java.io.IOException;
/**
 * A {@link PositionTracker} that keeps the position only in memory.
 * Nothing is persisted, so the stored position is lost when the instance
 * is discarded; intended for tests where durability is unnecessary.
 */
public class TransientPositionTracker implements PositionTracker {

  private final String target;
  private long position;

  /**
   * @param target identifier of the entity whose position is being tracked
   */
  public TransientPositionTracker(String target) {
    this.target = target;
  }

  @Override
  public String getTarget() {
    return target;
  }

  @Override
  public void storePosition(long newPosition) throws IOException {
    // Just remember it; no I/O is performed despite the throws clause,
    // which exists only to satisfy the interface.
    position = newPosition;
  }

  @Override
  public long getPosition() {
    return position;
  }

  @Override
  public void close() throws IOException {
    // nothing to release
  }
}
| 9,886 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestBodyTextEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Test;
public class TestBodyTextEventSerializer {

  File testFile = new File("src/test/resources/events.txt");

  /**
   * By default the "text" serializer appends a newline after each event
   * body, so three bodies without newlines must come back as three lines.
   */
  @Test
  public void testWithNewline() throws FileNotFoundException, IOException {
    OutputStream out = new FileOutputStream(testFile);
    EventSerializer serializer =
        EventSerializerFactory.getInstance("text", new Context(), out);
    serializer.afterCreate();
    serializer.write(EventBuilder.withBody("event 1", Charsets.UTF_8));
    serializer.write(EventBuilder.withBody("event 2", Charsets.UTF_8));
    serializer.write(EventBuilder.withBody("event 3", Charsets.UTF_8));
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();
    assertFileHoldsThreeEvents();
    FileUtils.forceDelete(testFile);
  }

  /**
   * With appendNewline=false, bodies that already end in '\n' must be
   * written verbatim, producing the same three lines.
   */
  @Test
  public void testNoNewline() throws FileNotFoundException, IOException {
    OutputStream out = new FileOutputStream(testFile);
    Context context = new Context();
    context.put("appendNewline", "false");
    EventSerializer serializer =
        EventSerializerFactory.getInstance("text", context, out);
    serializer.afterCreate();
    serializer.write(EventBuilder.withBody("event 1\n", Charsets.UTF_8));
    serializer.write(EventBuilder.withBody("event 2\n", Charsets.UTF_8));
    serializer.write(EventBuilder.withBody("event 3\n", Charsets.UTF_8));
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();
    assertFileHoldsThreeEvents();
    FileUtils.forceDelete(testFile);
  }

  /**
   * Asserts that testFile contains exactly the lines "event 1".."event 3".
   * try-with-resources guarantees the reader is closed even when an
   * assertion fails (the original code leaked the reader in that case).
   */
  private void assertFileHoldsThreeEvents() throws IOException {
    try (BufferedReader reader = new BufferedReader(new FileReader(testFile))) {
      Assert.assertEquals("event 1", reader.readLine());
      Assert.assertEquals("event 2", reader.readLine());
      Assert.assertEquals("event 3", reader.readLine());
      Assert.assertNull(reader.readLine());
    }
  }
}
| 9,887 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/SyslogAvroEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.conf.LogPrivacyUtil;
import org.apache.flume.serialization.SyslogAvroEventSerializer.SyslogEvent;
import org.apache.flume.source.SyslogUtils;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class exists to give an idea of how to use the AvroEventWriter
* and is not intended for inclusion in the Flume core.<br/>
* Problems with it are:<br/>
* (1) assumes very little parsing is done at the first hop (more TBD)<br/>
* (2) no field has been defined for use as a UUID for deduping<br/>
* (3) tailored to syslog messages but not specific to any application<br/>
* (4) not efficient about data copying from an implementation perspective<br/>
* Often, it makes more sense to parse your (meta-)data out of the message part
* itself and then store that in an application-specific Avro schema.
*/
public class SyslogAvroEventSerializer
    extends AbstractAvroEventSerializer<SyslogEvent> {

  // rfc3164 timestamps appear with either a two-digit day or a single-digit
  // day; both parsers are tried in turn (see parseRfc3164Date).
  private static final DateTimeFormatter dateFmt1 =
      DateTimeFormat.forPattern("MMM dd HH:mm:ss").withZoneUTC();
  private static final DateTimeFormatter dateFmt2 =
      DateTimeFormat.forPattern("MMM d HH:mm:ss").withZoneUTC();

  private static final Logger logger =
      LoggerFactory.getLogger(SyslogAvroEventSerializer.class);

  // It's usually better to embed this schema in the class as a string.
  // Avro does this for you if you generate Java classes from a schema file.
  // But since this is a test class, having the schema in an .avsc file is more
  // readable. Should probably just use the maven avro plugin to generate
  // the inner SyslogEvent class from this file.
  private static final File schemaFile =
      new File("src/test/resources/syslog_event.avsc");

  private final OutputStream out;
  private final Schema schema;

  /**
   * Creates a serializer that writes Avro records to the given stream.
   *
   * @param out destination stream for the serialized records
   * @throws IOException if the schema file cannot be read or parsed
   */
  public SyslogAvroEventSerializer(OutputStream out) throws IOException {
    this.out = out;
    this.schema = new Schema.Parser().parse(schemaFile);
  }

  @Override
  protected OutputStream getOutputStream() {
    return out;
  }

  @Override
  protected Schema getSchema() {
    return schema;
  }

  /**
   * Very simple rfc3164 parser: extracts priority (facility/severity),
   * timestamp and hostname in order, treating whatever remains as the
   * message text.
   */
  @Override
  protected SyslogEvent convert(Event event) {
    SyslogEvent sle = new SyslogEvent();

    // Stringify body so it's easy to parse.
    // This is a pretty inefficient way to do it.
    String msg = new String(event.getBody(), Charsets.UTF_8);

    // parser read pointer
    int seek = 0;

    // Check Flume headers to see if we came from SyslogTcp(or UDP) Source,
    // which at the time of this writing only parses the priority.
    // This is a bit schizophrenic and it should parse all the fields or none.
    Map<String, String> headers = event.getHeaders();
    boolean fromSyslogSource = false;
    if (headers.containsKey(SyslogUtils.SYSLOG_FACILITY)) {
      fromSyslogSource = true;
      int facility = Integer.parseInt(headers.get(SyslogUtils.SYSLOG_FACILITY));
      sle.setFacility(facility);
    }
    if (headers.containsKey(SyslogUtils.SYSLOG_SEVERITY)) {
      fromSyslogSource = true;
      int severity = Integer.parseInt(headers.get(SyslogUtils.SYSLOG_SEVERITY));
      sle.setSeverity(severity);
    }

    // assume the message was received raw (maybe via NetcatSource)
    // parse the priority string, e.g. "<13>": priority = facility * 8 + severity
    if (!fromSyslogSource) {
      if (msg.charAt(0) == '<') {
        int end = msg.indexOf(">");
        if (end > -1) {
          seek = end + 1;
          String priStr = msg.substring(1, end);
          int priority = Integer.parseInt(priStr);
          int severity = priority % 8;
          int facility = (priority - severity) / 8;
          sle.setFacility(facility);
          sle.setSeverity(severity);
        }
      }
    }

    // parse the timestamp (rfc3164 timestamps are a fixed 15 characters)
    String timestampStr = msg.substring(seek, seek + 15);
    long ts = parseRfc3164Date(timestampStr);
    if (ts != 0) {
      sle.setTimestamp(ts);
      seek += 15 + 1; // space after timestamp
    }

    // parse the hostname: the token up to the next space
    int nextSpace = msg.indexOf(' ', seek);
    if (nextSpace > -1) {
      String hostname = msg.substring(seek, nextSpace);
      sle.setHostname(hostname);
      seek = nextSpace + 1;
    }

    // everything else is the message
    String actualMessage = msg.substring(seek);
    sle.setMessage(actualMessage);

    // Raw event data may be sensitive; only log it when explicitly allowed.
    if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
      logger.debug("Serialized event as: {}", sle);
    }

    return sle;
  }

  /**
   * Returns epoch time in millis, or 0 if the string cannot be parsed.
   * We use two date formats because the date spec in rfc3164 is kind of weird.
   * <br/>
   * <b>Warning:</b> logic is used here to determine the year even though it's
   * not part of the timestamp format, and we assume that the machine running
   * Flume has a clock that is at least close to the same day as the machine
   * that generated the event. We also assume that the event was generated
   * recently.
   */
  private static long parseRfc3164Date(String in) {
    DateTime date = null;
    try {
      date = dateFmt1.parseDateTime(in);
    } catch (IllegalArgumentException e) {
      // ignore the exception, we act based on nullity of date object
      logger.debug("Date parse failed on ({}), trying single-digit date", in);
    }

    if (date == null) {
      try {
        date = dateFmt2.parseDateTime(in);
      } catch (IllegalArgumentException e) {
        // ignore the exception, we act based on nullity of date object
        logger.debug("2nd date parse failed on ({}), unknown date format", in);
      }
    }

    // hacky stuff to try and deal with boundary cases, i.e. new year's eve.
    // rfc3164 dates are really dumb.
    // NB: cannot handle replaying of old logs or going back to the future
    if (date != null) {
      DateTime now = new DateTime();
      int year = now.getYear();
      // Start by assuming the event is from the current year, then nudge the
      // year by one if that assumption puts the event more than a month away.
      DateTime corrected = date.withYear(year);

      // flume clock is ahead or there is some latency, and the year rolled
      if (corrected.isAfter(now) && corrected.minusMonths(1).isAfter(now)) {
        corrected = date.minusYears(1);
      // flume clock is behind and the year rolled
      } else if (corrected.isBefore(now) && corrected.plusMonths(1).isBefore(now)) {
        corrected = date.plusYears(1);
      }
      date = corrected;
    }

    if (date == null) {
      return 0;
    }

    return date.getMillis();
  }

  /** Factory used by EventSerializerFactory to construct this serializer. */
  public static class Builder implements EventSerializer.Builder {

    @Override
    public EventSerializer build(Context context, OutputStream out) {
      SyslogAvroEventSerializer writer = null;
      try {
        writer = new SyslogAvroEventSerializer(out);
        writer.configure(context);
      } catch (IOException e) {
        // NOTE(review): returns null on failure; callers presumably
        // null-check — confirm before relying on this.
        logger.error("Unable to parse schema file. Exception follows.", e);
      }
      return writer;
    }
  }

  // This class would ideally be generated from the avro schema file,
  // but we are letting reflection do the work instead.
  // There's no great reason not to let Avro generate it.
  /** Mutable record mirroring the fields of syslog_event.avsc. */
  public static class SyslogEvent {
    private int facility;
    private int severity;
    private long timestamp;
    private String hostname = "";
    private String message = "";

    public void setFacility(int f) {
      facility = f;
    }

    public int getFacility() {
      return facility;
    }

    public void setSeverity(int s) {
      severity = s;
    }

    public int getSeverity() {
      return severity;
    }

    public void setTimestamp(long t) {
      timestamp = t;
    }

    public long getTimestamp() {
      return timestamp;
    }

    public void setHostname(String h) {
      hostname = h;
    }

    public String getHostname() {
      return hostname;
    }

    public void setMessage(String m) {
      message = m;
    }

    public String getMessage() {
      return message;
    }

    @Override
    public String toString() {
      StringBuilder builder = new StringBuilder();
      builder.append("{ Facility: ").append(facility).append(", ");
      builder.append(" Severity: ").append(severity).append(", ");
      builder.append(" Timestamp: ").append(timestamp).append(", ");
      builder.append(" Hostname: ").append(hostname).append(", ");
      builder.append(" Message: \"").append(message).append("\" }");
      return builder.toString();
    }
  }
}
| 9,888 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestSyslogAvroEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.serialization;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.source.SyslogUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
public class TestSyslogAvroEventSerializer {

  File testFile = new File("src/test/resources/SyslogEvents.avro");
  // Schema definition the serializer under test loads itself; referenced
  // here for documentation purposes.
  File schemaFile = new File("src/test/resources/syslog_event.avsc");

  /**
   * Builds three syslog events: two pre-parsed (with facility/severity
   * headers, as SyslogTcp/UdpSource would emit them) and one raw message
   * carrying an inline priority field.
   */
  private static List<Event> generateSyslogEvents() {
    List<Event> list = Lists.newArrayList();

    Event e;

    // generate one that we supposedly parsed with SyslogTcpSource
    e = EventBuilder.withBody("Apr 7 01:00:00 host Msg 01", Charsets.UTF_8);
    e.getHeaders().put(SyslogUtils.SYSLOG_FACILITY, "1");
    e.getHeaders().put(SyslogUtils.SYSLOG_SEVERITY, "2");
    list.add(e);

    // generate another supposedly parsed with SyslogTcpSource with 2-digit date
    e = EventBuilder.withBody("Apr 22 01:00:00 host Msg 02", Charsets.UTF_8);
    e.getHeaders().put(SyslogUtils.SYSLOG_FACILITY, "1");
    e.getHeaders().put(SyslogUtils.SYSLOG_SEVERITY, "3");
    list.add(e);

    // generate a "raw" syslog event; priority 8 = facility 1, severity 0
    e = EventBuilder.withBody("<8>Apr 22 01:00:00 host Msg 03", Charsets.UTF_8);
    list.add(e);

    return list;
  }

  /**
   * Round-trip test: writes the generated events through the serializer
   * with snappy compression, then reads the Avro file back and verifies
   * the record count and the facility field of every record.
   */
  @Test
  public void test() throws FileNotFoundException, IOException {
    // Snappy currently broken on Mac in OpenJDK 7 per FLUME-2012
    Assume.assumeTrue(!"Mac OS X".equals(System.getProperty("os.name")) ||
        !System.getProperty("java.version").startsWith("1.7."));

    // create the file, write some data
    OutputStream out = new FileOutputStream(testFile);
    String builderName = SyslogAvroEventSerializer.Builder.class.getName();

    Context ctx = new Context();
    ctx.put("syncInterval", "4096");
    ctx.put("compressionCodec", "snappy");

    EventSerializer serializer =
        EventSerializerFactory.getInstance(builderName, ctx, out);
    serializer.afterCreate(); // must call this when a file is newly created

    List<Event> events = generateSyslogEvents();
    for (Event e : events) {
      serializer.write(e);
    }
    serializer.flush();
    serializer.beforeClose();
    out.flush();
    out.close();

    // now try to read the file back; close the reader even if an
    // assertion inside the loop fails (the original leaked it).
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    DataFileReader<GenericRecord> fileReader =
        new DataFileReader<GenericRecord>(testFile, reader);
    int numEvents = 0;
    try {
      GenericRecord record = new GenericData.Record(fileReader.getSchema());
      while (fileReader.hasNext()) {
        fileReader.next(record);
        int facility = (Integer) record.get("facility");
        int severity = (Integer) record.get("severity");
        long timestamp = (Long) record.get("timestamp");
        String hostname = record.get("hostname").toString();
        String message = record.get("message").toString();

        Assert.assertEquals("Facility should be 1", 1, facility);
        System.out.println(timestamp + ": " + message);
        numEvents++;
      }
    } finally {
      fileReader.close();
    }

    Assert.assertEquals("Should have found a total of 3 events", 3, numEvents);

    FileUtils.forceDelete(testFile);
  }
}
| 9,889 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestDurablePositionTracker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
public class TestDurablePositionTracker {

  private static final Logger logger = LoggerFactory.getLogger(TestDurablePositionTracker.class);

  /**
   * Stores a position, then re-opens the tracker from its meta file and
   * verifies that both the position and the original target survive.
   */
  @Test
  public void testBasicTracker() throws IOException {
    File metaFile = File.createTempFile(getClass().getName(), ".meta");
    metaFile.delete(); // the tracker must create the meta file itself
    File dataFile = File.createTempFile(getClass().getName(), ".data");
    Files.write("line 1\nline2\n", dataFile, Charsets.UTF_8);

    final long storedPosition = 7;
    PositionTracker tracker = new DurablePositionTracker(metaFile, dataFile.toString());
    Assert.assertEquals(0, tracker.getPosition());
    tracker.storePosition(storedPosition);
    Assert.assertEquals(storedPosition, tracker.getPosition());
    tracker.close();

    // Reopen with a different target string: because the meta file already
    // exists, the persisted position and target win over the constructor arg.
    tracker = new DurablePositionTracker(metaFile, "foobar");
    Assert.assertEquals(storedPosition, tracker.getPosition());
    Assert.assertEquals(dataFile.getAbsolutePath(), tracker.getTarget());
  }

  /** A complete, valid tracker file yields its last recorded position. */
  @Test
  public void testGoodTrackerFile() throws IOException, URISyntaxException {
    File trackerFile =
        new File(getClass().getResource("/TestResettableFileInputStream_1.avro").toURI());
    Assert.assertTrue(trackerFile.exists());

    PositionTracker tracker = new DurablePositionTracker(trackerFile, "foo");
    // note: 62 is the last value in this manually-created file
    Assert.assertEquals(62, tracker.getPosition());
  }

  /**
   * A truncated tracker file yields the last position that was written
   * completely; the partial trailing record is ignored.
   */
  @Test
  public void testPartialTrackerFile() throws IOException, URISyntaxException {
    File trackerFile = new File(
        getClass().getResource("/TestResettableFileInputStream_1.truncated.avro").toURI());
    Assert.assertTrue(trackerFile.exists());

    PositionTracker tracker = new DurablePositionTracker(trackerFile, "foo");
    // note: 25 is the last VALID value in this manually-created file
    Assert.assertEquals(25, tracker.getPosition());
  }
}
| 9,890 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestAvroEventDeserializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import org.apache.avro.Schema;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.commons.codec.binary.Hex;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.io.File;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.Collections;
/**
 * Tests for {@code AvroEventDeserializer}: mark/reset semantics and the two
 * schema-propagation modes (fingerprint hash vs. full schema literal) exposed
 * through event headers.
 */
public class TestAvroEventDeserializer {

  private static final Logger logger =
      LoggerFactory.getLogger(TestAvroEventDeserializer.class);

  // Shared test schema: record MyRecord { string foo; } in namespace org.apache.flume.
  private static final Schema schema;

  static {
    schema = Schema.createRecord("MyRecord", "", "org.apache.flume", false);
    Schema.Field field = new Schema.Field("foo",
        Schema.create(Schema.Type.STRING), "", null);
    schema.setFields(Collections.singletonList(field));
  }

  /**
   * Verifies mark/reset behavior: reset() with no prior mark() rewinds to the
   * start of the file; after mark(), reset() returns to the marked record.
   * The backing file holds two records, "bar" then "baz" (see newTestFile).
   */
  @Test
  public void resetTest() throws IOException {
    File tempFile = newTestFile(true);

    String target = tempFile.getAbsolutePath();
    logger.info("Target: {}", target);

    TransientPositionTracker tracker = new TransientPositionTracker(target);

    AvroEventDeserializer.Builder desBuilder =
        new AvroEventDeserializer.Builder();
    EventDeserializer deserializer = desBuilder.build(new Context(),
        new ResettableFileInputStream(tempFile, tracker));

    BinaryDecoder decoder = null;
    DatumReader<GenericRecord> reader =
        new GenericDatumReader<GenericRecord>(schema);
    // First record is "bar".
    decoder = DecoderFactory.get().binaryDecoder(
        deserializer.readEvent().getBody(), decoder);
    assertEquals("bar", reader.read(null, decoder).get("foo").toString());
    // No mark() has been called yet, so reset() rewinds to the beginning:
    // the next read yields "bar" again.
    deserializer.reset();
    decoder = DecoderFactory.get().binaryDecoder(
        deserializer.readEvent().getBody(), decoder);
    assertEquals("bar", reader.read(null, decoder).get("foo").toString());
    // Mark after "bar"; the following read returns the second record, "baz".
    deserializer.mark();
    decoder = DecoderFactory.get().binaryDecoder(
        deserializer.readEvent().getBody(), decoder);
    assertEquals("baz", reader.read(null, decoder).get("foo").toString());
    // reset() now returns to the marked position, so "baz" is read again.
    deserializer.reset();
    decoder = DecoderFactory.get().binaryDecoder(
        deserializer.readEvent().getBody(), decoder);
    assertEquals("baz", reader.read(null, decoder).get("foo").toString());
    // End of file: no more events.
    assertNull(deserializer.readEvent());
  }

  /**
   * With schema type HASH, each event must carry the CRC-64-AVRO parsing
   * fingerprint of the writer schema (hex-encoded) in its headers.
   */
  @Test
  public void testSchemaHash() throws IOException, NoSuchAlgorithmException {
    File tempFile = newTestFile(true);

    String target = tempFile.getAbsolutePath();
    logger.info("Target: {}", target);
    TransientPositionTracker tracker = new TransientPositionTracker(target);

    Context context = new Context();
    context.put(AvroEventDeserializer.CONFIG_SCHEMA_TYPE_KEY,
        AvroEventDeserializer.AvroSchemaType.HASH.toString());
    ResettableInputStream in =
        new ResettableFileInputStream(tempFile, tracker);
    EventDeserializer des =
        new AvroEventDeserializer.Builder().build(context, in);
    Event event = des.readEvent();
    String eventSchemaHash =
        event.getHeaders().get(AvroEventDeserializer.AVRO_SCHEMA_HEADER_HASH);
    String expectedSchemaHash = Hex.encodeHexString(
        SchemaNormalization.parsingFingerprint("CRC-64-AVRO", schema));
    Assert.assertEquals(expectedSchemaHash, eventSchemaHash);
  }

  /**
   * With schema type LITERAL, each event must carry the full writer schema
   * (as a JSON string) in its headers.
   */
  @Test
  public void testSchemaLiteral() throws IOException {
    File tempFile = newTestFile(true);

    String target = tempFile.getAbsolutePath();
    logger.info("Target: {}", target);
    TransientPositionTracker tracker = new TransientPositionTracker(target);

    Context context = new Context();
    context.put(AvroEventDeserializer.CONFIG_SCHEMA_TYPE_KEY,
        AvroEventDeserializer.AvroSchemaType.LITERAL.toString());
    ResettableInputStream in =
        new ResettableFileInputStream(tempFile, tracker);
    EventDeserializer des =
        new AvroEventDeserializer.Builder().build(context, in);
    Event event = des.readEvent();
    String eventSchema =
        event.getHeaders().get(AvroEventDeserializer.AVRO_SCHEMA_HEADER_LITERAL);
    Assert.assertEquals(schema.toString(), eventSchema);
  }

  /**
   * Writes a two-record Avro data file ("bar", "baz") with a sync marker
   * between the records, and returns the temp file.
   */
  private File newTestFile(boolean deleteOnExit) throws IOException {
    File tempFile = File.createTempFile("testDirectFile", "tmp");
    if (deleteOnExit) {
      tempFile.deleteOnExit();
    }

    DataFileWriter<GenericRecord> writer =
        new DataFileWriter<GenericRecord>(
            new GenericDatumWriter<GenericRecord>(schema));
    writer.create(schema, tempFile);
    GenericRecordBuilder recordBuilder;
    recordBuilder = new GenericRecordBuilder(schema);
    recordBuilder.set("foo", "bar");
    GenericRecord record = recordBuilder.build();
    writer.append(record);
    writer.sync();
    recordBuilder = new GenericRecordBuilder(schema);
    recordBuilder.set("foo", "baz");
    record = recordBuilder.build();
    writer.append(record);
    writer.sync();
    writer.flush();
    writer.close();

    return tempFile;
  }
}
| 9,891 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestHeaderAndBodyTextEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for the "header_and_text" event serializer: each output line is the
 * event's header map ({@code toString()} form) followed by the body, with or
 * without an appended newline depending on the {@code appendNewline} setting.
 */
public class TestHeaderAndBodyTextEventSerializer {

  File testFile = new File("src/test/resources/events.txt");
  // NOTE(review): expectedFile aliases testFile and is never read by either
  // test — presumably a leftover. Kept (package-visible field) to avoid an
  // interface change; confirm before removing.
  File expectedFile = new File("src/test/resources/events.txt");

  /**
   * Default configuration: a newline is appended after each serialized event.
   */
  @Test
  public void testWithNewline() throws FileNotFoundException, IOException {
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("header1", "value1");
    headers.put("header2", "value2");

    // Write three events; close the stream even if serialization throws.
    OutputStream out = new FileOutputStream(testFile);
    try {
      EventSerializer serializer =
          EventSerializerFactory.getInstance("header_and_text", new Context(), out);
      serializer.afterCreate();
      serializer.write(EventBuilder.withBody("event 1", Charsets.UTF_8, headers));
      serializer.write(EventBuilder.withBody("event 2", Charsets.UTF_8, headers));
      serializer.write(EventBuilder.withBody("event 3", Charsets.UTF_8, headers));
      serializer.flush();
      serializer.beforeClose();
      out.flush();
    } finally {
      out.close();
    }

    // Read back and verify; always release the reader and delete the test
    // file, even when an assertion fails.
    BufferedReader reader = new BufferedReader(new FileReader(testFile));
    try {
      // The expected prefix reflects HashMap iteration order of the two
      // header keys; stable for these keys on current JDKs.
      Assert.assertEquals("{header2=value2, header1=value1} event 1", reader.readLine());
      Assert.assertEquals("{header2=value2, header1=value1} event 2", reader.readLine());
      Assert.assertEquals("{header2=value2, header1=value1} event 3", reader.readLine());
      Assert.assertNull(reader.readLine());
    } finally {
      reader.close();
      FileUtils.forceDelete(testFile);
    }
  }

  /**
   * With appendNewline=false, the serializer must not add its own newline;
   * here each body already ends in '\n', so the output lines are identical
   * to the default-configuration case.
   */
  @Test
  public void testNoNewline() throws FileNotFoundException, IOException {
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("header1", "value1");
    headers.put("header2", "value2");

    OutputStream out = new FileOutputStream(testFile);
    try {
      Context context = new Context();
      context.put("appendNewline", "false");
      EventSerializer serializer =
          EventSerializerFactory.getInstance("header_and_text", context, out);
      serializer.afterCreate();
      serializer.write(EventBuilder.withBody("event 1\n", Charsets.UTF_8, headers));
      serializer.write(EventBuilder.withBody("event 2\n", Charsets.UTF_8, headers));
      serializer.write(EventBuilder.withBody("event 3\n", Charsets.UTF_8, headers));
      serializer.flush();
      serializer.beforeClose();
      out.flush();
    } finally {
      out.close();
    }

    BufferedReader reader = new BufferedReader(new FileReader(testFile));
    try {
      Assert.assertEquals("{header2=value2, header1=value1} event 1", reader.readLine());
      Assert.assertEquals("{header2=value2, header1=value1} event 2", reader.readLine());
      Assert.assertEquals("{header2=value2, header1=value1} event 3", reader.readLine());
      Assert.assertNull(reader.readLine());
    } finally {
      reader.close();
      FileUtils.forceDelete(testFile);
    }
  }
}
| 9,892 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestFlumeEventAvroEventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.CharsetDecoder;
/**
 * Tests for {@code FlumeEventAvroEventSerializer}: writes three events to an
 * Avro container file under each supported compression codec (none, "null",
 * "deflate", "snappy") and verifies the file can be read back generically.
 */
public class TestFlumeEventAvroEventSerializer {

  private static final File TESTFILE =
      new File("src/test/resources/FlumeEventAvroEvent.avro");

  /** No codec configured (serializer default). */
  @Test
  public void testAvroSerializer()
      throws FileNotFoundException, IOException {
    createAvroFile(TESTFILE, null);
    validateAvroFile(TESTFILE);
    FileUtils.forceDelete(TESTFILE);
  }

  /** Explicit "null" codec (uncompressed). */
  @Test
  public void testAvroSerializerNullCompression()
      throws FileNotFoundException, IOException {
    createAvroFile(TESTFILE, "null");
    validateAvroFile(TESTFILE);
    FileUtils.forceDelete(TESTFILE);
  }

  /** Deflate-compressed container file. */
  @Test
  public void testAvroSerializerDeflateCompression()
      throws FileNotFoundException, IOException {
    createAvroFile(TESTFILE, "deflate");
    validateAvroFile(TESTFILE);
    FileUtils.forceDelete(TESTFILE);
  }

  /** Snappy-compressed container file; skipped where snappy is known broken. */
  @Test
  public void testAvroSerializerSnappyCompression()
      throws FileNotFoundException, IOException {
    // Snappy currently broken on Mac in OpenJDK 7 per FLUME-2012
    Assume.assumeTrue(!"Mac OS X".equals(System.getProperty("os.name")) ||
                      !System.getProperty("java.version").startsWith("1.7."));
    createAvroFile(TESTFILE, "snappy");
    validateAvroFile(TESTFILE);
    FileUtils.forceDelete(TESTFILE);
  }

  /**
   * Writes three string-bodied Flume events to {@code file} with the avro
   * serializer, using compression codec {@code codec} (null = serializer
   * default). The output stream is closed even if serialization throws.
   */
  public void createAvroFile(File file, String codec) throws FileNotFoundException, IOException {
    if (file.exists()) {
      FileUtils.forceDelete(file);
    }

    // serialize a few events using the reflection-based avro serializer
    OutputStream out = new FileOutputStream(file);
    try {
      Context ctx = new Context();
      if (codec != null) {
        ctx.put("compressionCodec", codec);
      }
      EventSerializer.Builder builder =
          new FlumeEventAvroEventSerializer.Builder();
      EventSerializer serializer = builder.build(ctx, out);

      serializer.afterCreate();
      serializer.write(EventBuilder.withBody("yo man!", Charsets.UTF_8));
      serializer.write(EventBuilder.withBody("2nd event!", Charsets.UTF_8));
      serializer.write(EventBuilder.withBody("last one!", Charsets.UTF_8));
      serializer.flush();
      serializer.beforeClose();
      out.flush();
    } finally {
      out.close();
    }
  }

  /**
   * Reads {@code file} back with a schema-less GenericDatumReader and asserts
   * exactly three events are present. The reader is closed even if an
   * assertion or decode error is thrown.
   */
  public void validateAvroFile(File file) throws IOException {
    // read the events back using GenericRecord
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    DataFileReader<GenericRecord> fileReader =
        new DataFileReader<GenericRecord>(file, reader);
    int numEvents = 0;
    try {
      GenericRecord record = new GenericData.Record(fileReader.getSchema());
      // Hoisted out of the loop: CharsetDecoder.decode(ByteBuffer) is a
      // one-shot operation that resets the decoder itself, so one instance
      // can safely decode every record body.
      CharsetDecoder decoder = Charsets.UTF_8.newDecoder();
      while (fileReader.hasNext()) {
        fileReader.next(record);
        ByteBuffer body = (ByteBuffer) record.get("body");
        String bodyStr = decoder.decode(body).toString();
        System.out.println(bodyStr);
        numEvents++;
      }
    } finally {
      fileReader.close();
    }

    Assert.assertEquals("Should have found a total of 3 events", 3, numEvents);
  }
}
| 9,893 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestResettableFileInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import junit.framework.Assert;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.nio.charset.MalformedInputException;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestResettableFileInputStream {
private static final boolean CLEANUP = true;
private static final File WORK_DIR =
new File("target/test/work").getAbsoluteFile();
private static final Logger logger =
LoggerFactory.getLogger(TestResettableFileInputStream.class);
private File file;
private File meta;
@Before
public void setup() throws Exception {
Files.createParentDirs(new File(WORK_DIR, "dummy"));
file = File.createTempFile(getClass().getSimpleName(), ".txt", WORK_DIR);
logger.info("Data file: {}", file);
meta = File.createTempFile(getClass().getSimpleName(), ".avro", WORK_DIR);
logger.info("PositionTracker meta file: {}", meta);
meta.delete(); // We want the filename but not the empty file
}
@After
public void tearDown() throws Exception {
if (CLEANUP) {
meta.delete();
file.delete();
}
}
/**
* Ensure that we can simply read bytes from a file.
* @throws IOException
*/
@Test
public void testBasicRead() throws IOException {
String output = singleLineFileInit(file, Charsets.UTF_8);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
String result = readLine(in, output.length());
assertEquals(output, result);
String afterEOF = readLine(in, output.length());
assertNull(afterEOF);
in.close();
}
/**
* Ensure that we can simply read bytes from a file using InputStream.read() method.
* @throws IOException
*/
@Test
public void testReadByte() throws IOException {
byte[] bytes = new byte[255];
for (int i = 0; i < 255; i++) {
bytes[i] = (byte) i;
}
Files.write(bytes, file);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
for (int i = 0; i < 255; i++) {
assertEquals(i, in.read());
}
assertEquals(-1, in.read());
in.close();
}
/**
* Ensure that we can process lines that contain multi byte characters in weird places
* such as at the end of a buffer.
* @throws IOException
*/
@Test
public void testMultiByteCharRead() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("1234567".getBytes(Charsets.UTF_8));
// write a multi byte char encompassing buffer boundaries
generateUtf83ByteSequence(out);
// buffer now contains 8 chars and 10 bytes total
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(8, Charsets.UTF_8, DecodeErrorPolicy.FAIL);
String result = readLine(in, 8);
assertEquals("1234567\u0A93\n", result);
}
/**
* Ensure that we can process UTF-8 lines that contain surrogate pairs
* even if they appear astride buffer boundaries.
* @throws IOException
*/
@Test
public void testUtf8SurrogatePairRead() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("1234567".getBytes(Charsets.UTF_8));
generateUtf8SurrogatePairSequence(out);
// buffer now contains 9 chars (7 "normal" and 2 surrogates) and 11 bytes total
// surrogate pair will encompass buffer boundaries
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(8, Charsets.UTF_8, DecodeErrorPolicy.FAIL);
String result = readLine(in, 9);
assertEquals("1234567\uD83D\uDE18\n", result);
}
/**
* Ensure that we can process UTF-16 lines that contain surrogate pairs, even
* preceded by a Byte Order Mark (BOM).
* @throws IOException
*/
@Test
public void testUtf16BOMAndSurrogatePairRead() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
generateUtf16SurrogatePairSequence(out);
// buffer now contains 1 BOM and 2 chars (1 surrogate pair) and 6 bytes total
// (including 2-byte BOM)
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(8, Charsets.UTF_16, DecodeErrorPolicy.FAIL);
String result = readLine(in, 2);
assertEquals("\uD83D\uDE18\n", result);
}
/**
* Ensure that we can process Shift_JIS lines that contain multi byte Japanese chars
* even if they appear astride buffer boundaries.
* @throws IOException
*/
@Test
public void testShiftJisSurrogateCharRead() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("1234567".getBytes(Charset.forName("Shift_JIS")));
// write a multi byte char encompassing buffer boundaries
generateShiftJis2ByteSequence(out);
// buffer now contains 8 chars and 10 bytes total
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(8, Charset.forName("Shift_JIS"),
DecodeErrorPolicy.FAIL);
String result = readLine(in, 8);
assertEquals("1234567\u4E9C\n", result);
}
@Test(expected = MalformedInputException.class)
public void testUtf8DecodeErrorHandlingFailMalformed() throws IOException {
ResettableInputStream in = initUtf8DecodeTest(DecodeErrorPolicy.FAIL);
while (in.readChar() != -1) {
// Do nothing... read the whole file and throw away the bytes.
}
fail("Expected MalformedInputException!");
}
@Test
public void testUtf8DecodeErrorHandlingIgnore() throws IOException {
ResettableInputStream in = initUtf8DecodeTest(DecodeErrorPolicy.IGNORE);
int c;
StringBuilder sb = new StringBuilder();
while ((c = in.readChar()) != -1) {
sb.append((char)c);
}
assertEquals("Latin1: ()\nLong: ()\nNonUnicode: ()\n", sb.toString());
}
@Test
public void testUtf8DecodeErrorHandlingReplace() throws IOException {
ResettableInputStream in = initUtf8DecodeTest(DecodeErrorPolicy.REPLACE);
int c;
StringBuilder sb = new StringBuilder();
while ((c = in.readChar()) != -1) {
sb.append((char)c);
}
String preJdk8ExpectedStr = "Latin1: (X)\nLong: (XXX)\nNonUnicode: (X)\n";
String expectedStr = "Latin1: (X)\nLong: (XXX)\nNonUnicode: (XXXXX)\n";
String javaVersionStr = System.getProperty("java.version");
double javaVersion = Double.parseDouble(javaVersionStr.substring(0, 3));
if (javaVersion < 1.8) {
assertTrue(preJdk8ExpectedStr.replaceAll("X", "\ufffd").equals(sb.toString()));
} else {
assertTrue(expectedStr.replaceAll("X", "\ufffd").equals(sb.toString()));
}
}
@Test(expected = MalformedInputException.class)
public void testLatin1DecodeErrorHandlingFailMalformed() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
generateLatin1InvalidSequence(out);
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(DecodeErrorPolicy.FAIL);
while (in.readChar() != -1) {
// Do nothing... read the whole file and throw away the bytes.
}
fail("Expected MalformedInputException!");
}
@Test
public void testLatin1DecodeErrorHandlingReplace() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
generateLatin1InvalidSequence(out);
Files.write(out.toByteArray(), file);
ResettableInputStream in = initInputStream(DecodeErrorPolicy.REPLACE);
int c;
StringBuilder sb = new StringBuilder();
while ((c = in.readChar()) != -1) {
sb.append((char)c);
}
assertEquals("Invalid: (X)\n".replaceAll("X", "\ufffd"), sb.toString());
}
/**
* Ensure a reset() brings us back to the default mark (beginning of file)
* @throws IOException
*/
@Test
public void testReset() throws IOException {
String output = singleLineFileInit(file, Charsets.UTF_8);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
String result1 = readLine(in, output.length());
assertEquals(output, result1);
in.reset();
String result2 = readLine(in, output.length());
assertEquals(output, result2);
String result3 = readLine(in, output.length());
assertNull("Should be null: " + result3, result3);
in.close();
}
/**
* Ensure that marking and resetting works.
* @throws IOException
*/
@Test
public void testMarkReset() throws IOException {
List<String> expected = multiLineFileInit(file, Charsets.UTF_8);
int MAX_LEN = 100;
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
String result0 = readLine(in, MAX_LEN);
assertEquals(expected.get(0), result0);
in.reset();
String result0a = readLine(in, MAX_LEN);
assertEquals(expected.get(0), result0a);
in.mark();
String result1 = readLine(in, MAX_LEN);
assertEquals(expected.get(1), result1);
in.reset();
String result1a = readLine(in, MAX_LEN);
assertEquals(expected.get(1), result1a);
in.mark();
in.close();
}
/**
* Ensure that surrogate pairs work well with mark/reset.
* @throws IOException
*/
@Test
public void testMarkResetWithSurrogatePairs() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("foo".getBytes(Charsets.UTF_8));
generateUtf8SurrogatePairSequence(out);
out.write("bar".getBytes(Charsets.UTF_8));
Files.write(out.toByteArray(), file);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
Assert.assertEquals('f', in.readChar());
Assert.assertEquals('o', in.readChar());
in.mark();
Assert.assertEquals('o', in.readChar());
// read high surrogate
Assert.assertEquals('\ud83d', in.readChar());
// call reset in the middle of a surrogate pair
in.reset();
// will read low surrogate *before* reverting back to mark, to ensure
// surrogate pair is properly read
Assert.assertEquals('\ude18', in.readChar());
// now back to marked position
Assert.assertEquals('o', in.readChar());
// read high surrogate again
Assert.assertEquals('\ud83d', in.readChar());
// call mark in the middle of a surrogate pair:
// will mark the position *after* the pair, *not* low surrogate's position
in.mark();
// will reset to the position *after* the pair
in.reset();
// read low surrogate normally despite of reset being called
// so that the pair is entirely read
Assert.assertEquals('\ude18', in.readChar());
Assert.assertEquals('b', in.readChar());
Assert.assertEquals('a', in.readChar());
// will reset to the position *after* the pair
in.reset();
Assert.assertEquals('b', in.readChar());
Assert.assertEquals('a', in.readChar());
Assert.assertEquals('r', in.readChar());
Assert.assertEquals(-1, in.readChar());
in.close();
tracker.close(); // redundant
}
@Test
public void testResume() throws IOException {
List<String> expected = multiLineFileInit(file, Charsets.UTF_8);
int MAX_LEN = 100;
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
String result0 = readLine(in, MAX_LEN);
String result1 = readLine(in, MAX_LEN);
in.mark();
String result2 = readLine(in, MAX_LEN);
Assert.assertEquals(expected.get(2), result2);
String result3 = readLine(in, MAX_LEN);
Assert.assertEquals(expected.get(3), result3);
in.close();
tracker.close(); // redundant
// create new Tracker & RIS
tracker = new DurablePositionTracker(meta, file.getPath());
in = new ResettableFileInputStream(file, tracker);
String result2a = readLine(in, MAX_LEN);
String result3a = readLine(in, MAX_LEN);
Assert.assertEquals(result2, result2a);
Assert.assertEquals(result3, result3a);
}
/**
* Ensure that surrogate pairs work well when resuming
* reading. Specifically, this test brings up special situations
* where a surrogate pair cannot be correctly decoded because
* the second character is lost.
*
* @throws IOException
*/
@Test
public void testResumeWithSurrogatePairs() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("foo".getBytes(Charsets.UTF_8));
generateUtf8SurrogatePairSequence(out);
out.write("bar".getBytes(Charsets.UTF_8));
Files.write(out.toByteArray(), file);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker);
Assert.assertEquals('f', in.readChar());
Assert.assertEquals('o', in.readChar());
in.mark();
Assert.assertEquals('o', in.readChar());
// read high surrogate
Assert.assertEquals('\ud83d', in.readChar());
// call reset in the middle of a surrogate pair
in.reset();
// close RIS - this will cause the low surrogate char
// stored in-memory to be lost
in.close();
tracker.close(); // redundant
// create new Tracker & RIS
tracker = new DurablePositionTracker(meta, file.getPath());
in = new ResettableFileInputStream(file, tracker);
// low surrogate char is now lost - resume from marked position
Assert.assertEquals('o', in.readChar());
// read high surrogate again
Assert.assertEquals('\ud83d', in.readChar());
// call mark in the middle of a surrogate pair:
// will mark the position *after* the pair, *not* low surrogate's position
in.mark();
// close RIS - this will cause the low surrogate char
// stored in-memory to be lost
in.close();
tracker.close(); // redundant
// create new Tracker & RIS
tracker = new DurablePositionTracker(meta, file.getPath());
in = new ResettableFileInputStream(file, tracker);
// low surrogate char is now lost - resume from marked position
Assert.assertEquals('b', in.readChar());
Assert.assertEquals('a', in.readChar());
Assert.assertEquals('r', in.readChar());
Assert.assertEquals(-1, in.readChar());
in.close();
tracker.close(); // redundant
}
@Test
public void testSeek() throws IOException {
int NUM_LINES = 1000;
int LINE_LEN = 1000;
generateData(file, Charsets.UTF_8, NUM_LINES, LINE_LEN);
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker,
10 * LINE_LEN, Charsets.UTF_8, DecodeErrorPolicy.FAIL);
String line = "";
for (int i = 0; i < 9; i++) {
line = readLine(in, LINE_LEN);
}
int lineNum = Integer.parseInt(line.substring(0, 10));
assertEquals(8, lineNum);
// seek back within our buffer
long pos = in.tell();
in.seek(pos - 2 * LINE_LEN); // jump back 2 lines
line = readLine(in, LINE_LEN);
lineNum = Integer.parseInt(line.substring(0, 10));
assertEquals(7, lineNum);
// seek forward within our buffer
in.seek(in.tell() + LINE_LEN);
line = readLine(in, LINE_LEN);
lineNum = Integer.parseInt(line.substring(0, 10));
assertEquals(9, lineNum);
// seek forward outside our buffer
in.seek(in.tell() + 20 * LINE_LEN);
line = readLine(in, LINE_LEN);
lineNum = Integer.parseInt(line.substring(0, 10));
assertEquals(30, lineNum);
// seek backward outside our buffer
in.seek(in.tell() - 25 * LINE_LEN);
line = readLine(in, LINE_LEN);
lineNum = Integer.parseInt(line.substring(0, 10));
assertEquals(6, lineNum);
// test a corner-case seek which requires a buffer refill
in.seek(100 * LINE_LEN);
in.seek(0); // reset buffer
in.seek(9 * LINE_LEN);
assertEquals(9, Integer.parseInt(readLine(in, LINE_LEN).substring(0, 10)));
assertEquals(10, Integer.parseInt(readLine(in, LINE_LEN).substring(0, 10)));
assertEquals(11, Integer.parseInt(readLine(in, LINE_LEN).substring(0, 10)));
}
private ResettableInputStream initUtf8DecodeTest(DecodeErrorPolicy policy)
throws IOException {
writeBigBadUtf8Sequence(file);
return initInputStream(policy);
}
private ResettableInputStream initInputStream(DecodeErrorPolicy policy)
throws IOException {
return initInputStream(2048, Charsets.UTF_8, policy);
}
private ResettableInputStream initInputStream(int bufferSize, Charset charset,
DecodeErrorPolicy policy) throws IOException {
PositionTracker tracker = new DurablePositionTracker(meta, file.getPath());
ResettableInputStream in = new ResettableFileInputStream(file, tracker,
bufferSize, charset, policy);
return in;
}
private void writeBigBadUtf8Sequence(File file) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
generateUtf8Latin1Sequence(out);
generateUtf8OverlyLongSequence(out);
generateUtf8NonUnicodeSequence(out);
Files.write(out.toByteArray(), file);
}
private void generateUtf8OverlyLongSequence(OutputStream out)
throws IOException {
out.write("Long: (".getBytes(Charsets.UTF_8));
// Overly-long slash character should not be accepted.
out.write(new byte[] { (byte)0xe0, (byte)0x80, (byte)0xaf });
out.write(")\n".getBytes(Charsets.UTF_8));
}
private void generateUtf8NonUnicodeSequence(OutputStream out)
throws IOException {
out.write("NonUnicode: (".getBytes(Charsets.UTF_8));
// This is a valid 5-octet sequence but is not Unicode
out.write(new byte[]{(byte) 0xf8, (byte) 0xa1, (byte) 0xa1, (byte) 0xa1,
(byte) 0xa1});
out.write(")\n".getBytes(Charsets.UTF_8));
}
private void generateUtf8Latin1Sequence(OutputStream out) throws IOException {
out.write("Latin1: (".getBytes(Charsets.UTF_8));
// This is "e" with an accent in Latin-1
out.write(new byte[] { (byte)0xe9 } );
out.write(")\n".getBytes(Charsets.UTF_8));
}
private void generateLatin1InvalidSequence(OutputStream out)
throws IOException {
out.write("Invalid: (".getBytes(Charsets.UTF_8));
// Not a valid character in Latin 1.
out.write(new byte[] { (byte)0x81 } );
out.write(")\n".getBytes(Charsets.UTF_8));
}
private void generateUtf8SurrogatePairSequence(OutputStream out) throws IOException {
// U+1F618 (UTF-8: f0 9f 98 98) FACE THROWING A KISS
out.write(new byte[]{(byte) 0xF0, (byte) 0x9F, (byte) 0x98, (byte) 0x98});
}
private void generateUtf16SurrogatePairSequence(OutputStream out) throws IOException {
// BOM
out.write(new byte[]{(byte) 0xFE, (byte) 0xFF});
// U+1F618 (UTF-16: d83d de18) FACE THROWING A KISS
out.write(new byte[]{(byte) 0xD8, (byte) 0x3D, (byte) 0xDE, (byte) 0x18});
}
private void generateUtf83ByteSequence(OutputStream out) throws IOException {
// U+0A93 (UTF-8: e0 aa 93) GUJARATI LETTER O
out.write(new byte[]{(byte) 0xe0, (byte) 0xaa, (byte) 0x93});
}
private void generateShiftJis2ByteSequence(OutputStream out) throws IOException {
//U+4E9C (Shift JIS: 88 9f) CJK UNIFIED IDEOGRAPH
out.write(new byte[]{(byte) 0x88, (byte) 0x9f});
}
/**
 * Reads one line from a character stream, up to and including the
 * terminating '\n'.
 *
 * @param in stream to read characters from
 * @param maxLength maximum number of characters allowed before the
 *        terminator; exceeding it throws a RuntimeException
 * @return the line including its trailing '\n', or {@code null} if the
 *         stream was exhausted with no characters read
 * @throws IOException on stream errors
 */
private static String readLine(ResettableInputStream in, int maxLength)
    throws IOException {
  StringBuilder line = new StringBuilder();
  int appended = 0;
  for (int c = in.readChar(); c != -1; c = in.readChar()) {
    // FIXME: support \r\n
    if (c == '\n') {
      break;
    }
    line.append((char) c);
    appended++;
    // Guard against runaway reads in a broken decoder under test.
    if (appended > maxLength) {
      System.out.println("Output: >" + line + "<");
      throw new RuntimeException("Too far!");
    }
  }
  if (line.length() == 0) {
    return null;
  }
  return line.append('\n').toString();
}
/**
 * Writes a single known line to {@code file} in the given charset and
 * returns that line (including its '\n') for later comparison.
 */
private static String singleLineFileInit(File file, Charset charset)
    throws IOException {
  final String contents = "This is gonna be great!\n";
  Files.write(contents.getBytes(charset), file);
  return contents;
}
/**
 * Writes four known lines to {@code file} in the given charset and returns
 * them (each including its '\n') for later comparison.
 */
private static List<String> multiLineFileInit(File file, Charset charset)
    throws IOException {
  List<String> lines = Lists.newArrayList(
      "1. On the planet of Mars\n",
      "2. They have clothes just like ours,\n",
      "3. And they have the same shoes and same laces,\n",
      "4. And they have the same charms and same graces...\n");
  StringBuilder whole = new StringBuilder();
  for (String line : lines) {
    whole.append(line);
  }
  Files.write(whole.toString().getBytes(charset), file);
  return lines;
}
}
| 9,894 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/ResettableTestStringInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import java.io.IOException;
/**
 * A {@link ResettableInputStream} test double backed by an in-memory String.
 * Only the character-oriented API ({@code readChar}/{@code mark}/{@code reset})
 * is implemented; byte-oriented and position-oriented methods throw
 * {@link UnsupportedOperationException}.
 */
public class ResettableTestStringInputStream extends ResettableInputStream {

  // Backing data; each char is served directly with no encoding applied.
  private String str;

  // Position recorded by the last mark(); reset() rewinds to it.
  int markPos = 0;
  // Index of the next character readChar() will return.
  int curPos = 0;

  /**
   * Warning: This test class does not handle character/byte conversion at all!
   * @param str String to use for testing
   */
  public ResettableTestStringInputStream(String str) {
    this.str = str;
  }

  /** Returns the next character of the string, or -1 at end of input. */
  @Override
  public int readChar() throws IOException {
    if (curPos >= str.length()) {
      return -1;
    }
    return str.charAt(curPos++);
  }

  /** Remembers the current position so reset() can return to it. */
  @Override
  public void mark() throws IOException {
    markPos = curPos;
  }

  /** Rewinds the read position to the last mark() (0 if never marked). */
  @Override
  public void reset() throws IOException {
    curPos = markPos;
  }

  /** Not supported by this test double. */
  @Override
  public void seek(long position) throws IOException {
    throw new UnsupportedOperationException("Unimplemented in test class");
  }

  /** Not supported by this test double. */
  @Override
  public long tell() throws IOException {
    throw new UnsupportedOperationException("Unimplemented in test class");
  }

  /** Byte-oriented reads are intentionally unsupported; use readChar(). */
  @Override
  public int read() throws IOException {
    throw new UnsupportedOperationException("This test class doesn't return " +
        "bytes!");
  }

  /** Byte-oriented reads are intentionally unsupported; use readChar(). */
  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    throw new UnsupportedOperationException("This test class doesn't return " +
        "bytes!");
  }

  /** Nothing to release; the backing data is an in-memory String. */
  @Override
  public void close() throws IOException {
    // no-op
  }
}
| 9,895 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/serialization/TestLineDeserializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.serialization;
import com.google.common.base.Charsets;
import junit.framework.Assert;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
/**
 * Tests for {@link LineDeserializer}: construction via constructor, Builder,
 * and factory; batch reads; maxLineLength truncation; and mark/reset
 * semantics over a two-line fixture.
 */
public class TestLineDeserializer {

  // Two-line fixture ("line 1\n" + "line 2\n") shared by most tests.
  private String mini;

  @Before
  public void setup() {
    StringBuilder sb = new StringBuilder();
    sb.append("line 1\n");
    sb.append("line 2\n");
    mini = sb.toString();
  }

  /** Direct construction parses the fixture correctly. */
  @Test
  public void testSimple() throws IOException {
    ResettableInputStream in = new ResettableTestStringInputStream(mini);
    EventDeserializer des = new LineDeserializer(new Context(), in);
    validateMiniParse(des);
  }

  /** Construction through LineDeserializer.Builder behaves identically. */
  @Test
  public void testSimpleViaBuilder() throws IOException {
    ResettableInputStream in = new ResettableTestStringInputStream(mini);
    EventDeserializer.Builder builder = new LineDeserializer.Builder();
    EventDeserializer des = builder.build(new Context(), in);
    validateMiniParse(des);
  }

  /** Construction through the "LINE" factory alias behaves identically. */
  @Test
  public void testSimpleViaFactory() throws IOException {
    ResettableInputStream in = new ResettableTestStringInputStream(mini);
    EventDeserializer des;
    des = EventDeserializerFactory.getInstance("LINE", new Context(), in);
    validateMiniParse(des);
  }

  /** readEvents(n) returns at most n events and stops at end of input. */
  @Test
  public void testBatch() throws IOException {
    ResettableInputStream in = new ResettableTestStringInputStream(mini);
    EventDeserializer des = new LineDeserializer(new Context(), in);
    List<Event> events;

    events = des.readEvents(1); // only try to read 1
    Assert.assertEquals(1, events.size());
    assertEventBodyEquals("line 1", events.get(0));

    events = des.readEvents(10); // try to read more than we should have
    Assert.assertEquals(1, events.size());
    assertEventBodyEquals("line 2", events.get(0));

    des.mark();
    des.close();
  }

  // truncation occurs at maxLineLength boundaries
  @Test
  public void testMaxLineLength() throws IOException {
    String longLine = "abcdefghijklmnopqrstuvwxyz\n";
    Context ctx = new Context();
    ctx.put(LineDeserializer.MAXLINE_KEY, "10");

    ResettableInputStream in = new ResettableTestStringInputStream(longLine);
    EventDeserializer des = new LineDeserializer(ctx, in);

    // A 26-char line split into 10 + 10 + 6 character events.
    assertEventBodyEquals("abcdefghij", des.readEvent());
    assertEventBodyEquals("klmnopqrst", des.readEvent());
    assertEventBodyEquals("uvwxyz", des.readEvent());
    Assert.assertNull(des.readEvent());
  }

  /*
   * TODO: need test for output charset
  @Test
  public void testOutputCharset {

  }
  */

  // Decodes the event body as UTF-8 and compares it to the expected text.
  private void assertEventBodyEquals(String expected, Event event) {
    String bodyStr = new String(event.getBody(), Charsets.UTF_8);
    Assert.assertEquals(expected, bodyStr);
  }

  // Shared assertions: read line 1, mark, read line 2, reset (which must
  // rewind to the mark so line 2 repeats), then hit end of input.
  private void validateMiniParse(EventDeserializer des) throws IOException {
    Event evt;

    evt = des.readEvent();
    Assert.assertEquals(new String(evt.getBody()), "line 1");
    des.mark();

    evt = des.readEvent();
    Assert.assertEquals(new String(evt.getBody()), "line 2");
    des.reset(); // reset!

    evt = des.readEvent();
    Assert.assertEquals("Line 2 should be repeated, " +
        "because we reset() the stream", new String(evt.getBody()), "line 2");

    evt = des.readEvent();
    Assert.assertNull("Event should be null because there are no lines " +
        "left to read", evt);

    des.mark();
    des.close();
  }
}
| 9,896 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/source/TestBasicSourceSemantics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source;
import static org.mockito.Mockito.*;
import org.apache.flume.Context;
import org.apache.flume.FlumeException;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.lifecycle.LifecycleState;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests for {@link BasicSourceSemantics}: verifies that exceptions thrown
 * from the doConfigure/doStart/doStop hooks drive the lifecycle state to
 * ERROR, and that configure() is rejected once the source has started.
 */
public class TestBasicSourceSemantics {

  private BasicSourceSemantics source;
  private ChannelProcessor channelProcessor;
  private Context context;

  @Before
  public void setUp() {
    context = new Context();
    channelProcessor = mock(ChannelProcessor.class);
  }

  // Wraps the given source in a Mockito spy, wires the mocked channel
  // processor, and runs configure() so tests start from a configured source.
  public DoNothingSource spyAndConfigure(DoNothingSource source) {
    source = spy(source);
    source.setChannelProcessor(channelProcessor);
    source.configure(context);
    return source;
  }

  /** A failing doConfigure() must propagate and leave the source in ERROR. */
  @Test
  public void testDoConfigureThrowsException() throws Exception {
    source = spy(new DoNothingSource() {
      @Override
      protected void doConfigure(Context context) throws FlumeException {
        throw new FlumeException("dummy");
      }
    });
    source.setChannelProcessor(channelProcessor);
    try {
      source.configure(context);
      Assert.fail();
    } catch (FlumeException expected) {
    }
    Assert.assertFalse(source.isStarted());
    Assert.assertEquals(LifecycleState.ERROR, source.getLifecycleState());
    Assert.assertNotNull(source.getStartException());
  }

  /** A failing doStart() must record the exception and stay un-started. */
  @Test
  public void testDoStartThrowsException() throws Exception {
    source = spyAndConfigure(new DoNothingSource() {
      @Override
      protected void doStart() throws FlumeException {
        throw new FlumeException("dummy");
      }
    });
    source.start();
    Assert.assertFalse(source.isStarted());
    Assert.assertEquals(LifecycleState.ERROR, source.getLifecycleState());
    Assert.assertNotNull(source.getStartException());
  }

  /**
   * A failing doStop() ends in ERROR, but no start exception is recorded
   * because the failure happened during stop, not start.
   */
  @Test
  public void testDoStopThrowsException() throws Exception {
    source = spyAndConfigure(new DoNothingSource() {
      @Override
      protected void doStop() throws FlumeException {
        throw new FlumeException("dummy");
      }
    });
    source.start();
    source.stop();
    Assert.assertFalse(source.isStarted());
    Assert.assertEquals(LifecycleState.ERROR, source.getLifecycleState());
    Assert.assertNull(source.getStartException());
  }

  /** configure() after start() must be rejected with IllegalStateException. */
  @Test
  public void testConfigureCalledWhenStarted() throws Exception {
    source = spyAndConfigure(new DoNothingSource());
    source.start();
    try {
      source.configure(context);
      Assert.fail();
    } catch (IllegalStateException expected) {
    }
    Assert.assertTrue(source.isStarted());
    Assert.assertNull(source.getStartException());
  }

  // Minimal concrete source whose lifecycle hooks all succeed; tests
  // override individual hooks to inject failures.
  private static class DoNothingSource extends BasicSourceSemantics {
    @Override
    protected void doConfigure(Context context) throws FlumeException {
    }
    @Override
    protected void doStart() throws FlumeException {
    }
    @Override
    protected void doStop() throws FlumeException {
    }
  }
}
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/source/TestAbstractPollableSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source;
import static org.mockito.Mockito.*;
import org.apache.flume.Context;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.HashMap;
/**
 * Tests for {@link AbstractPollableSource}: configuration error propagation,
 * processing before start, and back-off sleep configuration (explicit values
 * and defaults).
 */
public class TestAbstractPollableSource {

  private AbstractPollableSource source;

  /**
   * Builds a spied source with no-op lifecycle hooks.
   *
   * @param failOnConfigure when true, doConfigure() throws a FlumeException
   *        so tests can verify configuration errors propagate to callers.
   *        (Extracted helper: the original duplicated this anonymous class
   *        three times.)
   */
  private AbstractPollableSource createSource(final boolean failOnConfigure) {
    return spy(new AbstractPollableSource() {
      @Override
      protected Status doProcess() throws EventDeliveryException {
        return Status.BACKOFF;
      }

      @Override
      protected void doConfigure(Context context) throws FlumeException {
        if (failOnConfigure) {
          throw new FlumeException("dummy");
        }
      }

      @Override
      protected void doStart() throws FlumeException {
      }

      @Override
      protected void doStop() throws FlumeException {
      }
    });
  }

  @Before
  public void setUp() {
    source = createSource(true);
  }

  /** configure() must propagate the FlumeException thrown by doConfigure(). */
  @Test(expected = FlumeException.class)
  public void testExceptionStartup() throws Exception {
    source.configure(new Context());
  }

  /** process() must fail when the source has never been started. */
  @Test(expected = EventDeliveryException.class)
  public void testNotStarted() throws Exception {
    source.process();
  }

  /** Explicit back-off settings in the context must override the defaults. */
  @Test
  public void voidBackOffConfig() {
    source = createSource(false);
    HashMap<String, String> inputConfigs = new HashMap<String, String>();
    inputConfigs.put(PollableSourceConstants.BACKOFF_SLEEP_INCREMENT, "42");
    inputConfigs.put(PollableSourceConstants.MAX_BACKOFF_SLEEP, "4242");
    Context context = new Context(inputConfigs);

    source.configure(context);

    Assert.assertEquals("BackOffSleepIncrement should equal 42 but it equals " +
        source.getBackOffSleepIncrement(),
        42L, source.getBackOffSleepIncrement());
    // Fixed copy-paste bug: this message previously said
    // "BackOffSleepIncrement should equal 42" while asserting the max
    // back-off sleep interval.
    Assert.assertEquals("MaxBackOffSleepInterval should equal 4242 " +
        "but it equals " + source.getMaxBackOffSleepInterval(),
        4242L, source.getMaxBackOffSleepInterval());
  }

  /** Without any configuration the documented defaults must be in effect. */
  @Test
  public void voidBackOffConfigDefaults() {
    source = createSource(false);
    // configure() is deliberately not called: the getters must report the
    // compile-time defaults. (An unused, never-applied config map was
    // removed from this test.)
    Assert.assertEquals("BackOffSleepIncrement should equal " +
        PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT +
        " but it equals " + source.getBackOffSleepIncrement(),
        PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT,
        source.getBackOffSleepIncrement());
    // Message fixed here too (was "BackOffSleepIncrement" for the max value).
    Assert.assertEquals("MaxBackOffSleepInterval should equal " +
        PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP +
        " but it equals " + source.getMaxBackOffSleepInterval(),
        PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP,
        source.getMaxBackOffSleepInterval());
  }
}
| 9,898 |
0 | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-core/src/test/java/org/apache/flume/source/TestSyslogUdpSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source;
import com.google.common.base.Charsets;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelSelector;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;
import org.joda.time.DateTime;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doThrow;
/**
 * Tests for {@link SyslogUDPSource}: keepFields handling, large payloads,
 * source counters (including channel/read failures), and client IP/hostname
 * headers. Events are delivered by Netty on a separate thread, so
 * counter-based tests poll with a bounded retry loop to avoid flakiness.
 */
public class TestSyslogUdpSource {
  private static final org.slf4j.Logger logger =
      LoggerFactory.getLogger(TestSyslogUdpSource.class);

  // Header names used to verify the clientIPHeader/clientHostnameHeader
  // configuration options.
  private static final String TEST_CLIENT_IP_HEADER = "testClientIPHeader";
  private static final String TEST_CLIENT_HOSTNAME_HEADER = "testClientHostnameHeader";

  private SyslogUDPSource source;
  private Channel channel;
  // Port 0 lets the OS pick a free port; the bound address is queried later.
  private static final int TEST_SYSLOG_PORT = 0;
  private final DateTime time = new DateTime();
  private final String stamp1 = time.toString();
  private final String host1 = "localhost.localdomain";
  private final String data1 = "test syslog data";
  // Expected bodies for the various keepFields modes.
  private final String bodyWithHostname = host1 + " " +
      data1;
  private final String bodyWithTimestamp = stamp1 + " " +
      data1;
  // Full syslog message: priority, timestamp, hostname, payload.
  private final String bodyWithTandH = "<10>" + stamp1 + " " + host1 + " " +
      data1;

  private void init(String keepFields) {
    init(keepFields, new Context());
  }

  // Wires a fresh SyslogUDPSource to a MemoryChannel via a replicating
  // selector and configures it on the loopback address.
  private void init(String keepFields, Context context) {
    source = new SyslogUDPSource();
    channel = new MemoryChannel();

    Configurables.configure(channel, new Context());

    List<Channel> channels = new ArrayList<Channel>();
    channels.add(channel);

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);

    source.setChannelProcessor(new ChannelProcessor(rcs));
    context.put("host", InetAddress.getLoopbackAddress().getHostAddress());
    context.put("port", String.valueOf(TEST_SYSLOG_PORT));
    context.put("keepFields", keepFields);

    source.configure(context);
  }

  /** Tests the keepFields configuration parameter (enabled or disabled)
     using SyslogUDPSource.*/
  private void runKeepFieldsTest(String keepFields) throws IOException {
    init(keepFields);
    source.start();
    // Write some message to the syslog port
    DatagramPacket datagramPacket = createDatagramPacket(bodyWithTandH.getBytes());
    for (int i = 0; i < 10 ; i++) {
      sendDatagramPacket(datagramPacket);
    }

    List<Event> channelEvents = new ArrayList<>();
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int i = 0; i < 10; i++) {
      Event e = channel.take();
      Assert.assertNotNull(e);
      channelEvents.add(e);
    }

    commitAndCloseTransaction(txn);
    source.stop();
    // Verify each event body matches the expectation for this keepFields
    // mode ("all"/"true" keep everything; "none"/"false" strip the syslog
    // framing; "hostname"/"timestamp" keep only that field).
    for (Event e : channelEvents) {
      Assert.assertNotNull(e);
      String str = new String(e.getBody(), Charsets.UTF_8);
      logger.info(str);
      if (keepFields.equals("true") || keepFields.equals("all")) {
        Assert.assertArrayEquals(bodyWithTandH.trim().getBytes(),
            e.getBody());
      } else if (keepFields.equals("false") || keepFields.equals("none")) {
        Assert.assertArrayEquals(data1.getBytes(), e.getBody());
      } else if (keepFields.equals("hostname")) {
        Assert.assertArrayEquals(bodyWithHostname.getBytes(), e.getBody());
      } else if (keepFields.equals("timestamp")) {
        Assert.assertArrayEquals(bodyWithTimestamp.getBytes(), e.getBody());
      }
    }
  }

  /** A 1000-byte payload must pass through the source unmodified. */
  @Test
  public void testLargePayload() throws Exception {
    init("true");
    source.start();
    // Write some message to the syslog port

    byte[] largePayload = getPayload(1000).getBytes();

    DatagramPacket datagramPacket = createDatagramPacket(largePayload);
    for (int i = 0; i < 10 ; i++) {
      sendDatagramPacket(datagramPacket);
    }

    List<Event> channelEvents = new ArrayList<>();
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int i = 0; i < 10; i++) {
      Event e = channel.take();
      Assert.assertNotNull(e);
      channelEvents.add(e);
    }

    commitAndCloseTransaction(txn);
    source.stop();
    for (Event e : channelEvents) {
      Assert.assertNotNull(e);
      Assert.assertArrayEquals(largePayload, e.getBody());
    }
  }

  @Test
  public void testKeepFields() throws IOException {
    runKeepFieldsTest("all");

    // Backwards compatibility
    runKeepFieldsTest("true");
  }

  @Test
  public void testRemoveFields() throws IOException {
    runKeepFieldsTest("none");

    // Backwards compatibility
    runKeepFieldsTest("false");
  }

  @Test
  public void testKeepHostname() throws IOException {
    runKeepFieldsTest("hostname");
  }

  @Test
  public void testKeepTimestamp() throws IOException {
    runKeepFieldsTest("timestamp");
  }

  /** One datagram must increment both the received and accepted counters. */
  @Test
  public void testSourceCounter() throws Exception {
    init("true");
    doCounterCommon();
    // Retrying up to 10 times while the acceptedCount == 0 because the event processing in
    // SyslogUDPSource is handled on a separate thread by Netty so message delivery,
    // thus the sourceCounter's increment can be delayed resulting in a flaky test
    for (int i = 0; i < 10 && source.getSourceCounter().getEventAcceptedCount() == 0; i++) {
      Thread.sleep(100);
    }
    Assert.assertEquals(1, source.getSourceCounter().getEventAcceptedCount());
    Assert.assertEquals(1, source.getSourceCounter().getEventReceivedCount());
  }

  // Starts the source and sends a single "test" datagram to it.
  private void doCounterCommon() throws IOException, InterruptedException {
    source.start();
    DatagramPacket datagramPacket = createDatagramPacket("test".getBytes());
    sendDatagramPacket(datagramPacket);
  }

  /** A ChannelException during delivery must bump the channel-write-fail counter. */
  @Test
  public void testSourceCounterChannelFail() throws Exception {
    init("true");

    ChannelProcessor cp = Mockito.mock(ChannelProcessor.class);
    doThrow(new ChannelException("dummy")).when(cp).processEvent(any(Event.class));
    source.setChannelProcessor(cp);

    doCounterCommon();

    for (int i = 0; i < 10 && source.getSourceCounter().getChannelWriteFail() == 0; i++) {
      Thread.sleep(100);
    }
    Assert.assertEquals(1, source.getSourceCounter().getChannelWriteFail());
  }

  /** A generic RuntimeException during delivery must bump the read-fail counter. */
  @Test
  public void testSourceCounterReadFail() throws Exception {
    init("true");

    ChannelProcessor cp = Mockito.mock(ChannelProcessor.class);
    doThrow(new RuntimeException("dummy")).when(cp).processEvent(any(Event.class));
    source.setChannelProcessor(cp);

    doCounterCommon();

    for (int i = 0; i < 10 && source.getSourceCounter().getEventReadFail() == 0; i++) {
      Thread.sleep(100);
    }
    Assert.assertEquals(1, source.getSourceCounter().getEventReadFail());
  }

  // Builds a datagram addressed to whatever port the source actually bound
  // (TEST_SYSLOG_PORT is 0, so the OS chose it).
  private DatagramPacket createDatagramPacket(byte[] payload) {
    InetSocketAddress addr = source.getBoundAddress();
    return new DatagramPacket(payload, payload.length, addr.getAddress(), addr.getPort());
  }

  private void sendDatagramPacket(DatagramPacket datagramPacket) throws IOException {
    try (DatagramSocket syslogSocket = new DatagramSocket()) {
      syslogSocket.send(datagramPacket);
    }
  }

  // Commits the transaction, rolling back on any failure; always closes it.
  private void commitAndCloseTransaction(Transaction txn) {
    try {
      txn.commit();
    } catch (Throwable t) {
      logger.error("Transaction commit failed, rolling back", t);
      txn.rollback();
    } finally {
      txn.close();
    }
  }

  // Returns a string of `length` 'x' characters.
  private String getPayload(int length) {
    StringBuilder payload = new StringBuilder(length);
    for (int n = 0; n < length; ++n) {
      payload.append("x");
    }
    return payload.toString();
  }

  /** clientIPHeader/clientHostnameHeader must populate the loopback identity. */
  @Test
  public void testClientHeaders() throws IOException {
    Context context = new Context();
    context.put("clientIPHeader", TEST_CLIENT_IP_HEADER);
    context.put("clientHostnameHeader", TEST_CLIENT_HOSTNAME_HEADER);

    init("none", context);

    source.start();
    DatagramPacket datagramPacket = createDatagramPacket(bodyWithTandH.getBytes());
    sendDatagramPacket(datagramPacket);

    Transaction txn = channel.getTransaction();
    txn.begin();
    Event e = channel.take();

    commitAndCloseTransaction(txn);

    source.stop();

    Map<String, String> headers = e.getHeaders();
    InetAddress loopbackAddress = InetAddress.getLoopbackAddress();
    checkHeader(headers, TEST_CLIENT_IP_HEADER, loopbackAddress.getHostAddress());
    checkHeader(headers, TEST_CLIENT_HOSTNAME_HEADER, loopbackAddress.getHostName());
  }

  // Asserts a header is present; hostname values accept any local-host alias
  // because reverse DNS of 127.0.0.1 is environment-dependent.
  private static void checkHeader(Map<String, String> headers, String headerName,
      String expectedValue) {
    assertTrue("Missing event header: " + headerName, headers.containsKey(headerName));

    String headerValue = headers.get(headerName);
    if (TEST_CLIENT_HOSTNAME_HEADER.equals(headerName)) {
      if (!TestSyslogUtils.isLocalHost(headerValue)) {
        fail("Expected either 'localhost' or '127.0.0.1' but got " + headerValue);
      }
    } else {
      assertEquals("Event header value does not match: " + headerName,
          expectedValue, headerValue);
    }
  }
}
| 9,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.