index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestCheckpointRebuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import static org.apache.flume.channel.file.TestUtils.*;
import java.io.File;
import java.util.Map;
import java.util.Set;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
/**
 * Verifies that {@link CheckpointRebuilder} can reconstruct the channel's event queue
 * directly from the data-dir logs after every checkpoint artifact has been deleted
 * (the "fast replay" path), without losing any events.
 */
public class TestCheckpointRebuilder extends TestFileChannelBase {
protected static final Logger LOG = LoggerFactory
.getLogger(TestCheckpointRebuilder.class);
@Before
public void setup() throws Exception {
super.setup();
}
@After
public void teardown() {
super.teardown();
}
@Test
public void testFastReplay() throws Exception {
// Capacity and transaction capacity both 50 so fillChannel() fills the channel
// completely with a bounded number of events.
Map<String, String> overrides = Maps.newHashMap();
overrides.put(FileChannelConfiguration.CAPACITY,
String.valueOf(50));
overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
String.valueOf(50));
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
// NOTE(review): "checkpointBulder" looks like a typo for "checkpointBuilder",
// but it is only an event-body prefix, so it is harmless.
Set<String> in = fillChannel(channel, "checkpointBulder");
channel.stop();
// Locate every checkpoint artifact the channel wrote under checkpointDir.
File checkpointFile = new File(checkpointDir, "checkpoint");
File metaDataFile = Serialization.getMetaDataFile(checkpointFile);
File inflightTakesFile = new File(checkpointDir, "inflighttakes");
File inflightPutsFile = new File(checkpointDir, "inflightputs");
File queueSetDir = new File(checkpointDir, "queueset");
// Delete all checkpoint state so the only surviving record of the events is
// the data-dir logs; the asserts double as "the files really existed" checks.
Assert.assertTrue(checkpointFile.delete());
Assert.assertTrue(metaDataFile.delete());
Assert.assertTrue(inflightTakesFile.delete());
Assert.assertTrue(inflightPutsFile.delete());
// Build an empty backing store/queue and rebuild it from the logs alone.
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpointFile, 50,
"test", new FileChannelCounter("test"));
FlumeEventQueue queue = new FlumeEventQueue(backingStore, inflightTakesFile,
inflightPutsFile, queueSetDir);
CheckpointRebuilder checkpointRebuilder =
new CheckpointRebuilder(getAllLogs(dataDirs), queue, true);
Assert.assertTrue(checkpointRebuilder.rebuild());
// Reopen the channel on the rebuilt checkpoint and confirm nothing was lost.
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
}
| 9,700 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFlumeEventPointer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.junit.Test;
/**
 * Unit tests for {@link FlumeEventPointer}: accessor behavior, the
 * equals/hashCode contract, and the pack/unpack round trip through a long.
 */
public class TestFlumeEventPointer {

  /** The constructor arguments must be echoed back by the getters. */
  @Test
  public void testGetter() {
    FlumeEventPointer p = new FlumeEventPointer(1, 1);
    Assert.assertEquals(1, p.getFileID());
    Assert.assertEquals(1, p.getOffset());
  }

  /** equals() is symmetric: equal pairs match both ways, unequal pairs fail both ways. */
  @Test
  public void testEquals() {
    FlumeEventPointer first = new FlumeEventPointer(1, 1);
    FlumeEventPointer second = new FlumeEventPointer(1, 1);
    Assert.assertEquals(first, second);
    Assert.assertEquals(second, first);
    first = new FlumeEventPointer(1, 1);
    second = new FlumeEventPointer(2, 2);
    Assert.assertFalse(first.equals(second));
    Assert.assertFalse(second.equals(first));
  }

  /** Equal pointers share a hash code; distinct pointers here must not collide. */
  @Test
  public void testHashCode() {
    FlumeEventPointer first = new FlumeEventPointer(1, 1);
    FlumeEventPointer second = new FlumeEventPointer(1, 1);
    Assert.assertEquals(first.hashCode(), second.hashCode());
    first = new FlumeEventPointer(1, 1);
    second = new FlumeEventPointer(2, 2);
    Assert.assertFalse(first.hashCode() == second.hashCode());
  }

  /**
   * Packing encodes fileID in the high 32 bits and offset in the low 32 bits
   * (e.g. fileID 1, offset 1 packs to 2^32 + 1 = 4294967297), and
   * fromLong(toLong(p)) must reproduce the original pointer.
   */
  @Test
  public void testPack() {
    FlumeEventPointer first = new FlumeEventPointer(1, 1);
    FlumeEventPointer second = new FlumeEventPointer(1, 2);
    Assert.assertEquals(4294967297L, first.toLong());
    Assert.assertEquals(4294967298L, second.toLong());
    Assert.assertEquals(first, FlumeEventPointer.fromLong(first.toLong()));
    Assert.assertEquals(second, FlumeEventPointer.fromLong(second.toLong()));
  }
}
| 9,701 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/CountingSourceRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.util.List;
import org.apache.flume.Channel;
import org.apache.flume.PollableSource;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.ReplicatingChannelSelector;
import com.google.common.collect.Lists;
/**
 * Test helper thread that repeatedly drives a {@link PollableSource}, counting
 * successful {@code process()} calls until a target count is reached or
 * {@link #shutdown()} is invoked. Optionally wires the source to a single
 * channel through a {@link ReplicatingChannelSelector}.
 *
 * <p>Thread-safety: {@code count} is written only by the runner thread but read
 * by the test thread via {@link #getCount()}, hence {@code volatile}. The
 * {@code errors} list is populated by the runner thread; callers should read it
 * only after the runner has finished (as the existing tests do).
 */
public class CountingSourceRunner extends Thread {
  // volatile: single writer (this thread), read by the test thread via getCount().
  private volatile int count;
  private final int until;
  private final PollableSource source;
  private volatile boolean run;
  private final List<Exception> errors = Lists.newArrayList();

  /** Runs until shutdown() — no target count. */
  public CountingSourceRunner(PollableSource source) {
    this(source, Integer.MAX_VALUE);
  }

  /** Runs until {@code until} successful process() calls. */
  public CountingSourceRunner(PollableSource source, int until) {
    this(source, until, null);
  }

  /** Runs until shutdown(), delivering to {@code channel}. */
  public CountingSourceRunner(PollableSource source, Channel channel) {
    this(source, Integer.MAX_VALUE, channel);
  }

  /**
   * @param source  the source to drive
   * @param until   stop after this many successful process() calls
   * @param channel if non-null, the source's channel processor is replaced with
   *                one replicating to just this channel
   */
  public CountingSourceRunner(PollableSource source, int until, Channel channel) {
    this.source = source;
    this.until = until;
    if (channel != null) {
      ReplicatingChannelSelector selector = new ReplicatingChannelSelector();
      List<Channel> channels = Lists.newArrayList();
      channels.add(channel);
      selector.setChannels(channels);
      this.source.setChannelProcessor(new ChannelProcessor(selector));
    }
  }

  @Override
  public void run() {
    run = true;
    while (run && count < until) {
      boolean error = true;
      try {
        if (PollableSource.Status.READY.equals(source.process())) {
          count++;
          error = false;
        }
      } catch (Exception ex) {
        errors.add(ex);
      }
      if (error) {
        // Back off before retrying a source that returned BACKOFF or threw.
        try {
          Thread.sleep(1000L);
        } catch (InterruptedException e) {
          // Restore the interrupt status and stop; the previous behavior of
          // swallowing the interrupt made the runner un-interruptible while
          // sleeping, which can hang tests on teardown.
          Thread.currentThread().interrupt();
          return;
        }
      }
    }
  }

  /** Asks the runner loop to exit after the current iteration. */
  public void shutdown() {
    run = false;
  }

  /** Number of successful process() calls observed so far. */
  public int getCount() {
    return count;
  }

  /** Exceptions thrown by process(); read after the runner has finished. */
  public List<Exception> getErrors() {
    return errors;
  }
}
| 9,702 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannelRollback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Charsets;
import org.apache.flume.Transaction;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.sink.LoggerSink;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Set;
import static org.apache.flume.channel.file.TestUtils.compareInputAndOut;
import static org.apache.flume.channel.file.TestUtils.putEvents;
import static org.apache.flume.channel.file.TestUtils.takeEvents;
/**
 * Verifies that rolled-back transactions leave no trace in the FileChannel,
 * both across a clean restart and across a simulated crash (stop + reopen),
 * with and without a concurrent sink draining the channel.
 */
public class TestFileChannelRollback extends TestFileChannelBase {
protected static final Logger LOG = LoggerFactory
.getLogger(TestFileChannelRollback.class);
@Before
public void setup() throws Exception {
super.setup();
}
@After
public void teardown() {
super.teardown();
}
@Test
public void testRollbackAfterNoPutTake() throws Exception {
channel.start();
Assert.assertTrue(channel.isOpen());
Transaction transaction;
// A transaction that does no puts/takes and is rolled back must still leave
// the log in a reopenable state.
transaction = channel.getTransaction();
transaction.begin();
transaction.rollback();
transaction.close();
// ensure we can reopen log with no error
channel.stop();
channel = createFileChannel();
channel.start();
Assert.assertTrue(channel.isOpen());
// The reopened channel must be empty: take() returns null.
transaction = channel.getTransaction();
transaction.begin();
Assert.assertNull(channel.take());
transaction.commit();
transaction.close();
}
@Test
public void testRollbackSimulatedCrash() throws Exception {
channel.start();
Assert.assertTrue(channel.isOpen());
int numEvents = 50;
// Commit 50 events we expect to survive the crash.
Set<String> in = putEvents(channel, "rollback", 1, numEvents);
Transaction transaction;
// put an item we will rollback
transaction = channel.getTransaction();
transaction.begin();
channel.put(EventBuilder.withBody("rolled back".getBytes(Charsets.UTF_8)));
transaction.rollback();
transaction.close();
// simulate crash
channel.stop();
channel = createFileChannel();
channel.start();
Assert.assertTrue(channel.isOpen());
// we should not get the rolled back item
Set<String> out = takeEvents(channel, 1, numEvents);
compareInputAndOut(in, out);
}
@Test
public void testRollbackSimulatedCrashWithSink() throws Exception {
channel.start();
Assert.assertTrue(channel.isOpen());
int numEvents = 100;
LoggerSink sink = new LoggerSink();
sink.setChannel(channel);
// sink will leave one item
CountingSinkRunner runner = new CountingSinkRunner(sink, numEvents - 1);
runner.start();
putEvents(channel, "rollback", 10, numEvents);
Transaction transaction;
// put an item we will rollback
transaction = channel.getTransaction();
transaction.begin();
byte[] bytes = "rolled back".getBytes(Charsets.UTF_8);
channel.put(EventBuilder.withBody(bytes));
transaction.rollback();
transaction.close();
// Wait for the sink to drain its numEvents - 1 events.
while (runner.isAlive()) {
Thread.sleep(10L);
}
Assert.assertEquals(numEvents - 1, runner.getCount());
for (Exception ex : runner.getErrors()) {
LOG.warn("Sink had error", ex);
}
Assert.assertEquals(Collections.EMPTY_LIST, runner.getErrors());
// simulate crash
channel.stop();
channel = createFileChannel();
channel.start();
Assert.assertTrue(channel.isOpen());
// Exactly one committed event remains, and it is the last one put
// ("rollback-90-9..."), not the rolled-back "rolled back" body.
Set<String> out = takeEvents(channel, 1, 1);
Assert.assertEquals(1, out.size());
String s = out.iterator().next();
Assert.assertTrue(s, s.startsWith("rollback-90-9"));
}
}
| 9,703 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestTransactionEventRecordV2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
// Tests the deprecated V2 on-disk format of TransactionEventRecord: record-type
// tags, serialization round trips for Put/Take/Rollback/Commit, and rejection of
// a corrupt header or an unknown record type.
@SuppressWarnings("deprecation")
public class TestTransactionEventRecordV2 {
@Test
public void testTypes() throws IOException {
// Each concrete record class must report its matching Type tag.
Put put = new Put(System.currentTimeMillis(), WriteOrderOracle.next());
Assert.assertEquals(TransactionEventRecord.Type.PUT.get(),
put.getRecordType());
Take take = new Take(System.currentTimeMillis(), WriteOrderOracle.next());
Assert.assertEquals(TransactionEventRecord.Type.TAKE.get(),
take.getRecordType());
Rollback rollback = new Rollback(System.currentTimeMillis(),
WriteOrderOracle.next());
Assert.assertEquals(TransactionEventRecord.Type.ROLLBACK.get(),
rollback.getRecordType());
Commit commit = new Commit(System.currentTimeMillis(),
WriteOrderOracle.next());
Assert.assertEquals(TransactionEventRecord.Type.COMMIT.get(),
commit.getRecordType());
}
@Test
public void testPutSerialization() throws IOException {
// Round-trip a Put (with an empty event) through the V2 byte format and
// compare every field, including headers and body bytes.
Put in = new Put(System.currentTimeMillis(),
WriteOrderOracle.next(),
new FlumeEvent(new HashMap<String, String>(), new byte[0]));
Put out = (Put)TransactionEventRecord.fromDataInputV2(toDataInput(in));
Assert.assertEquals(in.getClass(), out.getClass());
Assert.assertEquals(in.getRecordType(), out.getRecordType());
Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
Assert.assertEquals(in.getEvent().getHeaders(), out.getEvent().getHeaders());
Assert.assertTrue(Arrays.equals(in.getEvent().getBody(), out.getEvent().getBody()));
}
@Test
public void testTakeSerialization() throws IOException {
// Round-trip a Take; fileID/offset (10, 20) must survive.
Take in = new Take(System.currentTimeMillis(),
WriteOrderOracle.next(), 10, 20);
Take out = (Take)TransactionEventRecord.fromDataInputV2(toDataInput(in));
Assert.assertEquals(in.getClass(), out.getClass());
Assert.assertEquals(in.getRecordType(), out.getRecordType());
Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
Assert.assertEquals(in.getFileID(), out.getFileID());
Assert.assertEquals(in.getOffset(), out.getOffset());
}
@Test
public void testRollbackSerialization() throws IOException {
Rollback in = new Rollback(System.currentTimeMillis(),
WriteOrderOracle.next());
Rollback out = (Rollback)TransactionEventRecord.fromDataInputV2(toDataInput(in));
Assert.assertEquals(in.getClass(), out.getClass());
Assert.assertEquals(in.getRecordType(), out.getRecordType());
Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
}
@Test
public void testCommitSerialization() throws IOException {
Commit in = new Commit(System.currentTimeMillis(),
WriteOrderOracle.next());
Commit out = (Commit)TransactionEventRecord.fromDataInputV2(toDataInput(in));
Assert.assertEquals(in.getClass(), out.getClass());
Assert.assertEquals(in.getRecordType(), out.getRecordType());
Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
}
@Test
public void testBadHeader() throws IOException {
// Writing a zero header instead of the expected magic must be rejected with
// this exact message (the V2 magic renders as "deadbeef").
Put in = new Put(System.currentTimeMillis(),
WriteOrderOracle.next(),
new FlumeEvent(new HashMap<String, String>(), new byte[0]));
try {
TransactionEventRecord.fromDataInputV2(toDataInput(0, in));
Assert.fail();
} catch (IOException e) {
Assert.assertEquals("Header 0 is not the required value: deadbeef",
e.getMessage());
}
}
@Test
public void testBadType() throws IOException {
// A record type outside the known Type values must fail; the current
// implementation surfaces this as an NPE with the hex-formatted type.
TransactionEventRecord in = mock(TransactionEventRecord.class);
when(in.getRecordType()).thenReturn(Short.MIN_VALUE);
try {
TransactionEventRecord.fromDataInputV2(toDataInput(in));
Assert.fail();
} catch (NullPointerException e) {
Assert.assertEquals("Unknown action ffff8000", e.getMessage());
}
}
// Serializes a record via the production V2 writer and re-exposes it as DataInput.
private DataInput toDataInput(TransactionEventRecord record) throws IOException {
ByteBuffer buffer = TransactionEventRecord.toByteBufferV2(record);
ByteArrayInputStream byteInput = new ByteArrayInputStream(buffer.array());
DataInputStream dataInput = new DataInputStream(byteInput);
return dataInput;
}
// Hand-writes the V2 framing (header, type, txid, write-order id, payload) so a
// test can inject an arbitrary/bad header value.
private DataInput toDataInput(int header, TransactionEventRecord record) throws IOException {
ByteArrayOutputStream byteOutput = new ByteArrayOutputStream();
DataOutputStream dataOutput = new DataOutputStream(byteOutput);
dataOutput.writeInt(header);
dataOutput.writeShort(record.getRecordType());
dataOutput.writeLong(record.getTransactionID());
dataOutput.writeLong(record.getLogWriteOrderID());
record.write(dataOutput);
ByteArrayInputStream byteInput = new ByteArrayInputStream(byteOutput.toByteArray());
DataInputStream dataInput = new DataInputStream(byteInput);
return dataInput;
}
} | 9,704 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannelBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import com.google.common.io.Files;
/**
 * Base fixture for FileChannel tests: creates a temp directory tree with a
 * checkpoint dir, a backup dir, and a configurable number of data dirs, opens a
 * channel over them, and cleans everything up afterwards.
 */
public class TestFileChannelBase {
  private final int dataDirCount;
  protected FileChannel channel;
  protected File baseDir;
  protected File checkpointDir;
  protected File[] dataDirs;
  protected String dataDir;
  protected File backupDir;
  protected File uncompressedBackupCheckpoint;
  protected File compressedBackupCheckpoint;

  /** Default constructor: tests run with multiple (three) data directories. */
  public TestFileChannelBase() {
    this(3);
  }

  /** @param dataDirCount number of data directories to create; must be positive */
  public TestFileChannelBase(int dataDirCount) {
    Preconditions.checkArgument(dataDirCount > 0, "Invalid dataDirCount");
    this.dataDirCount = dataDirCount;
  }

  @Before
  public void setup() throws Exception {
    baseDir = Files.createTempDir();
    checkpointDir = new File(baseDir, "chkpt");
    backupDir = new File(baseDir, "backup");
    uncompressedBackupCheckpoint = new File(backupDir, "checkpoint");
    compressedBackupCheckpoint = new File(backupDir, "checkpoint.snappy");
    Assert.assertTrue(checkpointDir.mkdirs() || checkpointDir.isDirectory());
    Assert.assertTrue(backupDir.mkdirs() || backupDir.isDirectory());
    // Create each data dir and build the comma-separated path list the
    // channel configuration expects.
    dataDirs = new File[dataDirCount];
    StringBuilder joined = new StringBuilder();
    for (int idx = 0; idx < dataDirs.length; idx++) {
      dataDirs[idx] = new File(baseDir, "data" + (idx + 1));
      Assert.assertTrue(dataDirs[idx].mkdirs() || dataDirs[idx].isDirectory());
      if (idx > 0) {
        joined.append(',');
      }
      joined.append(dataDirs[idx].getAbsolutePath());
    }
    dataDir = joined.toString();
    channel = createFileChannel();
  }

  @After
  public void teardown() {
    if (channel != null && channel.isOpen()) {
      channel.stop();
    }
    FileUtils.deleteQuietly(baseDir);
  }

  /** Context with no overrides. */
  protected Context createContext() {
    return createContext(new HashMap<String, String>());
  }

  protected Context createContext(Map<String, String> overrides) {
    return TestUtils.createFileChannelContext(checkpointDir.getAbsolutePath(),
        dataDir, backupDir.getAbsolutePath(), overrides);
  }

  /** Channel with no overrides. */
  protected FileChannel createFileChannel() {
    return createFileChannel(new HashMap<String, String>());
  }

  protected FileChannel createFileChannel(Map<String, String> overrides) {
    return TestUtils.createFileChannel(checkpointDir.getAbsolutePath(),
        dataDir, backupDir.getAbsolutePath(), overrides);
  }
}
| 9,705 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/CountingSinkRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Lists;
import org.apache.flume.Sink;
import java.util.List;
public class CountingSinkRunner extends Thread {
private int count;
private final int until;
private final Sink sink;
private volatile boolean run;
private final List<Exception> errors = Lists.newArrayList();
public CountingSinkRunner(Sink sink) {
this(sink, Integer.MAX_VALUE);
}
public CountingSinkRunner(Sink sink, int until) {
this.sink = sink;
this.until = until;
}
@Override
public void run() {
run = true;
while (run && count < until) {
boolean error = true;
try {
if (Sink.Status.READY.equals(sink.process())) {
count++;
error = false;
}
} catch (Exception ex) {
errors.add(ex);
}
if (error) {
try {
Thread.sleep(1000L);
} catch (InterruptedException e) {
}
}
}
}
public void shutdown() {
run = false;
}
public int getCount() {
return count;
}
public List<Exception> getErrors() {
return errors;
}
} | 9,706 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestEventQueueBackingStoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.protobuf.InvalidProtocolBufferException;
import junit.framework.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
public class TestEventQueueBackingStoreFactory {
static final List<Long> pointersInTestCheckpoint = Arrays.asList(new Long[]{
8589936804L,
4294969563L,
12884904153L,
8589936919L,
4294969678L,
12884904268L,
8589937034L,
4294969793L,
12884904383L
});
File baseDir;
File checkpoint;
File inflightTakes;
File inflightPuts;
File queueSetDir;
@Before
public void setup() throws IOException {
baseDir = Files.createTempDir();
checkpoint = new File(baseDir, "checkpoint");
inflightTakes = new File(baseDir, "takes");
inflightPuts = new File(baseDir, "puts");
queueSetDir = new File(baseDir, "queueset");
TestUtils.copyDecompressed("fileformat-v2-checkpoint.gz", checkpoint);
}
@After
public void teardown() {
FileUtils.deleteQuietly(baseDir);
}
@Test
public void testWithNoFlag() throws Exception {
verify(
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test")),
Serialization.VERSION_3, pointersInTestCheckpoint
);
}
@Test
public void testWithFlag() throws Exception {
verify(
EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test"), true
),
Serialization.VERSION_3, pointersInTestCheckpoint
);
}
@Test
public void testNoUprade() throws Exception {
verify(
EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test"), false
),
Serialization.VERSION_2, pointersInTestCheckpoint
);
}
@Test(expected = BadCheckpointException.class)
public void testDecreaseCapacity() throws Exception {
Assert.assertTrue(checkpoint.delete());
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
EventQueueBackingStoreFactory.get(checkpoint, 9, "test", new FileChannelCounter("test"));
Assert.fail();
}
@Test(expected = BadCheckpointException.class)
public void testIncreaseCapacity() throws Exception {
Assert.assertTrue(checkpoint.delete());
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
EventQueueBackingStoreFactory.get(checkpoint, 11, "test", new FileChannelCounter("test"));
Assert.fail();
}
@Test
public void testNewCheckpoint() throws Exception {
Assert.assertTrue(checkpoint.delete());
verify(
EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test"), false
),
Serialization.VERSION_3, Collections.<Long>emptyList()
);
}
@Test(expected = BadCheckpointException.class)
public void testCheckpointBadVersion() throws Exception {
RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
writer.writeLong(94L);
writer.getFD().sync();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
writer.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testIncompleteCheckpoint() throws Exception {
RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
writer.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
writer.writeLong(EventQueueBackingStoreFile.CHECKPOINT_INCOMPLETE);
writer.getFD().sync();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
writer.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testCheckpointVersionNotEqualToMeta() throws Exception {
RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
writer.writeLong(2L);
writer.getFD().sync();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
writer.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testCheckpointVersionNotEqualToMeta2() throws Exception {
FileOutputStream os = null;
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
Assert.assertTrue(checkpoint.exists());
Assert.assertTrue(Serialization.getMetaDataFile(checkpoint).length() != 0);
FileInputStream is = new FileInputStream(Serialization.getMetaDataFile(checkpoint));
ProtosFactory.Checkpoint meta = ProtosFactory.Checkpoint.parseDelimitedFrom(is);
Assert.assertNotNull(meta);
is.close();
os = new FileOutputStream(Serialization.getMetaDataFile(checkpoint));
meta.toBuilder().setVersion(2).build().writeDelimitedTo(os);
os.flush();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
os.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testCheckpointOrderIdNotEqualToMeta() throws Exception {
RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
writer.seek(EventQueueBackingStoreFile.INDEX_WRITE_ORDER_ID * Serialization.SIZE_OF_LONG);
writer.writeLong(2L);
writer.getFD().sync();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
writer.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testCheckpointOrderIdNotEqualToMeta2() throws Exception {
FileOutputStream os = null;
try {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
Assert.assertTrue(checkpoint.exists());
Assert.assertTrue(Serialization.getMetaDataFile(checkpoint).length() != 0);
FileInputStream is = new FileInputStream(Serialization.getMetaDataFile(checkpoint));
ProtosFactory.Checkpoint meta = ProtosFactory.Checkpoint.parseDelimitedFrom(is);
Assert.assertNotNull(meta);
is.close();
os = new FileOutputStream(
Serialization.getMetaDataFile(checkpoint));
meta.toBuilder().setWriteOrderID(1).build().writeDelimitedTo(os);
os.flush();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} finally {
os.close();
}
}
@Test(expected = BadCheckpointException.class)
public void testTruncateMeta() throws Exception {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
Assert.assertTrue(checkpoint.exists());
File metaFile = Serialization.getMetaDataFile(checkpoint);
Assert.assertTrue(metaFile.length() != 0);
RandomAccessFile writer = new RandomAccessFile(metaFile, "rw");
writer.setLength(0);
writer.getFD().sync();
writer.close();
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
}
@Test(expected = InvalidProtocolBufferException.class)
public void testCorruptMeta() throws Throwable {
EventQueueBackingStore backingStore =
EventQueueBackingStoreFactory.get(checkpoint, 10, "test", new FileChannelCounter("test"));
backingStore.close();
Assert.assertTrue(checkpoint.exists());
File metaFile = Serialization.getMetaDataFile(checkpoint);
Assert.assertTrue(metaFile.length() != 0);
RandomAccessFile writer = new RandomAccessFile(metaFile, "rw");
writer.seek(10);
writer.writeLong(new Random().nextLong());
writer.getFD().sync();
writer.close();
try {
backingStore = EventQueueBackingStoreFactory.get(
checkpoint, 10, "test", new FileChannelCounter("test")
);
} catch (BadCheckpointException ex) {
throw ex.getCause();
}
}
private void verify(EventQueueBackingStore backingStore, long expectedVersion,
List<Long> expectedPointers) throws Exception {
FlumeEventQueue queue =
new FlumeEventQueue(backingStore, inflightTakes, inflightPuts, queueSetDir);
List<Long> actualPointers = Lists.newArrayList();
FlumeEventPointer ptr;
while ((ptr = queue.removeHead(0L)) != null) {
actualPointers.add(ptr.toLong());
}
Assert.assertEquals(expectedPointers, actualPointers);
Assert.assertEquals(10, backingStore.getCapacity());
DataInputStream in = new DataInputStream(new FileInputStream(checkpoint));
long actualVersion = in.readLong();
Assert.assertEquals(expectedVersion, actualVersion);
in.close();
}
}
| 9,707 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestLogRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.util.Collections;
import java.util.List;
import junit.framework.Assert;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestLogRecord {

  @Test
  public void testConstructor() {
    long now = System.currentTimeMillis();
    Commit commit = new Commit(now, now + 1);
    LogRecord logRecord = new LogRecord(1, 2, commit);
    // The commit should echo back the ids it was constructed with.
    Assert.assertTrue(commit.getTransactionID() == now);
    Assert.assertTrue(commit.getLogWriteOrderID() == now + 1);
    // The record should expose its file id, offset and wrapped event unchanged.
    Assert.assertTrue(logRecord.getFileID() == 1);
    Assert.assertTrue(logRecord.getOffset() == 2);
    Assert.assertTrue(logRecord.getEvent() == commit);
  }

  @Test
  public void testSortOrder() {
    // Records should sort in the reverse of insertion order because each
    // successive record carries an older (smaller) write-order id.
    long now = System.currentTimeMillis();
    List<LogRecord> records = Lists.newArrayList();
    for (int i = 0; i < 3; i++) {
      records.add(new LogRecord(1, i, new Commit((long) i, now - i)));
    }
    for (int expectedOffset = 2; expectedOffset >= 0; expectedOffset--) {
      LogRecord smallest = Collections.min(records);
      Assert.assertTrue(String.valueOf(smallest.getOffset()),
          expectedOffset == smallest.getOffset());
      records.remove(smallest);
    }
  }
}
| 9,708 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/encryption/EncryptionTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import org.apache.flume.channel.file.TestUtils;
import javax.crypto.KeyGenerator;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.Key;
import java.security.KeyStore;
import java.util.List;
import java.util.Map;
/**
 * Helpers for building JCEKS key stores and the Flume context properties that
 * point the file channel's encryption machinery at them.
 */
public class EncryptionTestUtils {

  /** Generates a fresh AES key, wrapping any JCE failure as unchecked. */
  private static Key newKey() {
    try {
      return KeyGenerator.getInstance("AES").generateKey();
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }

  /**
   * Creates a JCEKS key store containing one fresh AES key per alias.
   *
   * @param keyAliasPassword alias to per-key password file; a null value means
   *                         the key is protected by the key store password
   */
  public static void createKeyStore(File keyStoreFile, File keyStorePasswordFile,
                                    Map<String, File> keyAliasPassword) throws Exception {
    KeyStore ks = KeyStore.getInstance("jceks");
    ks.load(null);
    for (Map.Entry<String, File> entry : keyAliasPassword.entrySet()) {
      Key key = newKey();
      File passwordFile = entry.getValue();
      char[] password;
      if (passwordFile == null) {
        // No per-key password file: reuse the key store password.
        password = Files.toString(keyStorePasswordFile, Charsets.UTF_8).toCharArray();
      } else {
        password = Files.toString(passwordFile, Charsets.UTF_8).toCharArray();
      }
      ks.setKeyEntry(entry.getKey(), key, password, null);
    }
    char[] keyStorePassword = Files.toString(keyStorePasswordFile, Charsets.UTF_8).toCharArray();
    FileOutputStream outputStream = new FileOutputStream(keyStoreFile);
    try {
      ks.store(outputStream, keyStorePassword);
    } finally {
      // Close in finally so the handle is not leaked if store() fails.
      outputStream.close();
    }
  }

  /**
   * Copies the vendor-appropriate pre-built test key store into place and
   * returns the alias-to-password-file map describing its contents.
   */
  public static Map<String, File> configureTestKeyStore(File baseDir, File keyStoreFile)
      throws IOException {
    Map<String, File> result = Maps.newHashMap();
    // IBM and Sun/Oracle JVMs cannot read each other's JCEKS stores.
    String resource = System.getProperty("java.vendor").contains("IBM")
        ? "ibm-test.keystore" : "sun-test.keystore";
    FileOutputStream out = new FileOutputStream(keyStoreFile);
    try {
      // Resources.copy does not close the supplied stream, so close it here.
      Resources.copy(Resources.getResource(resource), out);
    } finally {
      out.close();
    }
    /* Commands below:
     * keytool -genseckey -alias key-0 -keypass keyPassword -keyalg AES \
     *   -keysize 128 -validity 9000 -keystore src/test/resources/test.keystore \
     *   -storetype jceks -storepass keyStorePassword
     * keytool -genseckey -alias key-1 -keyalg AES -keysize 128 -validity 9000 \
     *   -keystore src/test/resources/test.keystore -storetype jceks \
     *   -storepass keyStorePassword
     */
    // key-0 has its own password, key-1 uses the key store password
    result.put("key-0", TestUtils.writeStringToFile(baseDir, "key-0", "keyPassword"));
    result.put("key-1", null);
    return result;
  }

  /**
   * Builds the flat property map that configures the JCEKS file key provider
   * for the given store, store password and per-key password files.
   */
  public static Map<String, String> configureForKeyStore(File keyStoreFile,
                                                         File keyStorePasswordFile,
                                                         Map<String, File> keyAliasPassword)
      throws Exception {
    Map<String, String> context = Maps.newHashMap();
    List<String> keys = Lists.newArrayList();
    Joiner joiner = Joiner.on(".");
    for (Map.Entry<String, File> entry : keyAliasPassword.entrySet()) {
      String alias = entry.getKey();
      File passwordFile = entry.getValue();
      keys.add(alias);
      if (passwordFile != null) {
        // Key has its own password: point the provider at its password file.
        String propertyName = joiner.join(EncryptionConfiguration.KEY_PROVIDER,
            EncryptionConfiguration.JCE_FILE_KEYS,
            alias,
            EncryptionConfiguration.JCE_FILE_KEY_PASSWORD_FILE);
        context.put(propertyName, passwordFile.getAbsolutePath());
      }
    }
    context.put(joiner.join(EncryptionConfiguration.KEY_PROVIDER,
        EncryptionConfiguration.JCE_FILE_KEY_STORE_FILE),
        keyStoreFile.getAbsolutePath());
    if (keyStorePasswordFile != null) {
      context.put(joiner.join(EncryptionConfiguration.KEY_PROVIDER,
          EncryptionConfiguration.JCE_FILE_KEY_STORE_PASSWORD_FILE),
          keyStorePasswordFile.getAbsolutePath());
    }
    context.put(joiner.join(EncryptionConfiguration.KEY_PROVIDER,
        EncryptionConfiguration.JCE_FILE_KEYS),
        Joiner.on(" ").join(keys));
    return context;
  }
}
| 9,709 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/encryption/TestAESCTRNoPaddingProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.security.Key;
import javax.crypto.KeyGenerator;
import org.junit.Before;
import org.junit.Test;
/** Runs the shared cipher-provider suite against AES/CTR/NoPadding. */
public class TestAESCTRNoPaddingProvider {
  private Key key;
  private CipherProvider.Encryptor encryptor;
  private CipherProvider.Decryptor decryptor;
  private CipherProviderTestSuite cipherProviderTestSuite;

  @Before
  public void setup() throws Exception {
    // Generate a throwaway AES key and wire up matching encrypt/decrypt sides.
    key = KeyGenerator.getInstance("AES").generateKey();
    String cipherType = CipherProviderType.AESCTRNOPADDING.name();
    encryptor = CipherProviderFactory.getEncrypter(cipherType, key);
    // The decryptor must be seeded with the encryptor's parameters (the IV).
    decryptor = CipherProviderFactory.getDecrypter(cipherType, key,
        encryptor.getParameters());
    cipherProviderTestSuite = new CipherProviderTestSuite(encryptor, decryptor);
  }

  @Test
  public void test() throws Exception {
    cipherProviderTestSuite.test();
  }
}
| 9,710 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/encryption/CipherProviderTestSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import junit.framework.Assert;
import com.google.common.base.Charsets;
/**
 * Reusable checks for any encryptor/decryptor pair: round-trips normal and
 * empty payloads and verifies both sides are null-hostile.
 */
public class CipherProviderTestSuite {
  private final CipherProvider.Encryptor encryptor;
  private final CipherProvider.Decryptor decryptor;

  public CipherProviderTestSuite(CipherProvider.Encryptor encryptor,
                                 CipherProvider.Decryptor decryptor) {
    this.encryptor = encryptor;
    this.decryptor = decryptor;
  }

  /** Runs every check in the suite. */
  public void test() throws Exception {
    testBasic();
    testEmpty();
    testNullPlainText();
    testNullCipherText();
  }

  /** A non-trivial payload must round-trip unchanged. */
  public void testBasic() throws Exception {
    String expected = "mn state fair is the place to be";
    byte[] cipherText = encryptor.encrypt(expected.getBytes(Charsets.UTF_8));
    byte[] clearText = decryptor.decrypt(cipherText);
    Assert.assertEquals(expected, new String(clearText, Charsets.UTF_8));
  }

  /** An empty payload must round-trip to an empty string. */
  public void testEmpty() throws Exception {
    String expected = "";
    byte[] cipherText = encryptor.encrypt(new byte[]{});
    byte[] clearText = decryptor.decrypt(cipherText);
    // Decode with an explicit charset (matching testBasic) instead of the
    // platform default.
    Assert.assertEquals(expected, new String(clearText, Charsets.UTF_8));
  }

  /** Encrypting null must be rejected with NullPointerException. */
  public void testNullPlainText() throws Exception {
    try {
      encryptor.encrypt(null);
      Assert.fail();
    } catch (NullPointerException e) {
      // expected
    }
  }

  /** Decrypting null must be rejected with NullPointerException. */
  public void testNullCipherText() throws Exception {
    try {
      decryptor.decrypt(null);
      Assert.fail();
    } catch (NullPointerException e) {
      // expected
    }
  }
}
| 9,711 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/encryption/TestJCEFileKeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import junit.framework.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.channel.file.TestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.security.Key;
import java.util.Map;
/**
 * Exercises the JCEKS-file key provider against both a freshly generated key
 * store and the pre-built store shipped in test resources.
 */
public class TestJCEFileKeyProvider {
  private CipherProvider.Encryptor encryptor;
  private CipherProvider.Decryptor decryptor;
  private File baseDir;
  private File keyStoreFile;
  private File keyStorePasswordFile;
  private Map<String, File> keyAliasPassword;

  @Before
  public void setup() throws Exception {
    baseDir = Files.createTempDir();
    keyStorePasswordFile = new File(baseDir, "keyStorePasswordFile");
    Files.write("keyStorePassword", keyStorePasswordFile, Charsets.UTF_8);
    keyAliasPassword = Maps.newHashMap();
    keyStoreFile = new File(baseDir, "keyStoreFile");
    Assert.assertTrue(keyStoreFile.createNewFile());
  }

  @After
  public void cleanup() {
    FileUtils.deleteQuietly(baseDir);
  }

  // Builds an encryptor/decryptor pair for the given key; the decryptor is
  // seeded with the encryptor's parameters (the IV).
  private void initializeForKey(Key key) {
    encryptor = new AESCTRNoPaddingProvider.EncryptorBuilder()
        .setKey(key)
        .build();
    decryptor = new AESCTRNoPaddingProvider.DecryptorBuilder()
        .setKey(key)
        .setParameters(encryptor.getParameters())
        .build();
  }

  // Constructs a JCEKSFILE key provider from the current key store fixture.
  private KeyProvider buildKeyProvider() throws Exception {
    Context context = new Context(
        EncryptionTestUtils.configureForKeyStore(keyStoreFile,
                                                 keyStorePasswordFile,
                                                 keyAliasPassword));
    Context keyProviderContext = new Context(
        context.getSubProperties(EncryptionConfiguration.KEY_PROVIDER + "."));
    return KeyProviderFactory.getInstance(KeyProviderType.JCEKSFILE.name(),
        keyProviderContext);
  }

  @Test
  public void testWithNewKeyStore() throws Exception {
    createNewKeyStore();
    EncryptionTestUtils.createKeyStore(keyStoreFile, keyStorePasswordFile,
        keyAliasPassword);
    testKeyProvider(buildKeyProvider());
  }

  @Test
  public void testWithExistingKeyStore() throws Exception {
    keyAliasPassword.putAll(EncryptionTestUtils.configureTestKeyStore(baseDir, keyStoreFile));
    testKeyProvider(buildKeyProvider());
  }

  private void createNewKeyStore() throws Exception {
    // Even-numbered aliases get their own password file; odd ones are skipped
    // entirely, mirroring a store where some keys use the store password.
    for (int i = 0; i < 10; i += 2) {
      String alias = "test-" + i;
      String password = String.valueOf(i);
      keyAliasPassword.put(alias, TestUtils.writeStringToFile(baseDir, alias, password));
    }
  }

  // Every alias must yield a key that round-trips a payload unchanged.
  private void testKeyProvider(KeyProvider keyProvider) {
    for (String alias : keyAliasPassword.keySet()) {
      initializeForKey(keyProvider.getKey(alias));
      String expected = "some text here " + alias;
      byte[] cipherText = encryptor.encrypt(expected.getBytes(Charsets.UTF_8));
      byte[] clearText = decryptor.decrypt(cipherText);
      Assert.assertEquals(expected, new String(clearText, Charsets.UTF_8));
    }
  }
}
| 9,712 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/encryption/TestFileChannelEncryption.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import org.apache.flume.ChannelException;
import org.apache.flume.FlumeException;
import org.apache.flume.channel.file.FileChannelConfiguration;
import org.apache.flume.channel.file.TestFileChannelBase;
import org.apache.flume.channel.file.TestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.flume.channel.file.TestUtils.compareInputAndOut;
import static org.apache.flume.channel.file.TestUtils.consumeChannel;
import static org.apache.flume.channel.file.TestUtils.fillChannel;
import static org.apache.flume.channel.file.TestUtils.putEvents;
import static org.apache.flume.channel.file.TestUtils.takeEvents;
public class TestFileChannelEncryption extends TestFileChannelBase {
protected static final Logger LOGGER =
LoggerFactory.getLogger(TestFileChannelEncryption.class);
private File keyStoreFile;
private File keyStorePasswordFile;
private Map<String, File> keyAliasPassword;
@Before
public void setup() throws Exception {
super.setup();
keyStorePasswordFile = new File(baseDir, "keyStorePasswordFile");
Files.write("keyStorePassword", keyStorePasswordFile, Charsets.UTF_8);
keyStoreFile = new File(baseDir, "keyStoreFile");
Assert.assertTrue(keyStoreFile.createNewFile());
keyAliasPassword = Maps.newHashMap();
keyAliasPassword.putAll(EncryptionTestUtils.configureTestKeyStore(baseDir, keyStoreFile));
}
@After
public void teardown() {
super.teardown();
}
private Map<String, String> getOverrides() throws Exception {
Map<String, String> overrides = Maps.newHashMap();
overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(100));
overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, String.valueOf(100));
return overrides;
}
private Map<String, String> getOverridesForEncryption() throws Exception {
Map<String, String> overrides = getOverrides();
Map<String, String> encryptionProps =
EncryptionTestUtils.configureForKeyStore(keyStoreFile,
keyStorePasswordFile,
keyAliasPassword);
encryptionProps.put(EncryptionConfiguration.KEY_PROVIDER,
KeyProviderType.JCEKSFILE.name());
encryptionProps.put(EncryptionConfiguration.CIPHER_PROVIDER,
CipherProviderType.AESCTRNOPADDING.name());
encryptionProps.put(EncryptionConfiguration.ACTIVE_KEY, "key-1");
for (String key : encryptionProps.keySet()) {
overrides.put(EncryptionConfiguration.ENCRYPTION_PREFIX + "." + key,
encryptionProps.get(key));
}
return overrides;
}
/**
* Test fails without FLUME-1565
*/
@Test
public void testThreadedConsume() throws Exception {
int numThreads = 20;
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
String.valueOf(100));
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Executor executor = Executors.newFixedThreadPool(numThreads);
Set<String> in = fillChannel(channel, "threaded-consume");
final AtomicBoolean error = new AtomicBoolean(false);
final CountDownLatch startLatch = new CountDownLatch(numThreads);
final CountDownLatch stopLatch = new CountDownLatch(numThreads);
final Set<String> out = Collections.synchronizedSet(new HashSet<String>());
for (int i = 0; i < numThreads; i++) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
startLatch.countDown();
startLatch.await();
out.addAll(takeEvents(channel, 10));
} catch (Throwable t) {
error.set(true);
LOGGER.error("Error in take thread", t);
} finally {
stopLatch.countDown();
}
}
});
}
stopLatch.await();
Assert.assertFalse(error.get());
compareInputAndOut(in, out);
}
@Test
public void testThreadedProduce() throws Exception {
int numThreads = 20;
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
String.valueOf(100));
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Executor executor = Executors.newFixedThreadPool(numThreads);
final AtomicBoolean error = new AtomicBoolean(false);
final CountDownLatch startLatch = new CountDownLatch(numThreads);
final CountDownLatch stopLatch = new CountDownLatch(numThreads);
final Set<String> in = Collections.synchronizedSet(new HashSet<String>());
for (int i = 0; i < numThreads; i++) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
startLatch.countDown();
startLatch.await();
in.addAll(putEvents(channel, "thread-produce", 10, 10000, true));
} catch (Throwable t) {
error.set(true);
LOGGER.error("Error in put thread", t);
} finally {
stopLatch.countDown();
}
}
});
}
stopLatch.await();
Set<String> out = consumeChannel(channel);
Assert.assertFalse(error.get());
compareInputAndOut(in, out);
}
@Test
public void testConfiguration() throws Exception {
Map<String, String> overrides = Maps.newHashMap();
overrides.put("encryption.activeKey", "key-1");
overrides.put("encryption.cipherProvider", "AESCTRNOPADDING");
overrides.put("encryption.keyProvider", "JCEKSFILE");
overrides.put("encryption.keyProvider.keyStoreFile",
keyStoreFile.getAbsolutePath());
overrides.put("encryption.keyProvider.keyStorePasswordFile",
keyStorePasswordFile.getAbsolutePath());
overrides.put("encryption.keyProvider.keys", "key-0 key-1");
overrides.put("encryption.keyProvider.keys.key-0.passwordFile",
keyAliasPassword.get("key-0").getAbsolutePath());
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> in = fillChannel(channel, "restart");
channel.stop();
channel = TestUtils.createFileChannel(checkpointDir.getAbsolutePath(),
dataDir, overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
@Test
public void testBasicEncyrptionDecryption() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> in = fillChannel(channel, "restart");
channel.stop();
channel = TestUtils.createFileChannel(checkpointDir.getAbsolutePath(),
dataDir, overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
@Test
public void testEncryptedChannelWithoutEncryptionConfigFails() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
fillChannel(channel, "will-not-restart");
channel.stop();
Map<String, String> noEncryptionOverrides = getOverrides();
channel = createFileChannel(noEncryptionOverrides);
channel.start();
if (channel.isOpen()) {
try {
takeEvents(channel, 1, 1);
Assert.fail("Channel was opened and take did not throw exception");
} catch (ChannelException ex) {
// expected
}
}
}
@Test
public void testUnencyrptedAndEncryptedLogs() throws Exception {
Map<String, String> noEncryptionOverrides = getOverrides();
channel = createFileChannel(noEncryptionOverrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> in = fillChannel(channel, "unencrypted-and-encrypted");
int numEventsToRemove = in.size() / 2;
for (int i = 0; i < numEventsToRemove; i++) {
Assert.assertTrue(in.removeAll(takeEvents(channel, 1, 1)));
}
// now we have logs with no encryption and the channel is half full
channel.stop();
Map<String, String> overrides = getOverridesForEncryption();
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
in.addAll(fillChannel(channel, "unencrypted-and-encrypted"));
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
@Test
public void testBadKeyProviderInvalidValue() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.KEY_PROVIDER),
"invalid");
try {
channel = createFileChannel(overrides);
Assert.fail();
} catch (FlumeException ex) {
Assert.assertEquals("java.lang.ClassNotFoundException: invalid", ex.getMessage());
}
}
@Test
public void testBadKeyProviderInvalidClass() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.KEY_PROVIDER),
String.class.getName());
try {
channel = createFileChannel(overrides);
Assert.fail();
} catch (FlumeException ex) {
Assert.assertEquals("Unable to instantiate Builder from java.lang.String", ex.getMessage());
}
}
@Test
public void testBadCipherProviderInvalidValue() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.CIPHER_PROVIDER),
"invalid");
channel = createFileChannel(overrides);
channel.start();
Assert.assertFalse(channel.isOpen());
}
@Test
public void testBadCipherProviderInvalidClass() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.CIPHER_PROVIDER), String.class.getName());
channel = createFileChannel(overrides);
channel.start();
Assert.assertFalse(channel.isOpen());
}
@Test
public void testMissingKeyStoreFile() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.KEY_PROVIDER,
EncryptionConfiguration.JCE_FILE_KEY_STORE_FILE),
"/path/does/not/exist");
try {
channel = createFileChannel(overrides);
Assert.fail();
} catch (RuntimeException ex) {
Assert.assertTrue("Exception message is incorrect: " + ex.getMessage(),
ex.getMessage().startsWith("java.io.FileNotFoundException: /path/does/not/exist "));
}
}
@Test
public void testMissingKeyStorePasswordFile() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(Joiner.on(".").join(EncryptionConfiguration.ENCRYPTION_PREFIX,
EncryptionConfiguration.KEY_PROVIDER,
EncryptionConfiguration.JCE_FILE_KEY_STORE_PASSWORD_FILE),
"/path/does/not/exist");
try {
channel = createFileChannel(overrides);
Assert.fail();
} catch (RuntimeException ex) {
Assert.assertTrue("Exception message is incorrect: " + ex.getMessage(),
ex.getMessage().startsWith("java.io.FileNotFoundException: /path/does/not/exist "));
}
}
@Test
public void testBadKeyStorePassword() throws Exception {
Files.write("invalid", keyStorePasswordFile, Charsets.UTF_8);
Map<String, String> overrides = getOverridesForEncryption();
try {
channel = TestUtils.createFileChannel(checkpointDir.getAbsolutePath(),
dataDir, overrides);
Assert.fail();
} catch (RuntimeException ex) {
Assert.assertEquals("java.io.IOException: Keystore was tampered with, or " +
"password was incorrect", ex.getMessage());
}
}
@Test
public void testBadKeyAlias() throws Exception {
Map<String, String> overrides = getOverridesForEncryption();
overrides.put(EncryptionConfiguration.ENCRYPTION_PREFIX + "." +
EncryptionConfiguration.ACTIVE_KEY, "invalid");
channel = TestUtils.createFileChannel(checkpointDir.getAbsolutePath(),
dataDir, overrides);
channel.start();
Assert.assertFalse(channel.isOpen());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.IOException;
/**
 * Signals an I/O failure against a channel data file that the caller may
 * safely retry, for example after rolling to a new log file. Carries no
 * state beyond the standard {@link IOException} message and cause.
 */
public class LogFileRetryableIOException extends IOException {
  private static final long serialVersionUID = -2747112999806160431L;

  /** Creates an exception with neither detail message nor cause. */
  public LogFileRetryableIOException() {
  }

  /** Creates an exception carrying only a detail message. */
  public LogFileRetryableIOException(String msg) {
    super(msg);
  }

  /** Creates an exception carrying a detail message and its underlying cause. */
  public LogFileRetryableIOException(String msg, Throwable t) {
    super(msg, t);
  }
}

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.nio.ByteBuffer;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
/**
 * Base class for records in data file: Put, Take, Rollback, Commit.
 * Provides (de)serialization in two on-disk formats: the legacy v2
 * Writable layout and the current v3 protobuf layout.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class TransactionEventRecord implements Writable {
  private static final Logger LOG = LoggerFactory
      .getLogger(TransactionEventRecord.class);
  // Identifier of the transaction this record belongs to; fixed at creation.
  private final long transactionID;
  // Global write-order id (logical clock value) assigned when the record is logged.
  private long logWriteOrderID;
  protected TransactionEventRecord(long transactionID, long logWriteOrderID) {
    this.transactionID = transactionID;
    this.logWriteOrderID = logWriteOrderID;
  }
  // Legacy (v2) Writable deserialization hook; base class has no extra fields,
  // subclasses append their payload.
  @Override
  public void readFields(DataInput in) throws IOException {
  }
  // Legacy (v2) Writable serialization hook; see readFields.
  @Override
  public void write(DataOutput out) throws IOException {
  }
  /** Writes the record body in the v3 protobuf format. */
  abstract void writeProtos(OutputStream out) throws IOException;
  /** Reads the record body in the v3 protobuf format. */
  abstract void readProtos(InputStream in) throws IOException, CorruptEventException;
  long getLogWriteOrderID() {
    return logWriteOrderID;
  }
  long getTransactionID() {
    return transactionID;
  }
  /** Type tag distinguishing Put/Take/Rollback/Commit; see {@link Type}. */
  abstract short getRecordType();
  /**
   * Provides a minimum guarantee we are not reading complete junk
   */
  static final int MAGIC_HEADER = 0xdeadbeef;
  // On-disk record type tags; the short values are persisted and must not change.
  static enum Type {
    PUT((short)1),
    TAKE((short)2),
    ROLLBACK((short)3),
    COMMIT((short)4);
    private short id;
    Type(short id) {
      this.id = id;
    }
    public short get() {
      return id;
    }
  }
  // Maps each persisted type tag to the (Long, Long) constructor of the
  // matching record subclass, used for reflective instantiation on replay.
  private static final ImmutableMap<Short, Constructor<? extends TransactionEventRecord>> TYPES;
  static {
    ImmutableMap.Builder<Short, Constructor<? extends TransactionEventRecord>> builder =
        ImmutableMap.<Short, Constructor<? extends TransactionEventRecord>>builder();
    try {
      builder.put(Type.PUT.get(),
          Put.class.getDeclaredConstructor(Long.class, Long.class));
      builder.put(Type.TAKE.get(),
          Take.class.getDeclaredConstructor(Long.class, Long.class));
      builder.put(Type.ROLLBACK.get(),
          Rollback.class.getDeclaredConstructor(Long.class, Long.class));
      builder.put(Type.COMMIT.get(),
          Commit.class.getDeclaredConstructor(Long.class, Long.class));
    } catch (Exception e) {
      Throwables.propagate(e);
    }
    TYPES = builder.build();
  }
  /**
   * Serializes a record in the legacy v2 layout:
   * MAGIC_HEADER, type, transactionID, writeOrderID, then the record body.
   */
  @Deprecated
  static ByteBuffer toByteBufferV2(TransactionEventRecord record) {
    ByteArrayOutputStream byteOutput = new ByteArrayOutputStream(512);
    DataOutputStream dataOutput = new DataOutputStream(byteOutput);
    try {
      dataOutput.writeInt(MAGIC_HEADER);
      dataOutput.writeShort(record.getRecordType());
      dataOutput.writeLong(record.getTransactionID());
      dataOutput.writeLong(record.getLogWriteOrderID());
      record.write(dataOutput);
      dataOutput.flush();
      // TODO toByteArray does an unneeded copy
      return ByteBuffer.wrap(byteOutput.toByteArray());
    } catch (IOException e) {
      // near impossible
      throw Throwables.propagate(e);
    } finally {
      if (dataOutput != null) {
        try {
          dataOutput.close();
        } catch (IOException e) {
          LOG.warn("Error closing byte array output stream", e);
        }
      }
    }
  }
  /**
   * Deserializes a record written by {@link #toByteBufferV2}, validating the
   * magic header before dispatching on the type tag.
   *
   * @throws IOException if the magic header does not match
   */
  @Deprecated
  static TransactionEventRecord fromDataInputV2(DataInput in)
      throws IOException {
    int header = in.readInt();
    if (header != MAGIC_HEADER) {
      throw new IOException("Header " + Integer.toHexString(header) +
          " is not the required value: " + Integer.toHexString(MAGIC_HEADER));
    }
    short type = in.readShort();
    long transactionID = in.readLong();
    long writeOrderID = in.readLong();
    TransactionEventRecord entry = newRecordForType(type, transactionID,
        writeOrderID);
    entry.readFields(in);
    return entry;
  }
  /**
   * Serializes a record in the v3 protobuf layout:
   * delimited header, record body, delimited footer.
   */
  static ByteBuffer toByteBuffer(TransactionEventRecord record) {
    ByteArrayOutputStream byteOutput = new ByteArrayOutputStream(512);
    try {
      ProtosFactory.TransactionEventHeader.Builder headerBuilder =
          ProtosFactory.TransactionEventHeader.newBuilder();
      headerBuilder.setType(record.getRecordType());
      headerBuilder.setTransactionID(record.getTransactionID());
      headerBuilder.setWriteOrderID(record.getLogWriteOrderID());
      headerBuilder.build().writeDelimitedTo(byteOutput);
      record.writeProtos(byteOutput);
      ProtosFactory.TransactionEventFooter footer =
          ProtosFactory.TransactionEventFooter.newBuilder().build();
      footer.writeDelimitedTo(byteOutput);
      return ByteBuffer.wrap(byteOutput.toByteArray());
    } catch (IOException e) {
      throw Throwables.propagate(e);
    } finally {
      if (byteOutput != null) {
        try {
          byteOutput.close();
        } catch (IOException e) {
          LOG.warn("Error closing byte array output stream", e);
        }
      }
    }
  }
  /**
   * Parses a v3 protobuf record (header, body, footer) from a byte array.
   *
   * @throws CorruptEventException if the bytes cannot be parsed as protobuf
   */
  static TransactionEventRecord fromByteArray(byte[] buffer)
      throws IOException, CorruptEventException {
    ByteArrayInputStream in = new ByteArrayInputStream(buffer);
    try {
      ProtosFactory.TransactionEventHeader header = Preconditions.checkNotNull(
          ProtosFactory.TransactionEventHeader.parseDelimitedFrom(in), "Header cannot be null");
      short type = (short)header.getType();
      long transactionID = header.getTransactionID();
      long writeOrderID = header.getWriteOrderID();
      TransactionEventRecord transactionEvent = newRecordForType(type, transactionID, writeOrderID);
      transactionEvent.readProtos(in);
      @SuppressWarnings("unused")
      ProtosFactory.TransactionEventFooter footer = Preconditions.checkNotNull(
          ProtosFactory.TransactionEventFooter.parseDelimitedFrom(in), "Footer cannot be null");
      return transactionEvent;
    } catch (InvalidProtocolBufferException ex) {
      throw new CorruptEventException("Could not parse event from data file.", ex);
    } finally {
      try {
        in.close();
      } catch (IOException e) {
        LOG.warn("Error closing byte array input stream", e);
      }
    }
  }
  /** Returns the simple class name for a record type tag (used for logging). */
  static String getName(short type) {
    Constructor<? extends TransactionEventRecord> constructor = TYPES.get(type);
    Preconditions.checkNotNull(constructor, "Unknown action " +
        Integer.toHexString(type));
    return constructor.getDeclaringClass().getSimpleName();
  }
  // Instantiates the record subclass registered for the given type tag via
  // the reflective constructor table above.
  private static TransactionEventRecord newRecordForType(short type,
      long transactionID, long writeOrderID) {
    Constructor<? extends TransactionEventRecord> constructor = TYPES.get(type);
    Preconditions.checkNotNull(constructor, "Unknown action " +
        Integer.toHexString(type));
    try {
      return constructor.newInstance(transactionID, writeOrderID);
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
}

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.channel.file.encryption.CipherProvider;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.apache.flume.tools.DirectMemoryUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class LogFile {
private static final Logger LOG = LoggerFactory.getLogger(LogFile.class);
/**
* This class preallocates the data files 1MB at time to avoid
* the updating of the inode on each write and to avoid the disk
* filling up during a write. It's also faster, so there.
*/
private static final ByteBuffer FILL = DirectMemoryUtils.allocate(1024 * 1024);
public static final byte OP_RECORD = Byte.MAX_VALUE;
public static final byte OP_NOOP = (Byte.MAX_VALUE + Byte.MIN_VALUE) / 2;
public static final byte OP_EOF = Byte.MIN_VALUE;
static {
for (int i = 0; i < FILL.capacity(); i++) {
FILL.put(OP_EOF);
}
}
  /**
   * Advances the file handle past the length-delimited record payload that
   * starts at {@code offset}: reads the 4-byte length there, then skips that
   * many payload bytes.
   */
  protected static void skipRecord(RandomAccessFile fileHandle,
      int offset) throws IOException {
    fileHandle.seek(offset);
    int length = fileHandle.readInt();
    fileHandle.skipBytes(length);
  }
  /**
   * Writes checkpoint metadata for a single data file. Subclasses define the
   * on-disk layout per log format version; this base class owns the metadata
   * file handle and tracks the last checkpoint offset and write-order id.
   */
  abstract static class MetaDataWriter {
    private final File file;
    private final int logFileID;
    private final RandomAccessFile writeFileHandle;
    // Offset in the data file covered by the most recent checkpoint.
    private long lastCheckpointOffset;
    // Global write-order id recorded at the most recent checkpoint.
    private long lastCheckpointWriteOrderID;
    protected MetaDataWriter(File file, int logFileID) throws IOException {
      this.file = file;
      this.logFileID = logFileID;
      writeFileHandle = new RandomAccessFile(file, "rw");
    }
    protected RandomAccessFile getFileHandle() {
      return writeFileHandle;
    }
    protected void setLastCheckpointOffset(long lastCheckpointOffset) {
      this.lastCheckpointOffset = lastCheckpointOffset;
    }
    protected void setLastCheckpointWriteOrderID(long lastCheckpointWriteOrderID) {
      this.lastCheckpointWriteOrderID = lastCheckpointWriteOrderID;
    }
    protected long getLastCheckpointOffset() {
      return lastCheckpointOffset;
    }
    protected long getLastCheckpointWriteOrderID() {
      return lastCheckpointWriteOrderID;
    }
    protected File getFile() {
      return file;
    }
    protected int getLogFileID() {
      return logFileID;
    }
    /** Records a checkpoint at the previously stored checkpoint offset. */
    void markCheckpoint(long logWriteOrderID)
        throws IOException {
      markCheckpoint(lastCheckpointOffset, logWriteOrderID);
    }
    /** Persists a checkpoint at the given data-file position. */
    abstract void markCheckpoint(long currentPosition, long logWriteOrderID)
        throws IOException;
    /** Log format version this writer produces. */
    abstract int getVersion();
    // Best-effort close: failures are logged, not propagated.
    void close() {
      try {
        writeFileHandle.close();
      } catch (IOException e) {
        LOG.warn("Unable to close " + file, e);
      }
    }
  }
  /**
   * Caches the result of {@link File#getUsableSpace()} and refreshes it at
   * most once per {@code interval} milliseconds; between refreshes the cached
   * value is decremented by the size of each write, since querying the
   * filesystem on every write is expensive.
   */
  @VisibleForTesting
  static class CachedFSUsableSpace {
    private final File fs;
    private final long interval;
    private final AtomicLong lastRefresh;
    private final AtomicLong value;
    CachedFSUsableSpace(File fs, long interval) {
      this.fs = fs;
      this.interval = interval;
      this.value = new AtomicLong(fs.getUsableSpace());
      this.lastRefresh = new AtomicLong(System.currentTimeMillis());
    }
    // Subtracts bytes just written from the cached free-space estimate.
    void decrement(long numBytes) {
      Preconditions.checkArgument(numBytes >= 0, "numBytes less than zero");
      value.addAndGet(-numBytes);
    }
    long getUsableSpace() {
      long now = System.currentTimeMillis();
      // Equivalent to (now - lastRefresh) > interval: re-query when stale.
      if (now - interval > lastRefresh.get()) {
        value.set(fs.getUsableSpace());
        lastRefresh.set(now);
      }
      // Never report negative space even if decrements overshoot.
      return Math.max(value.get(), 0L);
    }
  }
  /**
   * Append-only writer for a single data file. All mutating methods are
   * synchronized because the encryptor and file channel are shared. Depending
   * on configuration, data is fsynced either on every transaction or by a
   * background task every {@code fsyncInterval} seconds.
   */
  abstract static class Writer {
    private final int logFileID;
    private final File file;
    private final long maxFileSize;
    private final RandomAccessFile writeFileHandle;
    private final FileChannel writeFileChannel;
    private final CipherProvider.Encryptor encryptor;
    private final CachedFSUsableSpace usableSpace;
    private volatile boolean open;
    // File positions of the last commit record and the last fsync; an fsync
    // is only needed while lastSyncPosition < lastCommitPosition.
    private long lastCommitPosition;
    private long lastSyncPosition;
    private final boolean fsyncPerTransaction;
    private final int fsyncInterval;
    private final ScheduledExecutorService syncExecutor;
    // Set when a commit is written, cleared once that data is fsynced; lets
    // the background sync task skip files with nothing new to flush.
    private volatile boolean dirty = false;
    // To ensure we can count the number of fsyncs.
    private long syncCount;

    Writer(File file, int logFileID, long maxFileSize,
        CipherProvider.Encryptor encryptor, long usableSpaceRefreshInterval,
        boolean fsyncPerTransaction, int fsyncInterval) throws IOException {
      this.file = file;
      this.logFileID = logFileID;
      // Cap the configured size at the hard maximum supported by the format.
      this.maxFileSize = Math.min(maxFileSize,
          FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE);
      this.encryptor = encryptor;
      writeFileHandle = new RandomAccessFile(file, "rw");
      writeFileChannel = writeFileHandle.getChannel();
      this.fsyncPerTransaction = fsyncPerTransaction;
      this.fsyncInterval = fsyncInterval;
      if (!fsyncPerTransaction) {
        // Batched-sync mode: a single-threaded scheduler periodically fsyncs
        // any dirty data instead of syncing on every commit.
        LOG.info("Sync interval = " + fsyncInterval);
        syncExecutor = Executors.newSingleThreadScheduledExecutor();
        syncExecutor.scheduleWithFixedDelay(new Runnable() {
          @Override
          public void run() {
            try {
              sync();
            } catch (Throwable ex) {
              LOG.error("Data file, " + getFile().toString() + " could not " +
                  "be synced to disk due to an error.", ex);
            }
          }
        }, fsyncInterval, fsyncInterval, TimeUnit.SECONDS);
      } else {
        syncExecutor = null;
      }
      usableSpace = new CachedFSUsableSpace(file, usableSpaceRefreshInterval);
      LOG.info("Opened " + file);
      open = true;
    }
    abstract int getVersion();
    protected CipherProvider.Encryptor getEncryptor() {
      return encryptor;
    }
    int getLogFileID() {
      return logFileID;
    }
    File getFile() {
      return file;
    }
    String getParent() {
      return file.getParent();
    }
    long getUsableSpace() {
      return usableSpace.getUsableSpace();
    }
    long getMaxSize() {
      return maxFileSize;
    }
    @VisibleForTesting
    long getLastCommitPosition() {
      return lastCommitPosition;
    }
    @VisibleForTesting
    long getLastSyncPosition() {
      return lastSyncPosition;
    }
    @VisibleForTesting
    long getSyncCount() {
      return syncCount;
    }
    synchronized long position() throws IOException {
      return getFileChannel().position();
    }
    // encrypt and write methods may not be thread safe in the following
    // methods, so all methods need to be synchronized.

    /** Appends a Put record; returns a (fileID, offset) pointer to it. */
    synchronized FlumeEventPointer put(ByteBuffer buffer) throws IOException {
      if (encryptor != null) {
        buffer = ByteBuffer.wrap(encryptor.encrypt(buffer.array()));
      }
      Pair<Integer, Integer> pair = write(buffer);
      return new FlumeEventPointer(pair.getLeft(), pair.getRight());
    }
    /** Appends a Take record. */
    synchronized void take(ByteBuffer buffer) throws IOException {
      if (encryptor != null) {
        buffer = ByteBuffer.wrap(encryptor.encrypt(buffer.array()));
      }
      write(buffer);
    }
    /** Appends a Rollback record. */
    synchronized void rollback(ByteBuffer buffer) throws IOException {
      if (encryptor != null) {
        buffer = ByteBuffer.wrap(encryptor.encrypt(buffer.array()));
      }
      write(buffer);
    }
    /** Appends a Commit record and marks the file dirty for the next sync. */
    synchronized void commit(ByteBuffer buffer) throws IOException {
      if (encryptor != null) {
        buffer = ByteBuffer.wrap(encryptor.encrypt(buffer.array()));
      }
      write(buffer);
      dirty = true;
      lastCommitPosition = position();
    }
    /**
     * Appends one framed record (OP_RECORD marker, 4-byte length, payload) at
     * the current end of the file.
     *
     * @return pair of (logFileID, offset of the record within the file)
     * @throws LogFileRetryableIOException if the file is closed or the write
     *         would exceed the maximum file size (caller should roll files)
     */
    private Pair<Integer, Integer> write(ByteBuffer buffer)
        throws IOException {
      if (!isOpen()) {
        throw new LogFileRetryableIOException("File closed " + file);
      }
      long length = position();
      long expectedLength = length + (long) buffer.limit();
      if (expectedLength > maxFileSize) {
        throw new LogFileRetryableIOException(expectedLength + " > " +
            maxFileSize);
      }
      int offset = (int) length;
      Preconditions.checkState(offset >= 0, String.valueOf(offset));
      // OP_RECORD + size + buffer
      int recordLength = 1 + (int) Serialization.SIZE_OF_INT + buffer.limit();
      usableSpace.decrement(recordLength);
      preallocate(recordLength);
      ByteBuffer toWrite = ByteBuffer.allocate(recordLength);
      toWrite.put(OP_RECORD);
      writeDelimitedBuffer(toWrite, buffer);
      toWrite.position(0);
      int wrote = getFileChannel().write(toWrite);
      Preconditions.checkState(wrote == toWrite.limit());
      return Pair.of(getLogFileID(), offset);
    }
    /** True if appending this buffer would exceed the maximum file size. */
    synchronized boolean isRollRequired(ByteBuffer buffer) throws IOException {
      return isOpen() && position() + (long) buffer.limit() > getMaxSize();
    }
    /**
     * Sync the underlying log file to disk. Expensive call,
     * should be used only on commits. If a sync has already happened after
     * the last commit, this method is a no-op
     *
     * @throws IOException
     * @throws LogFileRetryableIOException - if this log file is closed.
     */
    synchronized void sync() throws IOException {
      if (!fsyncPerTransaction && !dirty) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "No events written to file, " + getFile().toString() +
                  " in last " + fsyncInterval + " or since last commit.");
        }
        return;
      }
      if (!isOpen()) {
        throw new LogFileRetryableIOException("File closed " + file);
      }
      // Only force to disk if there is committed data newer than the last sync.
      if (lastSyncPosition < lastCommitPosition) {
        getFileChannel().force(false);
        lastSyncPosition = position();
        syncCount++;
        dirty = false;
      }
    }
    protected boolean isOpen() {
      return open;
    }
    protected RandomAccessFile getFileHandle() {
      return writeFileHandle;
    }
    protected FileChannel getFileChannel() {
      return writeFileChannel;
    }
    synchronized void close() {
      if (open) {
        open = false;
        if (!fsyncPerTransaction) {
          // Shutdown the executor before attempting to close.
          if (syncExecutor != null) {
            // No need to wait for it to shutdown.
            syncExecutor.shutdown();
          }
        }
        if (writeFileChannel.isOpen()) {
          LOG.info("Closing " + file);
          try {
            writeFileChannel.force(true);
          } catch (IOException e) {
            LOG.warn("Unable to flush to disk " + file, e);
          }
          try {
            writeFileHandle.close();
          } catch (IOException e) {
            LOG.warn("Unable to close " + file, e);
          }
        }
      }
    }
    // Extends the file in 1MB FILL chunks ahead of the write position so each
    // append does not grow the file (see the FILL comment on the outer class).
    protected void preallocate(int size) throws IOException {
      long position = position();
      if (position + size > getFileChannel().size()) {
        LOG.debug("Preallocating at position " + position);
        synchronized (FILL) {
          FILL.position(0);
          getFileChannel().write(FILL, position);
        }
      }
    }
  }
/**
* This is an class meant to be an internal Flume API,
* and can change at any time. Intended to be used only from File Channel Integrity
* test tool. Not to be used for any other purpose.
*/
public static class OperationRecordUpdater {
private final RandomAccessFile fileHandle;
private final File file;
public OperationRecordUpdater(File file) throws FileNotFoundException {
Preconditions.checkState(file.exists(), "File to update, " +
file.toString() + " does not exist.");
this.file = file;
fileHandle = new RandomAccessFile(file, "rw");
}
public void markRecordAsNoop(long offset) throws IOException {
// First ensure that the offset actually is an OP_RECORD. There is a
// small possibility that it still is OP_RECORD,
// but is not actually the beginning of a record. Is there anything we
// can do about it?
fileHandle.seek(offset);
byte byteRead = fileHandle.readByte();
Preconditions.checkState(byteRead == OP_RECORD || byteRead == OP_NOOP,
"Expected to read a record but the byte read indicates EOF");
fileHandle.seek(offset);
LOG.info("Marking event as " + OP_NOOP + " at " + offset + " for file " +
file.toString());
fileHandle.writeByte(OP_NOOP);
}
public void close() {
try {
fileHandle.getFD().sync();
fileHandle.close();
} catch (IOException e) {
LOG.error("Could not close file handle to file " +
fileHandle.toString(), e);
}
}
}
  /**
   * Random-access reader for Put records in a data file. Maintains a bounded
   * pool (up to 50) of RandomAccessFile handles so concurrent takes do not
   * contend on a single file descriptor.
   */
  abstract static class RandomReader {
    private final File file;
    // Fair, bounded pool of read handles shared across reader threads.
    private final BlockingQueue<RandomAccessFile> readFileHandles =
        new ArrayBlockingQueue<RandomAccessFile>(50, true);
    private final KeyProvider encryptionKeyProvider;
    private final boolean fsyncPerTransaction;
    private volatile boolean open;
    public RandomReader(File file, @Nullable KeyProvider
        encryptionKeyProvider, boolean fsyncPerTransaction)
        throws IOException {
      this.file = file;
      this.encryptionKeyProvider = encryptionKeyProvider;
      // Seed the pool with one handle so the file is validated eagerly.
      readFileHandles.add(open());
      this.fsyncPerTransaction = fsyncPerTransaction;
      open = true;
    }
    /** Reads and decodes the record at the handle's current position. */
    protected abstract TransactionEventRecord doGet(RandomAccessFile fileHandle)
        throws IOException, CorruptEventException;
    abstract int getVersion();
    File getFile() {
      return file;
    }
    protected KeyProvider getKeyProvider() {
      return encryptionKeyProvider;
    }
    /**
     * Returns the event of the Put record stored at the given offset.
     *
     * @throws NoopRecordException if the record was voided by the integrity tool
     * @throws CorruptEventException if the byte at offset is not a record marker
     */
    FlumeEvent get(int offset) throws IOException, InterruptedException,
        CorruptEventException, NoopRecordException {
      Preconditions.checkState(open, "File closed");
      RandomAccessFile fileHandle = checkOut();
      boolean error = true;
      try {
        fileHandle.seek(offset);
        byte operation = fileHandle.readByte();
        if (operation == OP_NOOP) {
          throw new NoopRecordException("No op record found. Corrupt record " +
              "may have been repaired by File Channel Integrity tool");
        }
        if (operation != OP_RECORD) {
          throw new CorruptEventException(
              "Operation code is invalid. File " +
                  "is corrupt. Please run File Channel Integrity tool.");
        }
        TransactionEventRecord record = doGet(fileHandle);
        if (!(record instanceof Put)) {
          Preconditions.checkState(false, "Record is " +
              record.getClass().getSimpleName());
        }
        error = false;
        return ((Put) record).getEvent();
      } finally {
        // A handle that failed may be positioned mid-record; discard it
        // rather than returning it to the pool.
        if (error) {
          close(fileHandle, file);
        } else {
          checkIn(fileHandle);
        }
      }
    }
    synchronized void close() {
      if (open) {
        open = false;
        LOG.info("Closing RandomReader " + file);
        List<RandomAccessFile> fileHandles = Lists.newArrayList();
        // Repeatedly drain the pool so handles checked out by concurrent
        // readers are also closed once they get checked back in.
        while (readFileHandles.drainTo(fileHandles) > 0) {
          for (RandomAccessFile fileHandle : fileHandles) {
            synchronized (fileHandle) {
              try {
                fileHandle.close();
              } catch (IOException e) {
                LOG.warn("Unable to close fileHandle for " + file, e);
              }
            }
          }
          fileHandles.clear();
          try {
            Thread.sleep(5L);
          } catch (InterruptedException e) {
            // this is uninterruptable
          }
        }
      }
    }
    private RandomAccessFile open() throws IOException {
      return new RandomAccessFile(file, "r");
    }
    // Returns a handle to the pool; closes it instead if the pool is full.
    private void checkIn(RandomAccessFile fileHandle) {
      if (!readFileHandles.offer(fileHandle)) {
        close(fileHandle, file);
      }
    }
    // Takes a pooled handle, opening a new one while capacity remains;
    // otherwise blocks until some other reader returns a handle.
    private RandomAccessFile checkOut()
        throws IOException, InterruptedException {
      RandomAccessFile fileHandle = readFileHandles.poll();
      if (fileHandle != null) {
        return fileHandle;
      }
      int remaining = readFileHandles.remainingCapacity();
      if (remaining > 0) {
        LOG.info("Opening " + file + " for read, remaining number of file " +
            "handles available for reads of this file is " + remaining);
        return open();
      }
      return readFileHandles.take();
    }
    private static void close(RandomAccessFile fileHandle, File file) {
      if (fileHandle != null) {
        try {
          fileHandle.close();
        } catch (IOException e) {
          LOG.warn("Unable to close " + file, e);
        }
      }
    }
  }
public abstract static class SequentialReader {
private final RandomAccessFile fileHandle;
private final FileChannel fileChannel;
private final File file;
private final KeyProvider encryptionKeyProvider;
private int logFileID;
private long lastCheckpointPosition;
private long lastCheckpointWriteOrderID;
private long backupCheckpointPosition;
private long backupCheckpointWriteOrderID;
/**
* Construct a Sequential Log Reader object
*
* @param file
* @throws IOException if an I/O error occurs
* @throws EOFException if the file is empty
*/
SequentialReader(File file, @Nullable KeyProvider encryptionKeyProvider)
throws IOException, EOFException {
this.file = file;
this.encryptionKeyProvider = encryptionKeyProvider;
fileHandle = new RandomAccessFile(file, "r");
fileChannel = fileHandle.getChannel();
}
abstract LogRecord doNext(int offset) throws IOException, CorruptEventException;
abstract int getVersion();
protected void setLastCheckpointPosition(long lastCheckpointPosition) {
this.lastCheckpointPosition = lastCheckpointPosition;
}
protected void setLastCheckpointWriteOrderID(long lastCheckpointWriteOrderID) {
this.lastCheckpointWriteOrderID = lastCheckpointWriteOrderID;
}
protected void setPreviousCheckpointPosition(
long backupCheckpointPosition) {
this.backupCheckpointPosition = backupCheckpointPosition;
}
protected void setPreviousCheckpointWriteOrderID(
long backupCheckpointWriteOrderID) {
this.backupCheckpointWriteOrderID = backupCheckpointWriteOrderID;
}
protected void setLogFileID(int logFileID) {
this.logFileID = logFileID;
Preconditions.checkArgument(logFileID >= 0, "LogFileID is not positive: "
+ Integer.toHexString(logFileID));
}
protected KeyProvider getKeyProvider() {
return encryptionKeyProvider;
}
protected RandomAccessFile getFileHandle() {
return fileHandle;
}
int getLogFileID() {
return logFileID;
}
void skipToLastCheckpointPosition(long checkpointWriteOrderID)
throws IOException {
if (lastCheckpointPosition > 0L) {
long position = 0;
if (lastCheckpointWriteOrderID <= checkpointWriteOrderID) {
position = lastCheckpointPosition;
} else if (backupCheckpointWriteOrderID <= checkpointWriteOrderID
&& backupCheckpointPosition > 0) {
position = backupCheckpointPosition;
}
fileChannel.position(position);
LOG.info("fast-forward to checkpoint position: " + position);
} else {
LOG.info("Checkpoint for file(" + file.getAbsolutePath() + ") "
+ "is: " + lastCheckpointWriteOrderID + ", which is beyond the "
+ "requested checkpoint time: " + checkpointWriteOrderID
+ " and position " + lastCheckpointPosition);
}
}
public LogRecord next() throws IOException, CorruptEventException {
int offset = -1;
try {
long position = fileChannel.position();
if (position > FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE) {
LOG.info("File position exceeds the threshold: "
+ FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE
+ ", position: " + position);
}
offset = (int) position;
Preconditions.checkState(offset >= 0);
while (offset < fileHandle.length()) {
byte operation = fileHandle.readByte();
if (operation == OP_RECORD) {
break;
} else if (operation == OP_EOF) {
LOG.info("Encountered EOF at " + offset + " in " + file);
return null;
} else if (operation == OP_NOOP) {
LOG.info("No op event found in file: " + file.toString() +
" at " + offset + ". Skipping event.");
skipRecord(fileHandle, offset + 1);
offset = (int) fileHandle.getFilePointer();
continue;
} else {
LOG.error("Encountered non op-record at " + offset + " " +
Integer.toHexString(operation) + " in " + file);
return null;
}
}
if (offset >= fileHandle.length()) {
return null;
}
return doNext(offset);
} catch (EOFException e) {
return null;
} catch (IOException e) {
throw new IOException("Unable to read next Transaction from log file " +
file.getCanonicalPath() + " at offset " + offset, e);
}
}
public long getPosition() throws IOException {
return fileChannel.position();
}
public void close() {
if (fileHandle != null) {
try {
fileHandle.close();
} catch (IOException e) {
}
}
}
}
  /**
   * Appends {@code buffer} to {@code output} preceded by its 4-byte length,
   * the framing expected by {@link #readDelimitedBuffer(RandomAccessFile)}.
   */
  protected static void writeDelimitedBuffer(ByteBuffer output, ByteBuffer buffer)
      throws IOException {
    output.putInt(buffer.limit());
    output.put(buffer);
  }
  /**
   * Reads a length-delimited payload (4-byte length, then that many bytes)
   * from the handle's current position.
   *
   * @throws CorruptEventException if the stored length is negative or the
   *         file ends before the full payload could be read
   */
  protected static byte[] readDelimitedBuffer(RandomAccessFile fileHandle)
      throws IOException, CorruptEventException {
    int length = fileHandle.readInt();
    if (length < 0) {
      throw new CorruptEventException("Length of event is: " + String.valueOf(length) +
          ". Event must have length >= 0. Possible corruption of data or partial fsync.");
    }
    byte[] buffer = new byte[length];
    try {
      fileHandle.readFully(buffer);
    } catch (EOFException ex) {
      throw new CorruptEventException("Remaining data in file less than " +
          "expected size of event.", ex);
    }
    return buffer;
  }
public static void main(String[] args) throws EOFException, IOException, CorruptEventException {
File file = new File(args[0]);
LogFile.SequentialReader reader = null;
try {
reader = LogFileFactory.getSequentialReader(file, null, false);
LogRecord entry;
FlumeEventPointer ptr;
// for puts the fileId is the fileID of the file they exist in
// for takes the fileId and offset are pointers to a put
int fileId = reader.getLogFileID();
int count = 0;
int readCount = 0;
int putCount = 0;
int takeCount = 0;
int rollbackCount = 0;
int commitCount = 0;
while ((entry = reader.next()) != null) {
int offset = entry.getOffset();
TransactionEventRecord record = entry.getEvent();
short type = record.getRecordType();
long trans = record.getTransactionID();
long ts = record.getLogWriteOrderID();
readCount++;
ptr = null;
if (type == TransactionEventRecord.Type.PUT.get()) {
putCount++;
ptr = new FlumeEventPointer(fileId, offset);
} else if (type == TransactionEventRecord.Type.TAKE.get()) {
takeCount++;
Take take = (Take) record;
ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
} else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
rollbackCount++;
} else if (type == TransactionEventRecord.Type.COMMIT.get()) {
commitCount++;
} else {
Preconditions.checkArgument(false, "Unknown record type: "
+ Integer.toHexString(type));
}
System.out.println(Joiner.on(", ").skipNulls().join(
trans, ts, fileId, offset, TransactionEventRecord.getName(type), ptr));
}
System.out.println("Replayed " + count + " from " + file + " read: " + readCount
+ ", put: " + putCount + ", take: "
+ takeCount + ", rollback: " + rollbackCount + ", commit: "
+ commitCount);
} catch (EOFException e) {
System.out.println("Hit EOF on " + file);
} finally {
if (reader != null) {
reader.close();
}
}
}
}

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import org.apache.commons.io.FileUtils;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xerial.snappy.SnappyInputStream;
import org.xerial.snappy.SnappyOutputStream;
import javax.annotation.Nullable;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Collections;
import java.util.Set;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Serialization {
  // Static utility holder; never instantiated.
  private Serialization() {
  }
  static final long SIZE_OF_INT = 4;
  static final int SIZE_OF_LONG = 8;
  static final int VERSION_2 = 2;
  static final int VERSION_3 = 3;
  public static final String METADATA_FILENAME = ".meta";
  public static final String METADATA_TMP_FILENAME = ".tmp";
  public static final String OLD_METADATA_FILENAME = METADATA_FILENAME + ".old";
  // 64 K buffer to copy and compress files.
  private static final int FILE_BUFFER_SIZE = 64 * 1024;
  public static final Logger LOG = LoggerFactory.getLogger(Serialization.class);

  /**
   * Returns the temporary metadata file ("&lt;name&gt;.tmp") that lives in the
   * same directory as the given metadata file.
   */
  static File getMetaDataTempFile(File metaDataFile) {
    String metaDataFileName = metaDataFile.getName() + METADATA_TMP_FILENAME;
    return new File(metaDataFile.getParentFile(), metaDataFileName);
  }

  /**
   * Returns the metadata file ("&lt;name&gt;.meta") that lives in the same
   * directory as the given data file.
   */
  static File getMetaDataFile(File file) {
    String metaDataFileName = file.getName() + METADATA_FILENAME;
    return new File(file.getParentFile(), metaDataFileName);
  }

  // Support platforms that cannot do atomic renames - FLUME-1699
  static File getOldMetaDataFile(File file) {
    String oldMetaDataFileName = file.getName() + OLD_METADATA_FILENAME;
    return new File(file.getParentFile(), oldMetaDataFileName);
  }

  /**
   * Deletes all files in given directory.
   *
   * @param checkpointDir - The directory whose files are to be deleted
   * @param excludes - Names of files which should not be deleted from this
   * directory.
   * @return - true if all files were successfully deleted, false otherwise.
   */
  static boolean deleteAllFiles(File checkpointDir,
                                @Nullable Set<String> excludes) {
    if (!checkpointDir.isDirectory()) {
      return false;
    }
    File[] files = checkpointDir.listFiles();
    if (files == null) {
      // listFiles() returns null on I/O error, not just for non-directories.
      return false;
    }
    StringBuilder builder;
    if (files.length == 0) {
      return true;
    } else {
      builder = new StringBuilder("Deleted the following files: ");
    }
    if (excludes == null) {
      excludes = Collections.emptySet();
    }
    for (File file : files) {
      if (excludes.contains(file.getName())) {
        LOG.info("Skipping " + file.getName() + " because it is in excludes " +
            "set");
        continue;
      }
      if (!FileUtils.deleteQuietly(file)) {
        // Log whatever was deleted so far before reporting the failure.
        LOG.info(builder.toString());
        LOG.error("Error while attempting to delete: " +
            file.getAbsolutePath());
        return false;
      }
      builder.append(", ").append(file.getName());
    }
    builder.append(".");
    LOG.info(builder.toString());
    return true;
  }

  /**
   * Copy a file using a 64K size buffer. This method will copy the file and
   * then fsync to disk
   *
   * @param from File to copy - this file should exist
   * @param to Destination file - this file should not exist
   * @return true if the copy was successful
   * @throws IOException if the copy fails, or if the number of copied bytes
   *                     does not match the source file's length
   */
  public static boolean copyFile(File from, File to) throws IOException {
    Preconditions.checkNotNull(from, "Source file is null, file copy failed.");
    Preconditions.checkNotNull(to, "Destination file is null, " +
        "file copy failed.");
    Preconditions.checkState(from.exists(), "Source file: " + from.toString() +
        " does not exist.");
    Preconditions.checkState(!to.exists(), "Destination file: "
        + to.toString() + " unexpectedly exists.");

    BufferedInputStream in = null;
    RandomAccessFile out = null; //use a RandomAccessFile for easy fsync
    try {
      in = new BufferedInputStream(new FileInputStream(from));
      out = new RandomAccessFile(to, "rw");
      byte[] buf = new byte[FILE_BUFFER_SIZE];
      int total = 0;
      while (true) {
        int read = in.read(buf);
        if (read == -1) {
          break;
        }
        out.write(buf, 0, read);
        total += read;
      }
      // Force the copied bytes to disk before declaring success.
      out.getFD().sync();
      Preconditions.checkState(total == from.length(),
          "The size of the origin file and destination file are not equal.");
      return true;
    } catch (Exception ex) {
      LOG.error("Error while attempting to copy " + from.toString() + " to "
          + to.toString() + ".", ex);
      Throwables.propagate(ex);
    } finally {
      Throwable th = null;
      try {
        if (in != null) {
          in.close();
        }
      } catch (Throwable ex) {
        LOG.error("Error while closing input file.", ex);
        th = ex;
      }
      try {
        if (out != null) {
          out.close();
        }
      } catch (IOException ex) {
        LOG.error("Error while closing output file.", ex);
        Throwables.propagate(ex);
      }
      if (th != null) {
        Throwables.propagate(th);
      }
    }
    // Should never reach here.
    throw new IOException("Copying file: " + from.toString() + " to: " + to
        .toString() + " may have failed.");
  }

  /**
   * Compress file using Snappy
   *
   * @param uncompressed File to compress - this file should exist
   * @param compressed Compressed file - this file should not exist
   * @return true if compression was successful
   */
  public static boolean compressFile(File uncompressed, File compressed)
      throws IOException {
    Preconditions.checkNotNull(uncompressed,
        "Source file is null, compression failed.");
    Preconditions.checkNotNull(compressed,
        "Destination file is null, compression failed.");
    Preconditions.checkState(uncompressed.exists(), "Source file: " +
        uncompressed.toString() + " does not exist.");
    Preconditions.checkState(!compressed.exists(),
        "Compressed file: " + compressed.toString() + " unexpectedly " + "exists.");

    BufferedInputStream in = null;
    FileOutputStream out = null;
    SnappyOutputStream snappyOut = null;
    try {
      in = new BufferedInputStream(new FileInputStream(uncompressed));
      out = new FileOutputStream(compressed);
      snappyOut = new SnappyOutputStream(out);
      byte[] buf = new byte[FILE_BUFFER_SIZE];
      while (true) {
        int read = in.read(buf);
        if (read == -1) {
          break;
        }
        snappyOut.write(buf, 0, read);
      }
      // Flush Snappy's internal buffer before fsync, otherwise the tail of
      // the data would be written after the sync and not be durable.
      snappyOut.flush();
      out.getFD().sync();
      return true;
    } catch (Exception ex) {
      LOG.error("Error while attempting to compress " +
          uncompressed.toString() + " to " + compressed.toString() + ".", ex);
      Throwables.propagate(ex);
    } finally {
      Throwable th = null;
      try {
        if (in != null) {
          in.close();
        }
      } catch (Throwable ex) {
        LOG.error("Error while closing input file.", ex);
        th = ex;
      }
      try {
        if (snappyOut != null) {
          // Closing the Snappy stream also closes the wrapped FileOutputStream.
          snappyOut.close();
        } else if (out != null) {
          // SnappyOutputStream construction failed; close the raw stream so
          // the file descriptor is not leaked.
          out.close();
        }
      } catch (IOException ex) {
        LOG.error("Error while closing output file.", ex);
        Throwables.propagate(ex);
      }
      if (th != null) {
        Throwables.propagate(th);
      }
    }
    // Should never reach here.
    throw new IOException("Compressing file: " + uncompressed.toString()
        + " to: " + compressed.toString() + " may have failed.");
  }

  /**
   * Decompress file using Snappy
   *
   * @param compressed File to decompress - this file should exist
   * @param decompressed Decompressed file - this file should not exist
   * @return true if decompression was successful
   */
  public static boolean decompressFile(File compressed, File decompressed) throws IOException {
    Preconditions.checkNotNull(compressed, "Source file is null, decompression failed.");
    Preconditions.checkNotNull(decompressed, "Destination file is " +
        "null, decompression failed.");
    Preconditions.checkState(compressed.exists(), "Source file: " +
        compressed.toString() + " does not exist.");
    Preconditions.checkState(!decompressed.exists(),
        "Decompressed file: " + decompressed.toString() + " unexpectedly exists.");

    BufferedInputStream in = null;
    SnappyInputStream snappyIn = null;
    FileOutputStream out = null;
    try {
      in = new BufferedInputStream(new FileInputStream(compressed));
      snappyIn = new SnappyInputStream(in);
      out = new FileOutputStream(decompressed);
      byte[] buf = new byte[FILE_BUFFER_SIZE];
      while (true) {
        int read = snappyIn.read(buf);
        if (read == -1) {
          break;
        }
        out.write(buf, 0, read);
      }
      // Force the decompressed bytes to disk before declaring success.
      out.getFD().sync();
      return true;
    } catch (Exception ex) {
      LOG.error("Error while attempting to decompress " +
          compressed.toString() + " to " + decompressed.toString() +
          ".", ex);
      Throwables.propagate(ex);
    } finally {
      Throwable th = null;
      try {
        if (in != null) {
          in.close();
        }
      } catch (Throwable ex) {
        LOG.error("Error while closing input file.", ex);
        th = ex;
      }
      try {
        if (snappyIn != null) {
          snappyIn.close();
        }
      } catch (IOException ex) {
        LOG.error("Error while closing input file.", ex);
        Throwables.propagate(ex);
      }
      try {
        // The original code leaked this stream: the output file was never
        // closed on any path.
        if (out != null) {
          out.close();
        }
      } catch (IOException ex) {
        LOG.error("Error while closing output file.", ex);
        Throwables.propagate(ex);
      }
      if (th != null) {
        Throwables.propagate(th);
      }
    }
    // Should never reach here.
    throw new IOException("Decompressing file: " +
        compressed.toString() + " to: " + decompressed.toString() +
        " may have failed.");
  }
}
| 9,717 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/EventQueueBackingStoreFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Maps;
import com.google.common.collect.SetMultimap;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.LongBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel.MapMode;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * File-backed implementation of the event queue backing store. The checkpoint
 * file is memory-mapped and viewed as an array of longs: the first
 * {@link #HEADER_SIZE} slots hold metadata (version, write-order id,
 * checkpoint-complete marker), the rest hold queue elements. Writes are
 * buffered in {@link #overwriteMap} and flushed to the mapped buffer only
 * during {@link #checkpoint()} so a crash between checkpoints leaves the
 * on-disk state consistent.
 */
abstract class EventQueueBackingStoreFile extends EventQueueBackingStore {
  private static final Logger LOG = LoggerFactory.getLogger(EventQueueBackingStoreFile.class);
  private static final int MAX_ALLOC_BUFFER_SIZE = 2 * 1024 * 1024; // 2MB
  // Number of leading long slots reserved for checkpoint metadata.
  protected static final int HEADER_SIZE = 1029;
  // Header slot indices (in longs).
  protected static final int INDEX_VERSION = 0;
  protected static final int INDEX_WRITE_ORDER_ID = 1;
  protected static final int INDEX_CHECKPOINT_MARKER = 4;
  // Marker values written to INDEX_CHECKPOINT_MARKER around a checkpoint.
  protected static final int CHECKPOINT_COMPLETE = 0;
  protected static final int CHECKPOINT_INCOMPLETE = 1;
  protected static final String COMPRESSED_FILE_EXTENSION = ".snappy";
  protected LongBuffer elementsBuffer;
  // Pending slot writes, applied to elementsBuffer at checkpoint time.
  protected final Map<Integer, Long> overwriteMap = new HashMap<Integer, Long>();
  // Reference count of queued events per data-log file id.
  protected final Map<Integer, AtomicInteger> logFileIDReferenceCounts = Maps.newHashMap();
  protected final MappedByteBuffer mappedBuffer;
  protected final RandomAccessFile checkpointFileHandle;
  private final FileChannelCounter fileChannelCounter;
  protected final File checkpointFile;
  // Holds one permit while no backup is in flight; drained while backing up.
  private final Semaphore backupCompletedSema = new Semaphore(1);
  protected final boolean shouldBackup;
  protected final boolean compressBackup;
  private final File backupDir;
  private final ExecutorService checkpointBackUpExecutor;

  protected EventQueueBackingStoreFile(
      int capacity, String name, FileChannelCounter fileChannelCounter, File checkpointFile
  ) throws IOException, BadCheckpointException {
    this(capacity, name, fileChannelCounter, checkpointFile, null, false, false);
  }

  /**
   * Opens (creating and preallocating if necessary) the checkpoint file,
   * validates its size, version and completeness markers, and memory-maps it.
   *
   * @throws BadCheckpointException if the file's capacity, version or
   *         checkpoint marker do not match expectations
   */
  protected EventQueueBackingStoreFile(
      int capacity, String name, FileChannelCounter fileChannelCounter, File checkpointFile,
      File checkpointBackupDir, boolean backupCheckpoint, boolean compressBackup
  ) throws IOException, BadCheckpointException {
    super(capacity, name);
    this.fileChannelCounter = fileChannelCounter;
    this.checkpointFile = checkpointFile;
    this.shouldBackup = backupCheckpoint;
    this.compressBackup = compressBackup;
    this.backupDir = checkpointBackupDir;
    checkpointFileHandle = new RandomAccessFile(checkpointFile, "rw");
    // Widen before multiplying: (capacity + HEADER_SIZE) * 8 overflows int
    // arithmetic for capacities above ~268M.
    long totalBytes = ((long) capacity + HEADER_SIZE) * Serialization.SIZE_OF_LONG;
    if (checkpointFileHandle.length() == 0) {
      allocate(checkpointFile, totalBytes);
      checkpointFileHandle.seek(INDEX_VERSION * Serialization.SIZE_OF_LONG);
      checkpointFileHandle.writeLong(getVersion());
      checkpointFileHandle.getChannel().force(true);
      LOG.info("Preallocated " + checkpointFile + " to " + checkpointFileHandle.length()
          + " for capacity " + capacity);
    }
    if (checkpointFile.length() != totalBytes) {
      String msg = "Configured capacity is " + capacity + " but the "
          + " checkpoint file capacity is " +
          ((checkpointFile.length() / Serialization.SIZE_OF_LONG) - HEADER_SIZE)
          + ". See FileChannel documentation on how to change a channels" +
          " capacity.";
      throw new BadCheckpointException(msg);
    }
    mappedBuffer = checkpointFileHandle.getChannel().map(MapMode.READ_WRITE, 0,
        checkpointFile.length());
    elementsBuffer = mappedBuffer.asLongBuffer();
    long version = elementsBuffer.get(INDEX_VERSION);
    if (version != (long) getVersion()) {
      throw new BadCheckpointException("Invalid version: " + version + " " +
          name + ", expected " + getVersion());
    }
    long checkpointComplete = elementsBuffer.get(INDEX_CHECKPOINT_MARKER);
    if (checkpointComplete != (long) CHECKPOINT_COMPLETE) {
      throw new BadCheckpointException("Checkpoint was not completed correctly,"
          + " probably because the agent stopped while the channel was"
          + " checkpointing.");
    }
    if (shouldBackup) {
      checkpointBackUpExecutor = Executors.newSingleThreadExecutor(
          new ThreadFactoryBuilder().setNameFormat(
              getName() + " - CheckpointBackUpThread").build());
    } else {
      checkpointBackUpExecutor = null;
    }
  }

  /** Returns the write-order id recorded by the last completed checkpoint. */
  protected long getCheckpointLogWriteOrderID() {
    return elementsBuffer.get(INDEX_WRITE_ORDER_ID);
  }

  protected abstract void writeCheckpointMetaData() throws IOException;

  /**
   * This method backs up the checkpoint and its metadata files. This method
   * is called once the checkpoint is completely written and is called
   * from a separate thread which runs in the background while the file channel
   * continues operation.
   *
   * @param backupDirectory - the directory to which the backup files should be
   *                        copied.
   * @throws IOException - if the copy failed, or if there is not enough disk
   *                     space to copy the checkpoint files over.
   */
  protected void backupCheckpoint(File backupDirectory) throws IOException {
    int availablePermits = backupCompletedSema.drainPermits();
    Preconditions.checkState(availablePermits == 0,
        "Expected no permits to be available in the backup semaphore, " +
            "but " + availablePermits + " permits were available.");
    if (slowdownBackup) {
      // Test hook: artificially slow the backup down.
      try {
        TimeUnit.SECONDS.sleep(10);
      } catch (Exception ex) {
        Throwables.propagate(ex);
      }
    }
    File backupFile = new File(backupDirectory, BACKUP_COMPLETE_FILENAME);
    if (backupExists(backupDirectory)) {
      // Remove the completion marker first so a partial backup is never
      // mistaken for a complete one.
      if (!backupFile.delete()) {
        throw new IOException("Error while doing backup of checkpoint. Could " +
            "not remove" + backupFile.toString() + ".");
      }
    }
    Serialization.deleteAllFiles(backupDirectory, Log.EXCLUDES);
    File checkpointDir = checkpointFile.getParentFile();
    File[] checkpointFiles = checkpointDir.listFiles();
    Preconditions.checkNotNull(checkpointFiles, "Could not retrieve files " +
        "from the checkpoint directory. Cannot complete backup of the " +
        "checkpoint.");
    for (File origFile : checkpointFiles) {
      if (Log.EXCLUDES.contains(origFile.getName())) {
        continue;
      }
      if (compressBackup && origFile.equals(checkpointFile)) {
        Serialization.compressFile(origFile, new File(backupDirectory,
            origFile.getName() + COMPRESSED_FILE_EXTENSION));
      } else {
        Serialization.copyFile(origFile, new File(backupDirectory,
            origFile.getName()));
      }
    }
    Preconditions.checkState(!backupFile.exists(), "The backup file exists " +
        "while it is not supposed to. Are multiple channels configured to use " +
        "this directory: " + backupDirectory.toString() + " as backup?");
    if (!backupFile.createNewFile()) {
      LOG.error("Could not create backup file. Backup of checkpoint will " +
          "not be used during replay even if checkpoint is bad.");
    }
  }

  /**
   * Restore the checkpoint, if it is found to be bad.
   *
   * @return true - if the previous backup was successfully completed and
   * restore was successfully completed.
   * @throws IOException - If restore failed due to IOException
   */
  public static boolean restoreBackup(File checkpointDir, File backupDir)
      throws IOException {
    if (!backupExists(backupDir)) {
      return false;
    }
    Serialization.deleteAllFiles(checkpointDir, Log.EXCLUDES);
    File[] backupFiles = backupDir.listFiles();
    if (backupFiles == null) {
      return false;
    } else {
      for (File backupFile : backupFiles) {
        String fileName = backupFile.getName();
        if (!fileName.equals(BACKUP_COMPLETE_FILENAME) &&
            !fileName.equals(Log.FILE_LOCK)) {
          if (fileName.endsWith(COMPRESSED_FILE_EXTENSION)) {
            Serialization.decompressFile(
                backupFile, new File(checkpointDir,
                    fileName.substring(0, fileName.lastIndexOf("."))));
          } else {
            Serialization.copyFile(backupFile, new File(checkpointDir,
                fileName));
          }
        }
      }
      return true;
    }
  }

  @Override
  void beginCheckpoint() throws IOException {
    LOG.info("Start checkpoint for " + checkpointFile +
        ", elements to sync = " + overwriteMap.size());
    if (shouldBackup) {
      int permits = backupCompletedSema.drainPermits();
      Preconditions.checkState(permits <= 1, "Expected only one or less " +
          "permits to checkpoint, but got " + String.valueOf(permits) +
          " permits");
      if (permits < 1) {
        // Force the checkpoint to not happen by throwing an exception.
        throw new IOException("Previous backup of checkpoint files is still " +
            "in progress. Will attempt to checkpoint only at the end of the " +
            "next checkpoint interval. Try increasing the checkpoint interval " +
            "if this error happens often.");
      }
    }
    // Mark the checkpoint incomplete before any element writes so a crash
    // mid-checkpoint is detected on restart.
    elementsBuffer.put(INDEX_CHECKPOINT_MARKER, CHECKPOINT_INCOMPLETE);
    mappedBuffer.force();
  }

  @Override
  void checkpoint() throws IOException {
    setLogWriteOrderID(WriteOrderOracle.next());
    LOG.info("Updating checkpoint metadata: logWriteOrderID: "
        + getLogWriteOrderID() + ", queueSize: " + getSize() + ", queueHead: "
        + getHead());
    elementsBuffer.put(INDEX_WRITE_ORDER_ID, getLogWriteOrderID());
    try {
      writeCheckpointMetaData();
    } catch (IOException e) {
      throw new IOException("Error writing metadata", e);
    }
    // Apply all buffered writes to the mapped buffer.
    Iterator<Integer> it = overwriteMap.keySet().iterator();
    while (it.hasNext()) {
      int index = it.next();
      long value = overwriteMap.get(index);
      elementsBuffer.put(index, value);
      it.remove();
    }
    Preconditions.checkState(overwriteMap.isEmpty(),
        "concurrent update detected ");
    // Finish checkpoint
    elementsBuffer.put(INDEX_CHECKPOINT_MARKER, CHECKPOINT_COMPLETE);
    mappedBuffer.force();
    if (shouldBackup) {
      startBackupThread();
    }
  }

  /**
   * This method starts backing up the checkpoint in the background.
   */
  private void startBackupThread() {
    Preconditions.checkNotNull(checkpointBackUpExecutor,
        "Expected the checkpoint backup exector to be non-null, " +
            "but it is null. Checkpoint will not be backed up.");
    LOG.info("Attempting to back up checkpoint.");
    checkpointBackUpExecutor.submit(new Runnable() {
      @Override
      public void run() {
        boolean error = false;
        try {
          backupCheckpoint(backupDir);
        } catch (Throwable throwable) {
          fileChannelCounter.incrementCheckpointBackupWriteErrorCount();
          error = true;
          LOG.error("Backing up of checkpoint directory failed.", throwable);
        } finally {
          // Always release so future checkpoints are not blocked forever.
          backupCompletedSema.release();
        }
        if (!error) {
          LOG.info("Checkpoint backup completed.");
        }
      }
    });
  }

  @Override
  void close() {
    mappedBuffer.force();
    try {
      checkpointFileHandle.close();
    } catch (IOException e) {
      LOG.info("Error closing " + checkpointFile, e);
    }
    if (checkpointBackUpExecutor != null && !checkpointBackUpExecutor.isShutdown()) {
      checkpointBackUpExecutor.shutdown();
      try {
        // Wait till the executor dies.
        while (!checkpointBackUpExecutor.awaitTermination(1, TimeUnit.SECONDS)) {}
      } catch (InterruptedException ex) {
        LOG.warn("Interrupted while waiting for checkpoint backup to " +
            "complete");
      }
    }
  }

  /**
   * Reads the value at the given logical queue index, preferring any pending
   * (not-yet-checkpointed) write over the on-disk value.
   */
  @Override
  long get(int index) {
    int realIndex = getPhysicalIndex(index);
    long result = EMPTY;
    if (overwriteMap.containsKey(realIndex)) {
      result = overwriteMap.get(realIndex);
    } else {
      result = elementsBuffer.get(realIndex);
    }
    return result;
  }

  @Override
  ImmutableSortedSet<Integer> getReferenceCounts() {
    return ImmutableSortedSet.copyOf(logFileIDReferenceCounts.keySet());
  }

  /** Buffers a write at the given logical index until the next checkpoint. */
  @Override
  void put(int index, long value) {
    int realIndex = getPhysicalIndex(index);
    overwriteMap.put(realIndex, value);
  }

  @Override
  boolean syncRequired() {
    return overwriteMap.size() > 0;
  }

  @Override
  protected void incrementFileID(int fileID) {
    AtomicInteger counter = logFileIDReferenceCounts.get(fileID);
    if (counter == null) {
      counter = new AtomicInteger(0);
      logFileIDReferenceCounts.put(fileID, counter);
    }
    counter.incrementAndGet();
  }

  @Override
  protected void decrementFileID(int fileID) {
    AtomicInteger counter = logFileIDReferenceCounts.get(fileID);
    Preconditions.checkState(counter != null, "null counter ");
    int count = counter.decrementAndGet();
    if (count == 0) {
      // Drop the entry so getReferenceCounts() only reports live files.
      logFileIDReferenceCounts.remove(fileID);
    }
  }

  /** Maps a logical queue index to its slot in the checkpoint file. */
  protected int getPhysicalIndex(int index) {
    return HEADER_SIZE + (getHead() + index) % getCapacity();
  }

  /**
   * Preallocates the file to totalBytes of zeros, writing in chunks of at
   * most MAX_ALLOC_BUFFER_SIZE to bound memory use.
   */
  protected static void allocate(File file, long totalBytes) throws IOException {
    RandomAccessFile checkpointFile = new RandomAccessFile(file, "rw");
    boolean success = false;
    try {
      if (totalBytes <= MAX_ALLOC_BUFFER_SIZE) {
        /*
         * totalBytes <= MAX_ALLOC_BUFFER_SIZE, so this can be cast to int
         * without a problem.
         */
        checkpointFile.write(new byte[(int) totalBytes]);
      } else {
        byte[] initBuffer = new byte[MAX_ALLOC_BUFFER_SIZE];
        long remainingBytes = totalBytes;
        while (remainingBytes >= MAX_ALLOC_BUFFER_SIZE) {
          checkpointFile.write(initBuffer);
          remainingBytes -= MAX_ALLOC_BUFFER_SIZE;
        }
        /*
         * At this point, remainingBytes is < MAX_ALLOC_BUFFER_SIZE,
         * so casting to int is fine.
         */
        if (remainingBytes > 0) {
          checkpointFile.write(initBuffer, 0, (int) remainingBytes);
        }
      }
      success = true;
    } finally {
      try {
        checkpointFile.close();
      } catch (IOException e) {
        // Only surface the close failure if the allocation itself succeeded;
        // otherwise the original exception is already propagating.
        if (success) {
          throw e;
        }
      }
    }
  }

  /** Returns true if a completed backup exists in the given directory. */
  public static boolean backupExists(File backupDir) {
    return new File(backupDir, BACKUP_COMPLETE_FILENAME).exists();
  }

  /**
   * Debug entry point: dumps the queue contents and in-flight puts/takes of a
   * checkpoint. Args: checkpoint file, inflight-takes file, inflight-puts
   * file, queue-set directory.
   */
  public static void main(String[] args) throws Exception {
    File file = new File(args[0]);
    File inflightTakesFile = new File(args[1]);
    File inflightPutsFile = new File(args[2]);
    File queueSetDir = new File(args[3]);
    if (!file.exists()) {
      throw new IOException("File " + file + " does not exist");
    }
    if (file.length() == 0) {
      throw new IOException("File " + file + " is empty");
    }
    int capacity = (int) ((file.length() - (HEADER_SIZE * 8L)) / 8L);
    EventQueueBackingStoreFile backingStore = (EventQueueBackingStoreFile)
        EventQueueBackingStoreFactory.get(
            file, capacity, "debug", new FileChannelCounter("Main"), false
        );
    System.out.println("File Reference Counts"
        + backingStore.logFileIDReferenceCounts);
    System.out.println("Queue Capacity " + backingStore.getCapacity());
    System.out.println("Queue Size " + backingStore.getSize());
    System.out.println("Queue Head " + backingStore.getHead());
    for (int index = 0; index < backingStore.getCapacity(); index++) {
      // get() already translates logical -> physical; the previous code
      // called get(getPhysicalIndex(index)) and translated twice, dumping
      // the wrong slots.
      long value = backingStore.get(index);
      int fileID = (int) (value >>> 32);
      int offset = (int) value;
      System.out.println(index + ":" + Long.toHexString(value) + " fileID = "
          + fileID + ", offset = " + offset);
    }
    FlumeEventQueue queue =
        new FlumeEventQueue(backingStore, inflightTakesFile, inflightPutsFile,
            queueSetDir);
    SetMultimap<Long, Long> putMap = queue.deserializeInflightPuts();
    System.out.println("Inflight Puts:");
    for (Long txnID : putMap.keySet()) {
      Set<Long> puts = putMap.get(txnID);
      System.out.println("Transaction ID: " + String.valueOf(txnID));
      for (long value : puts) {
        int fileID = (int) (value >>> 32);
        int offset = (int) value;
        System.out.println(Long.toHexString(value) + " fileID = "
            + fileID + ", offset = " + offset);
      }
    }
    SetMultimap<Long, Long> takeMap = queue.deserializeInflightTakes();
    System.out.println("Inflight takes:");
    for (Long txnID : takeMap.keySet()) {
      Set<Long> takes = takeMap.get(txnID);
      System.out.println("Transaction ID: " + String.valueOf(txnID));
      for (long value : takes) {
        int fileID = (int) (value >>> 32);
        int offset = (int) value;
        System.out.println(Long.toHexString(value) + " fileID = "
            + fileID + ", offset = " + offset);
      }
    }
  }
}
| 9,718 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/FileChannelConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
/**
 * Configuration keys and default values for the file channel. This is a pure
 * constants holder and is never instantiated.
 */
public class FileChannelConfiguration {
  // Utility/constants class: prevent instantiation.
  private FileChannelConfiguration() {
  }

  /**
   * Directory Checkpoints will be written in
   */
  public static final String CHECKPOINT_DIR = "checkpointDir";
  /**
   * The directory to which the checkpoint must be backed up
   */
  public static final String BACKUP_CHECKPOINT_DIR = "backupCheckpointDir";
  /**
   * Directories data files will be written in. Multiple directories
   * can be specified as comma separated values. Writes will
   * be written in a round robin fashion.
   */
  public static final String DATA_DIRS = "dataDirs";
  /**
   * Maximum number of put/take events in a transaction. Default: 10000
   */
  public static final String TRANSACTION_CAPACITY = "transactionCapacity";
  public static final int DEFAULT_TRANSACTION_CAPACITY = 10000;
  /**
   * Interval at which checkpoints should be taken. Default 30s (ms)
   */
  public static final String CHECKPOINT_INTERVAL = "checkpointInterval";
  public static final long DEFAULT_CHECKPOINT_INTERVAL = 30L * 1000L;
  /**
   * Max file size for data files, cannot exceed the default. Default~ 1.5GB
   */
  public static final String MAX_FILE_SIZE = "maxFileSize";
  public static final long DEFAULT_MAX_FILE_SIZE =
      Integer.MAX_VALUE - (500L * 1024L * 1024L); // ~1.52 G
  public static final String MINIMUM_REQUIRED_SPACE = "minimumRequiredSpace";
  /**
   * Minimum space required defaults to 500MB
   */
  public static final long DEFAULT_MINIMUM_REQUIRED_SPACE = 500L * 1024L * 1024L;
  /**
   * Minimum space floor is 1MB
   */
  public static final long FLOOR_MINIMUM_REQUIRED_SPACE = 1L * 1024L * 1024L;
  /**
   * Maximum capacity of the channel.
   * Default: 1,000,000
   */
  public static final String CAPACITY = "capacity";
  public static final int DEFAULT_CAPACITY = 1000000;
  /**
   * The length of time we will wait for space available to do a Put.
   * Default: 3 (seconds)
   */
  public static final String KEEP_ALIVE = "keep-alive";
  public static final int DEFAULT_KEEP_ALIVE = 3;
  /**
   * Turn on Flume 1.2 log replay logic
   */
  public static final String USE_LOG_REPLAY_V1 = "use-log-replay-v1";
  public static final boolean DEFAULT_USE_LOG_REPLAY_V1 = false;
  /** Use the fast (checkpoint-free) replay path. */
  public static final String USE_FAST_REPLAY = "use-fast-replay";
  public static final boolean DEFAULT_USE_FAST_REPLAY = false;
  /** Keep a backup copy of the checkpoint. */
  public static final String USE_DUAL_CHECKPOINTS = "useDualCheckpoints";
  public static final boolean DEFAULT_USE_DUAL_CHECKPOINTS = false;
  /** Snappy-compress the backup checkpoint. */
  public static final String COMPRESS_BACKUP_CHECKPOINT = "compressBackupCheckpoint";
  public static final boolean DEFAULT_COMPRESS_BACKUP_CHECKPOINT = false;
  /** Whether to fsync the data files on every transaction commit. */
  public static final String FSYNC_PER_TXN = "fsyncPerTransaction";
  public static final boolean DEFAULT_FSYNC_PRE_TXN = true;
  /** Interval between fsyncs when not syncing per transaction. Seconds. */
  public static final String FSYNC_INTERVAL = "fsyncInterval";
  public static final int DEFAULT_FSYNC_INTERVAL = 5; // seconds.
  /** Whether to write a checkpoint when the channel is closed. */
  public static final String CHKPT_ONCLOSE = "checkpointOnClose";
  public static final Boolean DEFAULT_CHKPT_ONCLOSE = true;
}
| 9,719 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Take.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.flume.channel.file.proto.ProtosFactory;
import com.google.common.base.Preconditions;
/**
 * On-disk record of a take: points at the event (by data-file id and byte
 * offset) that was removed from the queue within a transaction.
 */
class Take extends TransactionEventRecord {
  private int offset;
  private int fileID;

  Take(Long transactionID, Long logWriteOrderID) {
    super(transactionID, logWriteOrderID);
  }

  Take(Long transactionID, Long logWriteOrderID, int offset, int fileID) {
    this(transactionID, logWriteOrderID);
    this.offset = offset;
    this.fileID = fileID;
  }

  /** Byte offset of the taken event within its data file. */
  int getOffset() {
    return offset;
  }

  /** Id of the data file containing the taken event. */
  int getFileID() {
    return fileID;
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    // Wire order: offset first, then fileID (must mirror write()).
    offset = in.readInt();
    fileID = in.readInt();
  }

  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeInt(offset);
    out.writeInt(fileID);
  }

  @Override
  void writeProtos(OutputStream out) throws IOException {
    ProtosFactory.Take.newBuilder()
        .setFileID(fileID)
        .setOffset(offset)
        .build()
        .writeDelimitedTo(out);
  }

  @Override
  void readProtos(InputStream in) throws IOException {
    ProtosFactory.Take proto = Preconditions.checkNotNull(
        ProtosFactory.Take.parseDelimitedFrom(in), "Take cannot be null");
    fileID = proto.getFileID();
    offset = proto.getOffset();
  }

  @Override
  short getRecordType() {
    return Type.TAKE.get();
  }

  @Override
  public String toString() {
    return "Take [offset=" + offset
        + ", fileID=" + fileID
        + ", getLogWriteOrderID()=" + getLogWriteOrderID()
        + ", getTransactionID()=" + getTransactionID()
        + "]";
  }
}
| 9,720 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/LogFileFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.Key;
@SuppressWarnings("deprecation")
class LogFileFactory {
  private static final Logger LOGGER =
      LoggerFactory.getLogger(LogFileFactory.class);

  // Static factory holder; never instantiated.
  private LogFileFactory() {
  }

  /**
   * Returns a metadata writer for {@code file}, choosing the on-disk format:
   * a V3 log is identified by the presence of a sibling metadata file,
   * otherwise the version integer at the head of the log itself is checked
   * for V2.
   *
   * @param file data log file
   * @param logFileID numeric id of the log file
   * @throws IOException if the file has an unrecognized version header
   */
  static LogFile.MetaDataWriter getMetaDataWriter(File file, int logFileID)
      throws IOException {
    RandomAccessFile logFile = null;
    try {
      File metaDataFile = Serialization.getMetaDataFile(file);
      if (metaDataFile.exists()) {
        // Presence of the metadata file implies V3.
        return new LogFileV3.MetaDataWriter(file, logFileID);
      }
      // No metadata file: read the version stamp from the log header.
      logFile = new RandomAccessFile(file, "r");
      int version = logFile.readInt();
      if (Serialization.VERSION_2 == version) {
        return new LogFileV2.MetaDataWriter(file, logFileID);
      }
      throw new IOException("File " + file + " has bad version " +
          Integer.toHexString(version));
    } finally {
      // The RandomAccessFile is only used for the version probe; close it
      // regardless of which branch was taken. Close failures are non-fatal.
      if (logFile != null) {
        try {
          logFile.close();
        } catch (IOException e) {
          LOGGER.warn("Unable to close " + file, e);
        }
      }
    }
  }

  /**
   * Creates a brand-new V3 log file writer. The file must not already exist;
   * it is created here so that the writer always starts from an empty file.
   *
   * @throws IOException if the file cannot be created
   */
  static LogFile.Writer getWriter(File file, int logFileID,
      long maxFileSize, @Nullable Key encryptionKey,
      @Nullable String encryptionKeyAlias,
      @Nullable String encryptionCipherProvider,
      long usableSpaceRefreshInterval, boolean fsyncPerTransaction,
      int fsyncInterval) throws IOException {
    Preconditions.checkState(!file.exists(), "File already exists " +
        file.getAbsolutePath());
    Preconditions.checkState(file.createNewFile(), "File could not be created "
        + file.getAbsolutePath());
    return new LogFileV3.Writer(file, logFileID, maxFileSize, encryptionKey,
        encryptionKeyAlias, encryptionCipherProvider,
        usableSpaceRefreshInterval, fsyncPerTransaction, fsyncInterval);
  }

  /**
   * Returns a random-access reader for {@code file}. An empty file (just
   * created by a writer) or one with a metadata file is treated as V3;
   * otherwise the version header selects V2.
   *
   * @throws IOException if the file has an unrecognized version header
   */
  static LogFile.RandomReader getRandomReader(File file,
      @Nullable KeyProvider encryptionKeyProvider,
      boolean fsyncPerTransaction)
      throws IOException {
    RandomAccessFile logFile = new RandomAccessFile(file, "r");
    try {
      File metaDataFile = Serialization.getMetaDataFile(file);
      // either this is a rr for a just created file or
      // the metadata file exists and as such it's V3
      if (logFile.length() == 0L || metaDataFile.exists()) {
        return new LogFileV3.RandomReader(file, encryptionKeyProvider,
            fsyncPerTransaction);
      }
      int version = logFile.readInt();
      if (Serialization.VERSION_2 == version) {
        return new LogFileV2.RandomReader(file);
      }
      throw new IOException("File " + file + " has bad version " +
          Integer.toHexString(version));
    } finally {
      // Close the probe handle; the returned reader opens its own handle.
      if (logFile != null) {
        try {
          logFile.close();
        } catch (IOException e) {
          LOGGER.warn("Unable to close " + file, e);
        }
      }
    }
  }

  /**
   * Returns a sequential reader for {@code file}, first recovering the V3
   * metadata file if a previous non-atomic rename sequence (FLUME-1699) was
   * interrupted, then falling back to the V2 version-header check.
   *
   * @throws EOFException if a recovered metadata file is empty and the log
   *         itself is empty (benign: replay can skip this log)
   * @throws IllegalStateException if the metadata file is empty but the log
   *         has data (real corruption)
   * @throws IOException if the file has an unrecognized version header
   */
  static LogFile.SequentialReader getSequentialReader(File file,
      @Nullable KeyProvider encryptionKeyProvider,
      boolean fsyncPerTransaction)
      throws IOException {
    RandomAccessFile logFile = null;
    try {
      File metaDataFile = Serialization.getMetaDataFile(file);
      File oldMetadataFile = Serialization.getOldMetaDataFile(file);
      File tempMetadataFile = Serialization.getMetaDataTempFile(file);
      boolean hasMeta = false;
      // FLUME-1699:
      // If the platform does not support atomic rename, then we
      // renamed log.meta -> log.meta.old followed by log.meta.tmp -> log.meta
      // I am not sure if all platforms maintain file integrity during
      // file metadata update operations. So:
      // 1. check if meta file exists
      // 2. If 1 returns false, check if temp exists
      // 3. if 2 is also false (maybe the machine died during temp->meta,
      //    then check if old exists.
      // In the above, we assume that if a file exists, it's integrity is ok.
      if (metaDataFile.exists()) {
        hasMeta = true;
      } else if (tempMetadataFile.exists()) {
        if (tempMetadataFile.renameTo(metaDataFile)) {
          hasMeta = true;
        } else {
          throw new IOException("Renaming of " + tempMetadataFile.getName()
              + " to " + metaDataFile.getName() + " failed");
        }
      } else if (oldMetadataFile.exists()) {
        if (oldMetadataFile.renameTo(metaDataFile)) {
          hasMeta = true;
        } else {
          throw new IOException("Renaming of " + oldMetadataFile.getName()
              + " to " + metaDataFile.getName() + " failed");
        }
      }
      if (hasMeta) {
        // Now the metadata file has been found, delete old or temp files
        // so it does not interfere with normal operation.
        if (oldMetadataFile.exists()) {
          oldMetadataFile.delete();
        }
        if (tempMetadataFile.exists()) {
          tempMetadataFile.delete();
        }
        if (metaDataFile.length() == 0L) {
          if (file.length() != 0L) {
            // Non-empty log with empty metadata cannot be replayed safely.
            String msg = String.format("MetaData file %s is empty, but log %s" +
                " is of size %d", metaDataFile, file, file.length());
            throw new IllegalStateException(msg);
          }
          throw new EOFException(String.format("MetaData file %s is empty",
              metaDataFile));
        }
        return new LogFileV3.SequentialReader(file, encryptionKeyProvider,
            fsyncPerTransaction);
      }
      // No metadata at all: probe the log header for a V2 version stamp.
      logFile = new RandomAccessFile(file, "r");
      int version = logFile.readInt();
      if (Serialization.VERSION_2 == version) {
        return new LogFileV2.SequentialReader(file);
      }
      throw new IOException("File " + file + " has bad version " +
          Integer.toHexString(version));
    } finally {
      if (logFile != null) {
        try {
          logFile.close();
        } catch (IOException e) {
          LOGGER.warn("Unable to close " + file, e);
        }
      }
    }
  }
}
| 9,721 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Pair.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
/**
 * Minimal immutable 2-tuple used internally by the file channel.
 *
 * @param <L> type of the left element
 * @param <R> type of the right element
 */
class Pair<L,R> {

  private final L left;
  private final R right;

  Pair(L l, R r) {
    this.left = l;
    this.right = r;
  }

  /** Returns the left element (may be null). */
  L getLeft() {
    return left;
  }

  /** Returns the right element (may be null). */
  R getRight() {
    return right;
  }

  /** Static factory: infers type arguments at the call site. */
  static <L, R> Pair<L, R> of(L left, R right) {
    return new Pair<L, R>(left, right);
  }
}
| 9,722 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/LogRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.util.Arrays;
/**
 * A single entry read back from a data log during replay: the record itself
 * plus its physical location (log file id and byte offset). Natural ordering
 * reconstructs the original write order so replay applies operations in the
 * sequence they originally occurred.
 */
public class LogRecord implements Comparable<LogRecord> {
  private final int fileID;
  private final int offset;
  private final TransactionEventRecord event;

  /**
   * @param fileID id of the log file the record was read from
   * @param offset byte offset of the record within that file
   * @param event  the deserialized transaction event
   */
  public LogRecord(int fileID, int offset, TransactionEventRecord event) {
    this.fileID = fileID;
    this.offset = offset;
    this.event = event;
  }

  public int getFileID() {
    return fileID;
  }

  public int getOffset() {
    return offset;
  }

  public TransactionEventRecord getEvent() {
    return event;
  }

  /**
   * Orders records by write-order id, then (for the FLUME-1.2 bug where ids
   * collide) by transaction id, then by record type so that COMMIT/ROLLBACK
   * replay after the PUT/TAKE records of the same transaction.
   */
  @Override
  public int compareTo(LogRecord o) {
    // Long.compare avoids the deprecated boxing of `new Long(x).compareTo(y)`
    // while producing identical results.
    int result = Long.compare(event.getLogWriteOrderID(),
        o.getEvent().getLogWriteOrderID());
    if (result == 0) {
      // oops we have hit a flume-1.2 bug. let's try and use the txid
      // to replay the events
      result = Long.compare(event.getTransactionID(),
          o.getEvent().getTransactionID());
      if (result == 0) {
        // events are within the same transaction. Basically we want commit
        // and rollback to come after take and put
        // NOTE(review): Arrays.binarySearch is only defined on sorted arrays;
        // whether replaySortOrder's Type values are ascending depends on the
        // enum's wire values -- confirm before relying on this tiebreak.
        int thisIndex = Arrays.binarySearch(replaySortOrder, event.getRecordType());
        int thatIndex = Arrays.binarySearch(replaySortOrder, o.getEvent().getRecordType());
        result = Integer.compare(thisIndex, thatIndex);
      }
    }
    return result;
  }

  // Desired replay precedence within a transaction: operations before their
  // terminating commit/rollback.
  private static final short[] replaySortOrder = new short[] {
      TransactionEventRecord.Type.TAKE.get(),
      TransactionEventRecord.Type.PUT.get(),
      TransactionEventRecord.Type.ROLLBACK.get(),
      TransactionEventRecord.Type.COMMIT.get(),
  };
}
| 9,723 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/FlumeEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.util.HashMap;
import java.util.Map;
import org.apache.flume.Event;
/**
* Persistable wrapper for Event
*/
/**
 * Persistable wrapper for Event.
 *
 * Serializes a Flume event in the legacy Hadoop-Writable wire layout
 * (AbstractMapWritable header, Text-tagged header entries, length-prefixed
 * body) so data files written by earlier channel versions remain readable.
 */
class FlumeEvent implements Event, Writable {

  // Class-id byte Hadoop's AbstractMapWritable assigns to Text entries
  // (-116); written before every header key and value for wire compatibility.
  // (byte) -116 replaces the needlessly indirect
  // Byte.valueOf(Integer.valueOf(-116).byteValue()) -- same value.
  private static final byte EVENT_MAP_TEXT_WRITABLE_ID = (byte) -116;

  // Per-thread UTF-8 coders: CharsetEncoder/Decoder are not thread-safe.
  // Malformed or unmappable input is replaced rather than raising.
  private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY = new ThreadLocal<CharsetEncoder>() {
    @Override
    protected CharsetEncoder initialValue() {
      return Charset.forName("UTF-8").newEncoder()
          .onMalformedInput(CodingErrorAction.REPLACE)
          .onUnmappableCharacter(CodingErrorAction.REPLACE);
    }
  };

  private static ThreadLocal<CharsetDecoder> DECODER_FACTORY =
      new ThreadLocal<CharsetDecoder>() {
        @Override
        protected CharsetDecoder initialValue() {
          return Charset.forName("UTF-8").newDecoder()
              .onMalformedInput(CodingErrorAction.REPLACE)
              .onUnmappableCharacter(CodingErrorAction.REPLACE);
        }
      };

  private Map<String, String> headers;
  private byte[] body;

  // Used only by from(DataInput); fields are populated via readFields().
  private FlumeEvent() {
    this(null, null);
  }

  FlumeEvent(Map<String, String> headers, byte[] body) {
    this.headers = headers;
    this.body = body;
  }

  @Override
  public Map<String, String> getHeaders() {
    return headers;
  }

  @Override
  public void setHeaders(Map<String, String> headers) {
    this.headers = headers;
  }

  @Override
  public byte[] getBody() {
    return body;
  }

  @Override
  public void setBody(byte[] body) {
    this.body = body;
  }

  /**
   * Writes this event in the legacy format: a zero "new classes" byte, the
   * header entry count, then each key/value as (class-id byte, vint length,
   * UTF-8 bytes), then the body as (int length, bytes) with -1 for no body.
   */
  @Override
  public void write(DataOutput out) throws IOException {
    // AbstractMapWritable-compatible header: no custom classes declared.
    out.writeByte(0);
    Map<String,String> writeHeaders = getHeaders();
    if (null != writeHeaders) {
      // Use the local consistently (the original mixed `headers` and
      // `writeHeaders`; they alias the same map, so behavior is unchanged).
      out.writeInt(writeHeaders.size());
      CharsetEncoder encoder = ENCODER_FACTORY.get();
      // entrySet avoids a second lookup per key versus keySet()+get().
      for (Map.Entry<String, String> entry : writeHeaders.entrySet()) {
        out.writeByte(EVENT_MAP_TEXT_WRITABLE_ID);
        ByteBuffer keyBytes = encoder.encode(CharBuffer.wrap(entry.getKey().toCharArray()));
        int keyLength = keyBytes.limit();
        WritableUtils.writeVInt(out, keyLength);
        out.write(keyBytes.array(), 0, keyLength);
        // writeByte replaces the original write(int); both emit exactly the
        // low-order byte, so the wire format is unchanged -- this just makes
        // the one-byte intent explicit and consistent with the key path.
        out.writeByte(EVENT_MAP_TEXT_WRITABLE_ID);
        ByteBuffer valueBytes = encoder.encode(CharBuffer.wrap(entry.getValue().toCharArray()));
        int valueLength = valueBytes.limit();
        WritableUtils.writeVInt(out, valueLength);
        out.write(valueBytes.array(), 0, valueLength);
      }
    } else {
      // No headers: explicit zero entry count.
      out.writeInt(0);
    }
    byte[] body = getBody();
    if (body == null) {
      // -1 length distinguishes "no body" from an empty body.
      out.writeInt(-1);
    } else {
      out.writeInt(body.length);
      out.write(body);
    }
  }

  /**
   * Reads an event previously written by {@link #write(DataOutput)},
   * replacing this instance's headers and body.
   */
  @Override
  public void readFields(DataInput in) throws IOException {
    // newClasses from AbstractMapWritable in Hadoop Common
    byte newClasses = in.readByte();
    // skip over newClasses since only Text is used
    for (byte i = 0; i < newClasses; i++) {
      in.readByte();
      in.readUTF();
    }
    Map<String,String> newHeaders = new HashMap<String,String>();
    int numEntries = in.readInt();
    CharsetDecoder decoder = DECODER_FACTORY.get();
    for (int i = 0; i < numEntries; i++) {
      byte keyClassId = in.readByte();
      // Asserts are only active with -ea; unexpected ids are otherwise
      // tolerated, matching the original best-effort behavior.
      assert (keyClassId == EVENT_MAP_TEXT_WRITABLE_ID);
      int keyLength = WritableUtils.readVInt(in);
      byte[] keyBytes = new byte[keyLength];
      in.readFully(keyBytes, 0, keyLength);
      String key = decoder.decode(ByteBuffer.wrap(keyBytes)).toString();
      byte valueClassId = in.readByte();
      assert (valueClassId == EVENT_MAP_TEXT_WRITABLE_ID);
      int valueLength = WritableUtils.readVInt(in);
      byte[] valueBytes = new byte[valueLength];
      in.readFully(valueBytes, 0, valueLength);
      String value = decoder.decode(ByteBuffer.wrap(valueBytes)).toString();
      newHeaders.put(key, value);
    }
    setHeaders(newHeaders);
    byte[] body = null;
    int bodyLength = in.readInt();
    if (bodyLength != -1) {
      body = new byte[bodyLength];
      in.readFully(body);
    }
    setBody(body);
  }

  /** Deserializes a new event from {@code in}. */
  static FlumeEvent from(DataInput in) throws IOException {
    FlumeEvent event = new FlumeEvent();
    event.readFields(in);
    return event;
  }
}
| 9,724 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/CheckpointRebuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Set;
/**
 * Rebuilds a file-channel checkpoint directly from the data log files
 * ("fast replay"): scans every log, pairs puts with their commits, cancels
 * puts consumed by committed takes, and loads the surviving events into the
 * queue. Also runnable as a standalone tool via {@link #main(String[])}.
 */
public class CheckpointRebuilder {

  // Made final: the logger is assigned exactly once.
  private static final Logger LOG = LoggerFactory.getLogger(CheckpointRebuilder.class);

  private final List<File> logFiles;
  private final FlumeEventQueue queue;
  // Puts whose commit has been seen and that no committed take cancelled.
  private final Set<ComparableFlumeEventPointer> committedPuts = Sets.newHashSet();
  // Committed takes seen before the matching put's commit (possible when the
  // channel spans multiple log directories).
  private final Set<ComparableFlumeEventPointer> pendingTakes = Sets.newHashSet();
  // In-flight operations keyed by transaction id, resolved on commit/rollback.
  private final SetMultimap<Long, ComparableFlumeEventPointer> uncommittedPuts =
      HashMultimap.create();
  private final SetMultimap<Long, ComparableFlumeEventPointer>
      uncommittedTakes = HashMultimap.create();
  private final boolean fsyncPerTransaction;

  /**
   * @param logFiles data log files to scan
   * @param queue event queue to populate with surviving events
   * @param fsyncPerTransaction passed through to the log readers
   */
  public CheckpointRebuilder(List<File> logFiles, FlumeEventQueue queue,
      boolean fsyncPerTransaction) throws IOException {
    this.logFiles = logFiles;
    this.queue = queue;
    this.fsyncPerTransaction = fsyncPerTransaction;
  }

  /**
   * Scans all logs and loads committed, untaken events into the queue.
   *
   * @return true on success; false if any log could not be processed (the
   *         caller should fall back to a full replay)
   */
  public boolean rebuild() throws IOException, Exception {
    LOG.info("Attempting to fast replay the log files.");
    List<LogFile.SequentialReader> logReaders = Lists.newArrayList();
    for (File logFile : logFiles) {
      try {
        logReaders.add(LogFileFactory.getSequentialReader(logFile, null,
            fsyncPerTransaction));
      } catch (EOFException e) {
        // Empty log: nothing to replay from it.
        LOG.warn("Ignoring " + logFile + " due to EOF", e);
      }
    }
    // Track the highest ids seen so the oracles resume past them.
    long transactionIDSeed = 0;
    long writeOrderIDSeed = 0;
    try {
      for (LogFile.SequentialReader log : logReaders) {
        LogRecord entry;
        int fileID = log.getLogFileID();
        while ((entry = log.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          long trans = record.getTransactionID();
          long writeOrderID = record.getLogWriteOrderID();
          transactionIDSeed = Math.max(trans, transactionIDSeed);
          writeOrderIDSeed = Math.max(writeOrderID, writeOrderIDSeed);
          if (record.getRecordType() == TransactionEventRecord.Type.PUT.get()) {
            // A put lives where it was read.
            uncommittedPuts.put(record.getTransactionID(),
                new ComparableFlumeEventPointer(
                    new FlumeEventPointer(fileID, offset),
                    record.getLogWriteOrderID()));
          } else if (record.getRecordType() == TransactionEventRecord.Type.TAKE.get()) {
            // A take points back at the put it consumed.
            Take take = (Take) record;
            uncommittedTakes.put(record.getTransactionID(),
                new ComparableFlumeEventPointer(
                    new FlumeEventPointer(take.getFileID(), take.getOffset()),
                    record.getLogWriteOrderID()));
          } else if (record.getRecordType() == TransactionEventRecord.Type.COMMIT.get()) {
            Commit commit = (Commit) record;
            if (commit.getType() == TransactionEventRecord.Type.PUT.get()) {
              // Committed put: keep it unless a committed take already
              // consumed it (multi-logdir ordering).
              Set<ComparableFlumeEventPointer> puts =
                  uncommittedPuts.get(record.getTransactionID());
              if (puts != null) {
                for (ComparableFlumeEventPointer put : puts) {
                  if (!pendingTakes.remove(put)) {
                    committedPuts.add(put);
                  }
                }
              }
            } else {
              // Committed take: cancel the put, or park the take until the
              // put's commit shows up.
              Set<ComparableFlumeEventPointer> takes =
                  uncommittedTakes.get(record.getTransactionID());
              if (takes != null) {
                for (ComparableFlumeEventPointer take : takes) {
                  if (!committedPuts.remove(take)) {
                    pendingTakes.add(take);
                  }
                }
              }
            }
          } else if (record.getRecordType() == TransactionEventRecord.Type.ROLLBACK.get()) {
            // A transaction holds either puts or takes, never both.
            if (uncommittedPuts.containsKey(record.getTransactionID())) {
              uncommittedPuts.removeAll(record.getTransactionID());
            } else {
              uncommittedTakes.removeAll(record.getTransactionID());
            }
          }
        }
      }
    } catch (Exception e) {
      LOG.warn("Error while generating checkpoint using fast generation logic", e);
      return false;
    } finally {
      // Seed the oracles even on failure so a subsequent full replay never
      // reuses ids, and always release the readers.
      TransactionIDOracle.setSeed(transactionIDSeed);
      WriteOrderOracle.setSeed(writeOrderIDSeed);
      for (LogFile.SequentialReader reader : logReaders) {
        reader.close();
      }
    }
    // Replay survivors in write order.
    Set<ComparableFlumeEventPointer> sortedPuts = Sets.newTreeSet(committedPuts);
    int count = 0;
    for (ComparableFlumeEventPointer put : sortedPuts) {
      queue.addTail(put.pointer);
      count++;
    }
    LOG.info("Replayed {} events using fast replay logic.", count);
    return true;
  }

  /**
   * Checkpoints the queue and stamps each log's metadata with the
   * checkpoint's write-order id so normal startup recognizes it.
   */
  private void writeCheckpoint() throws IOException {
    long checkpointLogOrderID = 0;
    List<LogFile.MetaDataWriter> metaDataWriters = Lists.newArrayList();
    for (File logFile : logFiles) {
      // Log file names end in "-<id>"; recover the numeric id.
      String name = logFile.getName();
      metaDataWriters.add(LogFileFactory.getMetaDataWriter(logFile,
          Integer.parseInt(name.substring(name.lastIndexOf('-') + 1))));
    }
    try {
      if (queue.checkpoint(true)) {
        checkpointLogOrderID = queue.getLogWriteOrderID();
        for (LogFile.MetaDataWriter metaDataWriter : metaDataWriters) {
          metaDataWriter.markCheckpoint(checkpointLogOrderID);
        }
      }
    } catch (Exception e) {
      LOG.warn("Error while generating checkpoint using fast generation logic", e);
    } finally {
      for (LogFile.MetaDataWriter metaDataWriter : metaDataWriters) {
        metaDataWriter.close();
      }
    }
  }

  /**
   * Event pointer plus its write-order id. Equality/hashing use only the
   * pointer (so takes cancel puts); ordering uses only the order id.
   */
  private final class ComparableFlumeEventPointer
      implements Comparable<ComparableFlumeEventPointer> {
    private final FlumeEventPointer pointer;
    private final long orderID;

    public ComparableFlumeEventPointer(FlumeEventPointer pointer, long orderID) {
      Preconditions.checkNotNull(pointer, "FlumeEventPointer cannot be"
          + "null while creating a ComparableFlumeEventPointer");
      this.pointer = pointer;
      this.orderID = orderID;
    }

    // NOTE(review): deliberately never returns 0, so a TreeSet keeps
    // distinct events that share an order id (older logs could collide).
    // This makes compareTo inconsistent with equals -- intentional here,
    // but do not reuse this class with equality-sensitive sorted containers.
    @Override
    public int compareTo(ComparableFlumeEventPointer o) {
      if (orderID < o.orderID) {
        return -1;
      } else { //Unfortunately same log order id does not mean same event
        //for older logs.
        return 1;
      }
    }

    @Override
    public int hashCode() {
      return pointer.hashCode();
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null) {
        return false;
      }
      if (o.getClass() != this.getClass()) {
        return false;
      }
      return pointer.equals(((ComparableFlumeEventPointer) o).pointer);
    }
  }

  /**
   * Command-line entry point. Flags: -c checkpoint dir, -l comma-separated
   * log dirs, -t channel capacity. Refuses to run if a checkpoint exists.
   */
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    Option opt = new Option("c", true, "checkpoint directory");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option("l", true, "comma-separated list of log directories");
    opt.setRequired(true);
    // Fixed: the original added this option twice (copy-paste duplicate).
    options.addOption(opt);
    opt = new Option("t", true, "capacity of the channel");
    opt.setRequired(true);
    options.addOption(opt);
    CommandLineParser parser = new GnuParser();
    CommandLine cli = parser.parse(options, args);
    File checkpointDir = new File(cli.getOptionValue("c"));
    String[] logDirs = cli.getOptionValue("l").split(",");
    List<File> logFiles = Lists.newArrayList();
    for (String logDir : logDirs) {
      logFiles.addAll(LogUtils.getLogs(new File(logDir)));
    }
    int capacity = Integer.parseInt(cli.getOptionValue("t"));
    File checkpointFile = new File(checkpointDir, "checkpoint");
    if (checkpointFile.exists()) {
      LOG.error("Cannot execute fast replay",
          new IllegalStateException("Checkpoint exists" + checkpointFile));
    } else {
      EventQueueBackingStore backingStore =
          EventQueueBackingStoreFactory.get(checkpointFile,
              capacity, "channel", new FileChannelCounter("Main"));
      FlumeEventQueue queue = new FlumeEventQueue(backingStore,
          new File(checkpointDir, "inflighttakes"),
          new File(checkpointDir, "inflightputs"),
          new File(checkpointDir, Log.QUEUE_SET));
      CheckpointRebuilder rebuilder = new CheckpointRebuilder(logFiles, queue, true);
      if (rebuilder.rebuild()) {
        rebuilder.writeCheckpoint();
      } else {
        LOG.error("Could not rebuild the checkpoint due to errors.");
      }
    }
  }
}
| 9,725 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/EventQueueBackingStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.IOException;
import com.google.common.collect.ImmutableSortedSet;
/**
 * Abstract storage backing the in-memory event queue: a fixed-capacity,
 * indexable array of longs plus checkpoint bookkeeping. Concrete subclasses
 * define the persistence format (see the getVersion() hook).
 */
abstract class EventQueueBackingStore {

  /** Sentinel value for an unused slot. */
  protected static final int EMPTY = 0;

  /** Marker file name signalling a completed checkpoint backup. */
  public static final String BACKUP_COMPLETE_FILENAME = "backupComplete";

  // NOTE(review): appears to be a test hook letting subclasses slow the
  // backup path artificially -- confirm against subclass usage.
  protected Boolean slowdownBackup = false;

  private final int capacity;
  private final String name;
  private int size;
  private int head;
  private long writeOrderID;

  protected EventQueueBackingStore(int capacity, String name) {
    this.capacity = capacity;
    this.name = name;
  }

  /** Current number of occupied slots. */
  int getSize() {
    return size;
  }

  void setSize(int size) {
    this.size = size;
  }

  /** Index of the logical head of the queue. */
  int getHead() {
    return head;
  }

  void setHead(int head) {
    this.head = head;
  }

  /** Maximum number of slots; fixed at construction. */
  int getCapacity() {
    return capacity;
  }

  /** Channel name this store belongs to. */
  String getName() {
    return name;
  }

  /** Write-order id recorded at the last checkpoint. */
  long getLogWriteOrderID() {
    return writeOrderID;
  }

  protected void setLogWriteOrderID(long logWriteOrderID) {
    this.writeOrderID = logWriteOrderID;
  }

  /** Marks the start of a checkpoint in the persistent store. */
  abstract void beginCheckpoint() throws IOException;

  /** Completes and persists the checkpoint. */
  abstract void checkpoint() throws IOException;

  /** Bumps the reference count of the given data log file. */
  abstract void incrementFileID(int fileID);

  /** Drops one reference to the given data log file. */
  abstract void decrementFileID(int fileID);

  /** All data log file ids currently referenced by queued events. */
  abstract ImmutableSortedSet<Integer> getReferenceCounts();

  /** Reads the slot at {@code index}. */
  abstract long get(int index);

  /** Writes {@code value} into the slot at {@code index}. */
  abstract void put(int index, long value);

  /** True when in-memory state must be flushed to disk. */
  abstract boolean syncRequired();

  abstract void close() throws IOException;

  /** Persistence-format version implemented by the subclass. */
  protected abstract int getVersion();
}
| 9,726 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/CorruptEventException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
/**
 * Thrown when an event read back from a data log fails integrity checks
 * (e.g. a bad checksum or truncated record) and cannot be deserialized.
 */
public class CorruptEventException extends Exception {

  private static final long serialVersionUID = -2986946303540798416L;

  public CorruptEventException() {
    super();
  }

  /** @param msg description of the corruption encountered */
  public CorruptEventException(String msg) {
    super(msg);
  }

  /**
   * @param msg description of the corruption encountered
   * @param th  underlying cause (e.g. a parse failure)
   */
  public CorruptEventException(String msg, Throwable th) {
    super(msg, th);
  }
}
| 9,727 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/EventUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import org.apache.flume.Event;
/**
*
*/
/**
 * Static helpers for extracting events from transaction log records.
 */
public class EventUtils {

  // Utility holder; not instantiable (consistent with LogFileFactory's
  // private constructor convention).
  private EventUtils() {
  }

  /**
   * Returns the Event encapsulated by a Put wrapper
   *
   * @param transactionEventRecord TransactionEvent
   * @return Event if Put instance is present, null otherwise
   */
  public static Event getEventFromTransactionEvent(TransactionEventRecord transactionEventRecord) {
    if (transactionEventRecord instanceof Put) {
      return ((Put) transactionEventRecord).getEvent();
    }
    // Takes/commits/rollbacks carry no event payload.
    return null;
  }
}
| 9,728 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/ReplayHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
/**
* Processes a set of data logs, replaying said logs into the queue.
*/
class ReplayHandler {
private static final Logger LOG = LoggerFactory
.getLogger(ReplayHandler.class);
private final FlumeEventQueue queue;
private final long lastCheckpoint;
private final Map<Integer, LogFile.SequentialReader> readers;
private final PriorityQueue<LogRecord> logRecordBuffer;
private final KeyProvider encryptionKeyProvider;
private final boolean fsyncPerTransaction;
/**
* This data structure stores takes for which we found a commit in the log
* files before we found a commit for the put. This can happen if the channel
* is configured for multiple directories.
*
* Consider the following:
*
* logdir1, logdir2
*
* Put goes to logdir2 Commit of Put goes to logdir2 Take goes to logdir1
* Commit of Take goes to logdir1
*
* When replaying we will start with log1 and find the take and commit before
* finding the put and commit in logdir2.
*/
private final List<Long> pendingTakes;
int readCount = 0;
int putCount = 0;
int takeCount = 0;
int rollbackCount = 0;
int commitCount = 0;
int skipCount = 0;
  // Test-visible replay counters: each getter exposes how many records of
  // that kind were observed during replay (incremented in replayLogv1).
  @VisibleForTesting
  public int getReadCount() {
    return readCount;
  }
  @VisibleForTesting
  public int getPutCount() {
    return putCount;
  }
  @VisibleForTesting
  public int getTakeCount() {
    return takeCount;
  }
  @VisibleForTesting
  public int getCommitCount() {
    return commitCount;
  }
  @VisibleForTesting
  public int getRollbackCount() {
    return rollbackCount;
  }
ReplayHandler(FlumeEventQueue queue,
@Nullable KeyProvider encryptionKeyProvider,
boolean fsyncPerTransaction) {
this.queue = queue;
this.lastCheckpoint = queue.getLogWriteOrderID();
pendingTakes = Lists.newArrayList();
readers = Maps.newHashMap();
logRecordBuffer = new PriorityQueue<LogRecord>();
this.encryptionKeyProvider = encryptionKeyProvider;
this.fsyncPerTransaction = fsyncPerTransaction;
}
  /**
   * Replay logic from Flume 1.2 which can be activated if the v2 logic
   * is failing on old logs for some reason.
   *
   * <p>Each log file is read sequentially (not merged across files). Records
   * newer than the last checkpoint are grouped per transaction ID; a commit
   * record applies the transaction's pointers to the queue via
   * {@link #processCommit}. Inflight takes that remain uncommitted at the end
   * are re-inserted at the head of the queue.
   *
   * @param logs log files to replay, processed one at a time in list order
   */
  @Deprecated
  void replayLogv1(List<File> logs) throws Exception {
    int total = 0;
    int count = 0;
    // Maps transactionID -> pointers accumulated for that still-open txn.
    MultiMap transactionMap = new MultiValueMap();
    //Read inflight puts to see if they were committed
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = LogFileFactory.getSequentialReader(log,
            encryptionKeyProvider, fsyncPerTransaction);
        reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
        LogRecord entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();
        while ((entry = reader.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          // Records at or before the checkpoint are already reflected in the
          // queue state; only strictly newer ones are replayed.
          if (record.getLogWriteOrderID() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                  (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              // For a committed take, fold in the takes that were inflight at
              // checkpoint time so they are removed from the queue as well.
              if (((Commit) record).getType() == TransactionEventRecord.Type.TAKE.get()) {
                if (inflightTakes.containsKey(trans)) {
                  if (pointers == null) {
                    pointers = Sets.newHashSet();
                  }
                  Set<Long> takes = inflightTakes.removeAll(trans);
                  Iterator<Long> it = takes.iterator();
                  while (it.hasNext()) {
                    Long take = it.next();
                    pointers.add(FlumeEventPointer.fromLong(take));
                  }
                }
              }
              if (pointers != null && pointers.size() > 0) {
                processCommit(((Commit) record).getType(), pointers);
                count += pointers.size();
              }
            } else {
              Preconditions.checkArgument(false,
                  "Unknown record type: " + Integer.toHexString(type));
            }
          } else {
            skipCount++;
          }
        }
        LOG.info("Replayed " + count + " from " + log);
        if (LOG.isDebugEnabled()) {
          LOG.debug("read: " + readCount + ", put: " + putCount + ", take: "
              + takeCount + ", rollback: " + rollbackCount + ", commit: "
              + commitCount + ", skipp: " + skipCount);
        }
      } catch (EOFException e) {
        // A truncated trailing record is expected after a crash; the file is
        // simply replayed up to that point.
        LOG.warn("Hit EOF on " + log);
      } finally {
        total += count;
        count = 0;
        if (reader != null) {
          reader.close();
        }
      }
    }
    //re-insert the events in the take map,
    //since the takes were not committed.
    int uncommittedTakes = 0;
    for (Long inflightTxnId : inflightTakes.keySet()) {
      Set<Long> inflightUncommittedTakes =
          inflightTakes.get(inflightTxnId);
      for (Long inflightUncommittedTake : inflightUncommittedTakes) {
        queue.addHead(FlumeEventPointer.fromLong(inflightUncommittedTake));
        uncommittedTakes++;
      }
    }
    inflightTakes.clear();
    count += uncommittedTakes;
    int pendingTakesSize = pendingTakes.size();
    if (pendingTakesSize > 0) {
      String msg = "Pending takes " + pendingTakesSize
          + " exist after the end of replay";
      // NOTE(review): when debug logging is enabled only the individual
      // pointers are logged and msg itself is never emitted — mirrors the
      // original control flow; confirm whether that is intended.
      if (LOG.isDebugEnabled()) {
        for (Long pointer : pendingTakes) {
          LOG.debug("Pending take " + FlumeEventPointer.fromLong(pointer));
        }
      } else {
        LOG.error(msg + ". Duplicate messages will exist in destination.");
      }
    }
    LOG.info("Replayed " + total);
  }
  /**
   * Replay logs in the order records were written, merging records from all
   * log files by write order: one sequential reader is opened per file and a
   * priority queue ({@code logRecordBuffer}) always yields the globally next
   * record via {@link #next()}.
   *
   * @param logs log files to replay
   * @throws IOException
   */
  void replayLog(List<File> logs) throws Exception {
    int count = 0;
    // Maps transactionID -> pointers accumulated for that still-open txn.
    MultiMap transactionMap = new MultiValueMap();
    // seed both with the highest known sequence of either the tnxid or woid
    long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
    LOG.info("Starting replay of " + logs);
    //Load the inflight puts into the transaction map to see if they were
    //committed in one of the logs.
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    try {
      // Open a reader per file and prime the buffer with each file's first
      // record past the checkpoint; files with nothing left are closed now.
      for (File log : logs) {
        LOG.info("Replaying " + log);
        try {
          LogFile.SequentialReader reader =
              LogFileFactory.getSequentialReader(log, encryptionKeyProvider, fsyncPerTransaction);
          reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
          Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
              "Readers " + readers + " already contains "
                  + reader.getLogFileID());
          readers.put(reader.getLogFileID(), reader);
          LogRecord logRecord = reader.next();
          if (logRecord == null) {
            readers.remove(reader.getLogFileID());
            reader.close();
          } else {
            logRecordBuffer.add(logRecord);
          }
        } catch (EOFException e) {
          LOG.warn("Ignoring " + log + " due to EOF", e);
        }
      }
      LogRecord entry = null;
      FlumeEventPointer ptr = null;
      while ((entry = next()) != null) {
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = entry.getFileID();
        int offset = entry.getOffset();
        TransactionEventRecord record = entry.getEvent();
        short type = record.getRecordType();
        long trans = record.getTransactionID();
        // Track the largest IDs seen so the oracles can be re-seeded even if
        // replay aborts (see finally block).
        transactionIDSeed = Math.max(transactionIDSeed, trans);
        writeOrderIDSeed = Math.max(writeOrderIDSeed,
            record.getLogWriteOrderID());
        readCount++;
        if (readCount % 10000 == 0 && readCount > 0) {
          LOG.info("read: " + readCount + ", put: " + putCount + ", take: "
              + takeCount + ", rollback: " + rollbackCount + ", commit: "
              + commitCount + ", skip: " + skipCount + ", eventCount:" + count);
        }
        if (record.getLogWriteOrderID() > lastCheckpoint) {
          if (type == TransactionEventRecord.Type.PUT.get()) {
            putCount++;
            ptr = new FlumeEventPointer(fileId, offset);
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.TAKE.get()) {
            takeCount++;
            Take take = (Take) record;
            ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
            rollbackCount++;
            transactionMap.remove(trans);
          } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
            commitCount++;
            @SuppressWarnings("unchecked")
            Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
            // For a committed take, fold in the takes that were inflight at
            // checkpoint time so they are removed from the queue as well.
            if (((Commit) record).getType()
                == TransactionEventRecord.Type.TAKE.get()) {
              if (inflightTakes.containsKey(trans)) {
                if (pointers == null) {
                  pointers = Sets.newHashSet();
                }
                Set<Long> takes = inflightTakes.removeAll(trans);
                Iterator<Long> it = takes.iterator();
                while (it.hasNext()) {
                  Long take = it.next();
                  pointers.add(FlumeEventPointer.fromLong(take));
                }
              }
            }
            if (pointers != null && pointers.size() > 0) {
              processCommit(((Commit) record).getType(), pointers);
              count += pointers.size();
            }
          } else {
            Preconditions.checkArgument(false, "Unknown record type: "
                + Integer.toHexString(type));
          }
        } else {
          skipCount++;
        }
      }
      LOG.info("read: " + readCount + ", put: " + putCount + ", take: "
          + takeCount + ", rollback: " + rollbackCount + ", commit: "
          + commitCount + ", skip: " + skipCount + ", eventCount:" + count);
      queue.replayComplete();
    } finally {
      TransactionIDOracle.setSeed(transactionIDSeed);
      WriteOrderOracle.setSeed(writeOrderIDSeed);
      for (LogFile.SequentialReader reader : readers.values()) {
        if (reader != null) {
          reader.close();
        }
      }
    }
    //re-insert the events in the take map,
    //since the takes were not committed.
    int uncommittedTakes = 0;
    for (Long inflightTxnId : inflightTakes.keySet()) {
      Set<Long> inflightUncommittedTakes =
          inflightTakes.get(inflightTxnId);
      for (Long inflightUncommittedTake : inflightUncommittedTakes) {
        queue.addHead(FlumeEventPointer.fromLong(inflightUncommittedTake));
        uncommittedTakes++;
      }
    }
    inflightTakes.clear();
    count += uncommittedTakes;
    int pendingTakesSize = pendingTakes.size();
    if (pendingTakesSize > 0) {
      LOG.info("Pending takes " + pendingTakesSize + " exist after the" +
          " end of replay. Duplicate messages will exist in" +
          " destination.");
    }
  }
private LogRecord next() throws IOException, CorruptEventException {
LogRecord resultLogRecord = logRecordBuffer.poll();
if (resultLogRecord != null) {
// there is more log records to read
LogFile.SequentialReader reader = readers.get(resultLogRecord.getFileID());
LogRecord nextLogRecord;
if ((nextLogRecord = reader.next()) != null) {
logRecordBuffer.add(nextLogRecord);
}
}
return resultLogRecord;
}
private void processCommit(short type, Collection<FlumeEventPointer> pointers) {
if (type == TransactionEventRecord.Type.PUT.get()) {
for (FlumeEventPointer pointer : pointers) {
if (!queue.addTail(pointer)) {
throw new IllegalStateException("Unable to add "
+ pointer + ". Queue depth = " + queue.getSize()
+ ", Capacity = " + queue.getCapacity());
}
if (pendingTakes.remove(pointer.toLong())) {
Preconditions.checkState(queue.remove(pointer),
"Take was pending and pointer was successfully added to the"
+ " queue but could not be removed: " + pointer);
}
}
} else if (type == TransactionEventRecord.Type.TAKE.get()) {
for (FlumeEventPointer pointer : pointers) {
boolean removed = queue.remove(pointer);
if (!removed) {
pendingTakes.add(pointer.toLong());
}
}
} else {
Preconditions.checkArgument(false,
"Unknown record type: " + Integer.toHexString(type));
}
}
}
| 9,729 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Commit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.flume.channel.file.proto.ProtosFactory;
import com.google.common.base.Preconditions;
/**
 * On-disk representation of a transaction commit record. The single payload
 * field is the {@code type} of the transaction being committed (put or take).
 */
class Commit extends TransactionEventRecord {
  /** Marks whether this commit closes a put or a take transaction. */
  private short type;

  Commit(Long transactionID, Long logWriteOrderID) {
    super(transactionID, logWriteOrderID);
  }

  Commit(Long transactionID, Long logWriteOrderID, short type) {
    this(transactionID, logWriteOrderID);
    this.type = type;
  }

  /** Reads the shared header via the superclass, then the commit type. */
  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    type = in.readShort();
  }

  /** Writes the commit type as a length-delimited protobuf message. */
  @Override
  void writeProtos(OutputStream out) throws IOException {
    ProtosFactory.Commit.newBuilder()
        .setType(type)
        .build()
        .writeDelimitedTo(out);
  }

  /** Reads the commit type back from a length-delimited protobuf message. */
  @Override
  void readProtos(InputStream in) throws IOException {
    ProtosFactory.Commit commit =
        Preconditions.checkNotNull(ProtosFactory.Commit.parseDelimitedFrom(in),
            "Commit cannot be null");
    type = (short) commit.getType();
  }

  short getType() {
    return type;
  }

  /** Legacy serialization: shared header followed by the commit type. */
  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeShort(type);
  }

  @Override
  short getRecordType() {
    return Type.COMMIT.get();
  }

  @Override
  public String toString() {
    return "Commit [type=" + type
        + ", getLogWriteOrderID()=" + getLogWriteOrderID()
        + ", getTransactionID()=" + getTransactionID()
        + "]";
  }
}
| 9,730 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Writable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Contract for objects that can serialize their fields to a stream and
 * restore them from one. <p>
 *
 * Based on org.apache.hadoop.io.Writable
 */
interface Writable {
  /**
   * Serialize the fields of this object to {@code out}.
   *
   * @param out <code>DataOutput</code> to serialize this object into.
   * @throws IOException
   */
  void write(DataOutput out) throws IOException;

  /**
   * Deserialize the fields of this object from {@code in}.
   *
   * @param in <code>DataInput</code> to deserialize this object from.
   * @throws IOException
   */
  void readFields(DataInput in) throws IOException;
}
| 9,731 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/FileChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelFullException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.annotations.Disposable;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.channel.BasicChannelSemantics;
import org.apache.flume.channel.BasicTransactionSemantics;
import org.apache.flume.channel.file.Log.Builder;
import org.apache.flume.channel.file.encryption.EncryptionConfiguration;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.apache.flume.channel.file.encryption.KeyProviderFactory;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.apache.flume.conf.TransactionCapacitySupported;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
/**
* <p>
* A durable {@link Channel} implementation that uses the local file system for
* its storage.
* </p>
* <p>
* FileChannel works by writing all transactions to a set of directories
* specified in the configuration. Additionally, when a commit occurs
* the transaction is synced to disk.
* </p>
* <p>
* FileChannel is marked
* {@link org.apache.flume.annotations.InterfaceAudience.Private} because it
* should only be instantiated via a configuration. For example, users should
* certainly use FileChannel but not by instantiating FileChannel objects.
* Meaning the label Private applies to user-developers not user-operators.
* In cases where a Channel is required by instantiated by user-developers
* {@link org.apache.flume.channel.MemoryChannel} should be used.
* </p>
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
@Disposable
public class FileChannel extends BasicChannelSemantics implements TransactionCapacitySupported {
private static final Logger LOG = LoggerFactory.getLogger(FileChannel.class);
private Integer capacity = 0;
private int keepAlive;
protected Integer transactionCapacity = 0;
private Long checkpointInterval = 0L;
private long maxFileSize;
private long minimumRequiredSpace;
private File checkpointDir;
private File backupCheckpointDir;
private File[] dataDirs;
private Log log;
private volatile boolean open;
private volatile Throwable startupError;
private Semaphore queueRemaining;
private final ThreadLocal<FileBackedTransaction> transactions =
new ThreadLocal<FileBackedTransaction>();
private String channelNameDescriptor = "[channel=unknown]";
private FileChannelCounter channelCounter;
private boolean useLogReplayV1;
private boolean useFastReplay = false;
private KeyProvider encryptionKeyProvider;
private String encryptionActiveKey;
private String encryptionCipherProvider;
private boolean useDualCheckpoints;
private boolean compressBackupCheckpoint;
private boolean fsyncPerTransaction;
private int fsyncInterval;
private boolean checkpointOnClose = true;
  /** Records the channel name for log/error messages before delegating. */
  @Override
  public synchronized void setName(String name) {
    channelNameDescriptor = "[channel=" + name + "]";
    super.setName(name);
  }
  /**
   * Reads channel settings from {@code context}, falling back to the defaults
   * in {@link FileChannelConfiguration} and sanitizing invalid values
   * (non-positive capacity, transaction capacity and checkpoint interval are
   * replaced with defaults). Validates dual-checkpoint and encryption
   * settings via Preconditions so misconfiguration rejects the channel.
   * On reconfiguration, the existing semaphore and counter are kept and the
   * live log (if any) picks up the new checkpoint interval and max file size.
   */
  @Override
  public void configure(Context context) {
    useDualCheckpoints = context.getBoolean(
        FileChannelConfiguration.USE_DUAL_CHECKPOINTS,
        FileChannelConfiguration.DEFAULT_USE_DUAL_CHECKPOINTS);
    compressBackupCheckpoint = context.getBoolean(
        FileChannelConfiguration.COMPRESS_BACKUP_CHECKPOINT,
        FileChannelConfiguration.DEFAULT_COMPRESS_BACKUP_CHECKPOINT);
    // Normalize Windows-style separators so default paths work everywhere.
    String homePath = System.getProperty("user.home").replace('\\', '/');
    String strCheckpointDir =
        context.getString(FileChannelConfiguration.CHECKPOINT_DIR,
            homePath + "/.flume/file-channel/checkpoint").trim();
    String strBackupCheckpointDir =
        context.getString(FileChannelConfiguration.BACKUP_CHECKPOINT_DIR, "").trim();
    // Data dirs are a comma-separated list; blanks are dropped.
    String[] strDataDirs = Iterables.toArray(
        Splitter.on(",").trimResults().omitEmptyStrings().split(
            context.getString(FileChannelConfiguration.DATA_DIRS,
                homePath + "/.flume/file-channel/data")), String.class);
    checkpointDir = new File(strCheckpointDir);
    if (useDualCheckpoints) {
      Preconditions.checkState(!strBackupCheckpointDir.isEmpty(),
          "Dual checkpointing is enabled, but the backup directory is not set. " +
              "Please set " + FileChannelConfiguration.BACKUP_CHECKPOINT_DIR + " " +
              "to enable dual checkpointing");
      backupCheckpointDir = new File(strBackupCheckpointDir);
      /*
       * If the backup directory is the same as the checkpoint directory,
       * then throw an exception and force the config system to ignore this
       * channel.
       */
      Preconditions.checkState(!backupCheckpointDir.equals(checkpointDir),
          "Could not configure " + getName() + ". The checkpoint backup " +
              "directory and the checkpoint directory are " +
              "configured to be the same.");
    }
    dataDirs = new File[strDataDirs.length];
    for (int i = 0; i < strDataDirs.length; i++) {
      dataDirs[i] = new File(strDataDirs[i]);
    }
    capacity = context.getInteger(FileChannelConfiguration.CAPACITY,
        FileChannelConfiguration.DEFAULT_CAPACITY);
    if (capacity <= 0) {
      capacity = FileChannelConfiguration.DEFAULT_CAPACITY;
      LOG.warn("Invalid capacity specified, initializing channel to "
          + "default capacity of {}", capacity);
    }
    keepAlive =
        context.getInteger(FileChannelConfiguration.KEEP_ALIVE,
            FileChannelConfiguration.DEFAULT_KEEP_ALIVE);
    transactionCapacity =
        context.getInteger(FileChannelConfiguration.TRANSACTION_CAPACITY,
            FileChannelConfiguration.DEFAULT_TRANSACTION_CAPACITY);
    if (transactionCapacity <= 0) {
      transactionCapacity =
          FileChannelConfiguration.DEFAULT_TRANSACTION_CAPACITY;
      LOG.warn("Invalid transaction capacity specified, " +
          "initializing channel to default " +
          "capacity of {}", transactionCapacity);
    }
    Preconditions.checkState(transactionCapacity <= capacity,
        "File Channel transaction capacity cannot be greater than the " +
            "capacity of the channel.");
    checkpointInterval =
        context.getLong(FileChannelConfiguration.CHECKPOINT_INTERVAL,
            FileChannelConfiguration.DEFAULT_CHECKPOINT_INTERVAL);
    if (checkpointInterval <= 0) {
      LOG.warn("Checkpoint interval is invalid: " + checkpointInterval
          + ", using default: "
          + FileChannelConfiguration.DEFAULT_CHECKPOINT_INTERVAL);
      checkpointInterval =
          FileChannelConfiguration.DEFAULT_CHECKPOINT_INTERVAL;
    }
    // cannot be over FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE
    maxFileSize = Math.min(
        context.getLong(FileChannelConfiguration.MAX_FILE_SIZE,
            FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE),
        FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE);
    // Cannot go below the configured floor.
    minimumRequiredSpace = Math.max(
        context.getLong(FileChannelConfiguration.MINIMUM_REQUIRED_SPACE,
            FileChannelConfiguration.DEFAULT_MINIMUM_REQUIRED_SPACE),
        FileChannelConfiguration.FLOOR_MINIMUM_REQUIRED_SPACE);
    useLogReplayV1 = context.getBoolean(
        FileChannelConfiguration.USE_LOG_REPLAY_V1,
        FileChannelConfiguration.DEFAULT_USE_LOG_REPLAY_V1);
    useFastReplay = context.getBoolean(
        FileChannelConfiguration.USE_FAST_REPLAY,
        FileChannelConfiguration.DEFAULT_USE_FAST_REPLAY);
    Context encryptionContext = new Context(
        context.getSubProperties(EncryptionConfiguration.ENCRYPTION_PREFIX +
            "."));
    String encryptionKeyProviderName = encryptionContext.getString(
        EncryptionConfiguration.KEY_PROVIDER);
    encryptionActiveKey = encryptionContext.getString(
        EncryptionConfiguration.ACTIVE_KEY);
    encryptionCipherProvider = encryptionContext.getString(
        EncryptionConfiguration.CIPHER_PROVIDER);
    // Either all three encryption settings are present, or none of them.
    if (encryptionKeyProviderName != null) {
      Preconditions.checkState(!Strings.isNullOrEmpty(encryptionActiveKey),
          "Encryption configuration problem: " +
              EncryptionConfiguration.ACTIVE_KEY + " is missing");
      Preconditions.checkState(!Strings.isNullOrEmpty(encryptionCipherProvider),
          "Encryption configuration problem: " +
              EncryptionConfiguration.CIPHER_PROVIDER + " is missing");
      Context keyProviderContext = new Context(
          encryptionContext.getSubProperties(EncryptionConfiguration.KEY_PROVIDER + "."));
      encryptionKeyProvider = KeyProviderFactory.getInstance(
          encryptionKeyProviderName, keyProviderContext);
    } else {
      Preconditions.checkState(encryptionActiveKey == null,
          "Encryption configuration problem: " +
              EncryptionConfiguration.ACTIVE_KEY + " is present while key " +
              "provider name is not.");
      Preconditions.checkState(encryptionCipherProvider == null,
          "Encryption configuration problem: " +
              EncryptionConfiguration.CIPHER_PROVIDER + " is present while " +
              "key provider name is not.");
    }
    fsyncPerTransaction = context.getBoolean(FileChannelConfiguration
        .FSYNC_PER_TXN, FileChannelConfiguration.DEFAULT_FSYNC_PRE_TXN);
    fsyncInterval = context.getInteger(FileChannelConfiguration
        .FSYNC_INTERVAL, FileChannelConfiguration.DEFAULT_FSYNC_INTERVAL);
    checkpointOnClose = context.getBoolean(FileChannelConfiguration
        .CHKPT_ONCLOSE, FileChannelConfiguration.DEFAULT_CHKPT_ONCLOSE);
    // Created once; a reconfigure keeps the existing permit accounting.
    if (queueRemaining == null) {
      queueRemaining = new Semaphore(capacity, true);
    }
    // A live log picks up the tunables that can change at runtime.
    if (log != null) {
      log.setCheckpointInterval(checkpointInterval);
      log.setMaxFileSize(maxFileSize);
    }
    if (channelCounter == null) {
      channelCounter = new FileChannelCounter(getName());
    }
    channelCounter.setUnhealthy(0);
  }
  /**
   * Builds the log, replays it to rebuild the in-memory queue, then acquires
   * one semaphore permit per event already in the channel. On any failure the
   * channel is left closed and marked unhealthy, and the error is retained so
   * {@link #createTransaction()} can report the root cause; {@link Error}s
   * are rethrown.
   */
  @Override
  public synchronized void start() {
    LOG.info("Starting {}...", this);
    channelCounter.start();
    try {
      Builder builder = createLogBuilder();
      log = builder.build();
      log.replay();
      setOpen(true);
      int depth = getDepth();
      // Reserve capacity for the events recovered during replay.
      Preconditions.checkState(queueRemaining.tryAcquire(depth),
          "Unable to acquire " + depth + " permits " + channelNameDescriptor);
      LOG.info("Queue Size after replay: " + depth + " "
          + channelNameDescriptor);
    } catch (Throwable t) {
      setOpen(false);
      channelCounter.setUnhealthy(1);
      startupError = t;
      LOG.error("Failed to start the file channel " + channelNameDescriptor, t);
      if (t instanceof Error) {
        throw (Error) t;
      }
    }
    if (open) {
      channelCounter.setChannelSize(getDepth());
      channelCounter.setChannelCapacity(capacity);
    }
    super.start();
  }
  /**
   * Creates a {@link Log.Builder} populated from this channel's configured
   * settings. Factored out so tests can substitute or inspect the builder.
   */
  @VisibleForTesting
  Builder createLogBuilder() {
    Builder builder = new Log.Builder();
    builder.setCheckpointInterval(checkpointInterval);
    builder.setMaxFileSize(maxFileSize);
    builder.setMinimumRequiredSpace(minimumRequiredSpace);
    builder.setQueueSize(capacity);
    builder.setCheckpointDir(checkpointDir);
    builder.setLogDirs(dataDirs);
    builder.setChannelName(getName());
    builder.setUseLogReplayV1(useLogReplayV1);
    builder.setUseFastReplay(useFastReplay);
    builder.setEncryptionKeyProvider(encryptionKeyProvider);
    builder.setEncryptionKeyAlias(encryptionActiveKey);
    builder.setEncryptionCipherProvider(encryptionCipherProvider);
    builder.setUseDualCheckpoints(useDualCheckpoints);
    builder.setCompressBackupCheckpoint(compressBackupCheckpoint);
    builder.setBackupCheckpointDir(backupCheckpointDir);
    builder.setFsyncPerTransaction(fsyncPerTransaction);
    builder.setFsyncInterval(fsyncInterval);
    builder.setCheckpointOnClose(checkpointOnClose);
    builder.setChannelCounter(channelCounter);
    return builder;
  }
  /**
   * Closes the log and stops the counter. The channel size is captured before
   * closing so the counter can report the final depth.
   */
  @Override
  public synchronized void stop() {
    LOG.info("Stopping {}...", this);
    startupError = null;
    int size = getDepth();
    close();
    if (!open) {
      channelCounter.setChannelSize(size);
      channelCounter.stop();
    }
    super.stop();
  }
@Override
public String toString() {
return "FileChannel " + getName() + " { dataDirs: " +
Arrays.toString(dataDirs) + " }";
}
  /**
   * Creates a new file-backed transaction for the calling thread. Fails if
   * the channel is closed (reporting any retained startup error as the cause)
   * or if the thread still has an open transaction.
   */
  @Override
  protected BasicTransactionSemantics createTransaction() {
    if (!open) {
      String msg = "Channel closed " + channelNameDescriptor;
      if (startupError != null) {
        msg += ". Due to " + startupError.getClass().getName() + ": " +
            startupError.getMessage();
        throw new IllegalStateException(msg, startupError);
      }
      throw new IllegalStateException(msg);
    }
    // One transaction per thread: reject if the previous one is still open.
    FileBackedTransaction trans = transactions.get();
    if (trans != null && !trans.isClosed()) {
      Preconditions.checkState(false,
          "Thread has transaction which is still open: " +
              trans.getStateAsString() + channelNameDescriptor);
    }
    trans = new FileBackedTransaction(log, TransactionIDOracle.next(),
        transactionCapacity, keepAlive, queueRemaining, getName(),
        fsyncPerTransaction, channelCounter);
    transactions.set(trans);
    return trans;
  }
  /**
   * Number of committed events currently in the channel's queue.
   * Requires the channel to be open with an initialized log.
   */
  protected int getDepth() {
    Preconditions.checkState(open, "Channel closed" + channelNameDescriptor);
    Preconditions.checkNotNull(log, "log");
    FlumeEventQueue queue = log.getFlumeEventQueue();
    Preconditions.checkNotNull(queue, "queue");
    return queue.getSize();
  }
  /**
   * Marks the channel closed and shuts the log down, releasing the log and
   * semaphore references. Exceptions from the log close are logged and then
   * propagated.
   */
  void close() {
    if (open) {
      setOpen(false);
      try {
        log.close();
      } catch (Exception e) {
        LOG.error("Error while trying to close the log.", e);
        Throwables.propagate(e);
      }
      log = null;
      queueRemaining = null;
    }
  }
  /** Test hook: delegates to the log's fast-replay flag. */
  @VisibleForTesting
  boolean didFastReplay() {
    return log.didFastReplay();
  }
  /** Test hook: whether the log fell back to a full replay after a bad checkpoint. */
  @VisibleForTesting
  boolean didFullReplayDueToBadCheckpointException() {
    return log.didFullReplayDueToBadCheckpointException();
  }
  /** Whether the channel is currently open and accepting transactions. */
  public boolean isOpen() {
    return open;
  }
  /**
   * This method makes sure that <code>this.open</code> and
   * <code>channelCounter.open</code> are in sync.
   * Only for internal use; call from synchronized methods only. It also
   * assumes that <code>channelCounter</code> is not null.
   *
   * @param open new open state for both the channel and its counter
   */
  private void setOpen(boolean open) {
    this.open = open;
    channelCounter.setOpen(this.open);
  }
/**
* Did this channel recover a backup of the checkpoint to restart?
*
* @return true if the channel recovered using a backup.
*/
@VisibleForTesting
boolean checkpointBackupRestored() {
if (log != null) {
return log.backupRestored();
}
return false;
}
  /** Test hook: the underlying write-ahead log, or null before start/after close. */
  @VisibleForTesting
  Log getLog() {
    return log;
  }
  /** Test hook: the channel's metrics counter. */
  @VisibleForTesting
  FileChannelCounter getChannelCounter() {
    return channelCounter;
  }
  /** Maximum number of events a single transaction may hold. */
  @Override
  public long getTransactionCapacity() {
    return transactionCapacity;
  }
/**
* Transaction backed by a file. This transaction supports either puts
* or takes but not both.
*/
static class FileBackedTransaction extends BasicTransactionSemantics {
private final LinkedBlockingDeque<FlumeEventPointer> takeList;
private final LinkedBlockingDeque<FlumeEventPointer> putList;
private final long transactionID;
private final int keepAlive;
private final Log log;
private final FlumeEventQueue queue;
private final Semaphore queueRemaining;
private final String channelNameDescriptor;
private final FileChannelCounter channelCounter;
private final boolean fsyncPerTransaction;
    /**
     * Creates a transaction bound to {@code log} and its event queue.
     *
     * @param log the channel's write-ahead log
     * @param transactionID unique ID for this transaction
     * @param transCapacity bound for both the put and take lists
     * @param keepAlive seconds to wait for a capacity permit on put
     * @param queueRemaining channel-wide semaphore tracking free capacity
     * @param name channel name, used in error messages
     * @param fsyncPerTransaction whether corrupt events abort the transaction
     * @param counter metrics counter shared with the channel
     */
    public FileBackedTransaction(Log log, long transactionID,
        int transCapacity, int keepAlive, Semaphore queueRemaining,
        String name, boolean fsyncPerTransaction, FileChannelCounter
        counter) {
      this.log = log;
      queue = log.getFlumeEventQueue();
      this.transactionID = transactionID;
      this.keepAlive = keepAlive;
      this.queueRemaining = queueRemaining;
      putList = new LinkedBlockingDeque<FlumeEventPointer>(transCapacity);
      takeList = new LinkedBlockingDeque<FlumeEventPointer>(transCapacity);
      this.fsyncPerTransaction = fsyncPerTransaction;
      channelNameDescriptor = "[channel=" + name + "]";
      this.channelCounter = counter;
    }
    /** True once this transaction has reached the CLOSED state. */
    private boolean isClosed() {
      return State.CLOSED.equals(getState());
    }
    /** Current transaction state rendered for error messages. */
    private String getStateAsString() {
      return String.valueOf(getState());
    }
    /**
     * Stages one event: reserves a channel-capacity permit (waiting up to
     * {@code keepAlive} seconds), writes the put to the log under the shared
     * lock, and records the pointer as uncommitted. The permit is released if
     * anything fails after it was acquired.
     *
     * @throws ChannelException if the put list is full or the log write fails
     * @throws ChannelFullException if no capacity permit becomes available
     */
    @Override
    protected void doPut(Event event) throws InterruptedException {
      channelCounter.incrementEventPutAttemptCount();
      if (putList.remainingCapacity() == 0) {
        throw new ChannelException("Put queue for FileBackedTransaction " +
            "of capacity " + putList.size() + " full, consider " +
            "committing more frequently, increasing capacity or " +
            "increasing thread count. " + channelNameDescriptor);
      }
      // this does not need to be in the critical section as it does not
      // modify the structure of the log or queue.
      if (!queueRemaining.tryAcquire(keepAlive, TimeUnit.SECONDS)) {
        throw new ChannelFullException("The channel has reached it's capacity. "
            + "This might be the result of a sink on the channel having too "
            + "low of batch size, a downstream system running slower than "
            + "normal, or that the channel capacity is just too low. "
            + channelNameDescriptor);
      }
      boolean success = false;
      log.lockShared();
      try {
        FlumeEventPointer ptr = log.put(transactionID, event);
        Preconditions.checkState(putList.offer(ptr), "putList offer failed "
            + channelNameDescriptor);
        queue.addWithoutCommit(ptr, transactionID);
        success = true;
      } catch (IOException e) {
        channelCounter.incrementEventPutErrorCount();
        throw new ChannelException("Put failed due to IO error "
            + channelNameDescriptor, e);
      } finally {
        log.unlockShared();
        if (!success) {
          // release slot obtained in the case
          // the put fails for any reason
          queueRemaining.release();
        }
      }
    }
    /**
     * Removes the next event from the queue, persists the take to the log,
     * and returns the event. Corrupt or noop records are skipped (unless
     * fsyncPerTransaction is set, in which case corruption aborts the take).
     *
     * @return the next event, or null when the queue is empty
     * @throws ChannelException on I/O failure, full take list, or corruption
     *         with fsyncPerTransaction enabled
     */
    @Override
    protected Event doTake() throws InterruptedException {
      channelCounter.incrementEventTakeAttemptCount();
      if (takeList.remainingCapacity() == 0) {
        throw new ChannelException("Take list for FileBackedTransaction, capacity " +
            takeList.size() + " full, consider committing more frequently, " +
            "increasing capacity, or increasing thread count. "
            + channelNameDescriptor);
      }
      log.lockShared();
      /*
       * 1. Take an event which is in the queue.
       * 2. If getting that event does not throw NoopRecordException,
       *    then return it.
       * 3. Else try to retrieve the next event from the queue
       * 4. Repeat 2 and 3 until queue is empty or an event is returned.
       */
      try {
        while (true) {
          FlumeEventPointer ptr = queue.removeHead(transactionID);
          if (ptr == null) {
            return null;
          } else {
            try {
              // first add to takeList so that if write to disk
              // fails rollback actually does it's work
              Preconditions.checkState(takeList.offer(ptr),
                  "takeList offer failed "
                      + channelNameDescriptor);
              log.take(transactionID, ptr); // write take to disk
              Event event = log.get(ptr);
              return event;
            } catch (IOException e) {
              channelCounter.incrementEventTakeErrorCount();
              throw new ChannelException("Take failed due to IO error "
                  + channelNameDescriptor, e);
            } catch (NoopRecordException e) {
              LOG.warn("Corrupt record replaced by File Channel Integrity " +
                  "tool found. Will retrieve next event", e);
              // The take was not completed; drop it from the rollback list.
              takeList.remove(ptr);
            } catch (CorruptEventException ex) {
              channelCounter.incrementEventTakeErrorCount();
              if (fsyncPerTransaction) {
                throw new ChannelException(ex);
              }
              LOG.warn("Corrupt record found. Event will be " +
                  "skipped, and next event will be read.", ex);
              takeList.remove(ptr);
            }
          }
        }
      } finally {
        log.unlockShared();
      }
    }
/**
 * Commits this transaction. A transaction holds either puts or takes, never
 * both: staged puts are appended to the queue tail and become visible to
 * takers; committed takes release their capacity-semaphore permits back to
 * the channel.
 *
 * @throws InterruptedException declared by the framework contract; not
 *         thrown directly by the visible body
 */
@Override
protected void doCommit() throws InterruptedException {
  int puts = putList.size();
  int takes = takeList.size();
  if (puts > 0) {
    // Mixed put/take transactions are a programming error.
    Preconditions.checkState(takes == 0, "nonzero puts and takes "
        + channelNameDescriptor);
    log.lockShared();
    try {
      // Persist the commit record before mutating the in-memory queue.
      log.commitPut(transactionID);
      channelCounter.addToEventPutSuccessCount(puts);
      synchronized (queue) {
        while (!putList.isEmpty()) {
          if (!queue.addTail(putList.removeFirst())) {
            // Should be impossible: puts reserved capacity up front.
            StringBuilder msg = new StringBuilder();
            msg.append("Queue add failed, this shouldn't be able to ");
            msg.append("happen. A portion of the transaction has been ");
            msg.append("added to the queue but the remaining portion ");
            msg.append("cannot be added. Those messages will be consumed ");
            msg.append("despite this transaction failing. Please report.");
            msg.append(channelNameDescriptor);
            LOG.error(msg.toString());
            Preconditions.checkState(false, msg.toString());
          }
        }
        queue.completeTransaction(transactionID);
      }
    } catch (IOException e) {
      throw new ChannelException("Commit failed due to IO error "
          + channelNameDescriptor, e);
    } finally {
      log.unlockShared();
    }
  } else if (takes > 0) {
    log.lockShared();
    try {
      log.commitTake(transactionID);
      queue.completeTransaction(transactionID);
      channelCounter.addToEventTakeSuccessCount(takes);
    } catch (IOException e) {
      throw new ChannelException("Commit failed due to IO error "
          + channelNameDescriptor, e);
    } finally {
      log.unlockShared();
    }
    // Taken events are gone for good; free their capacity permits.
    queueRemaining.release(takes);
  }
  putList.clear();
  takeList.clear();
  channelCounter.setChannelSize(queue.getSize());
}
/**
 * Rolls this transaction back. Staged takes are re-inserted at the head of
 * the queue (in reverse order, restoring the original ordering); staged puts
 * are discarded and their capacity permits released.
 *
 * @throws InterruptedException declared by the framework contract; not
 *         thrown directly by the visible body
 */
@Override
protected void doRollback() throws InterruptedException {
  int puts = putList.size();
  int takes = takeList.size();
  log.lockShared();
  try {
    if (takes > 0) {
      // Mixed put/take transactions are a programming error.
      Preconditions.checkState(puts == 0, "nonzero puts and takes "
          + channelNameDescriptor);
      synchronized (queue) {
        // removeLast() re-inserts in reverse take order, so the queue
        // ends up in its pre-transaction order.
        while (!takeList.isEmpty()) {
          Preconditions.checkState(queue.addHead(takeList.removeLast()),
              "Queue add failed, this shouldn't be able to happen "
                  + channelNameDescriptor);
        }
      }
    }
    putList.clear();
    takeList.clear();
    queue.completeTransaction(transactionID);
    channelCounter.setChannelSize(queue.getSize());
    log.rollback(transactionID);
  } catch (IOException e) {
    // Fixed: this is a rollback failure, not a commit failure.
    throw new ChannelException("Rollback failed due to IO error "
        + channelNameDescriptor, e);
  } finally {
    log.unlockShared();
    // since rollback is being called, puts will never make it on
    // to the queue and we need to be sure to release the resources
    queueRemaining.release(puts);
  }
}
}
}
| 9,732 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Rollback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.flume.channel.file.proto.ProtosFactory;
import com.google.common.base.Preconditions;
/**
* Represents a Rollback on disk
*/
class Rollback extends TransactionEventRecord {

  Rollback(Long transactionID, Long logWriteOrderID) {
    super(transactionID, logWriteOrderID);
  }

  // A rollback record carries no payload beyond the base record, so the
  // v2 read/write paths simply delegate to the superclass.
  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
  }

  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
  }

  /** Writes an empty protobuf Rollback message, length-delimited. */
  @Override
  void writeProtos(OutputStream out) throws IOException {
    ProtosFactory.Rollback.newBuilder().build().writeDelimitedTo(out);
  }

  /** Consumes the length-delimited protobuf Rollback message from the stream. */
  @Override
  void readProtos(InputStream in) throws IOException {
    @SuppressWarnings("unused")
    ProtosFactory.Rollback rollback = Preconditions.checkNotNull(
        ProtosFactory.Rollback.parseDelimitedFrom(in), "Rollback cannot be null");
  }

  @Override
  short getRecordType() {
    return Type.ROLLBACK.get();
  }

  @Override
  public String toString() {
    return "Rollback [getLogWriteOrderID()=" + getLogWriteOrderID()
        + ", getTransactionID()=" + getTransactionID() + "]";
  }
}
| 9,733 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/FlumeEventQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.ArrayUtils;
import org.mapdb.DB;
import org.mapdb.DBMaker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.LongBuffer;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
/**
* Queue of events in the channel. This queue stores only
* {@link FlumeEventPointer} objects which are represented
* as 8 byte longs internally. Additionally the queue itself
* of longs is stored as a memory mapped file with a fixed
* header and circular queue semantics. The header of the queue
* contains the timestamp of last sync, the queue size and
* the head position.
*/
final class FlumeEventQueue {
  private static final Logger LOG = LoggerFactory
      .getLogger(FlumeEventQueue.class);
  // Sentinel for an empty slot; a valid FlumeEventPointer never encodes to 0.
  private static final int EMPTY = 0;
  private final EventQueueBackingStore backingStore;
  private final String channelNameDescriptor;
  private final InflightEventWrapper inflightTakes;
  private final InflightEventWrapper inflightPuts;
  // Replay instrumentation; reported once by replayComplete().
  private long searchTime = 0;
  private long searchCount = 0;
  private long copyTime = 0;
  private long copyCount = 0;
  // MapDB-backed mirror of the queue contents, used only during replay to
  // make remove(FlumeEventPointer) membership checks cheap. Both are null
  // after replayComplete().
  private DB db;
  private Set<Long> queueSet;

  /**
   * @param backingStore persistent store backing the queue; its capacity
   *                     bounds the number of queued events
   * @param inflightTakesFile file persisting takes not yet committed
   * @param inflightPutsFile file persisting puts not yet committed
   * @param queueSetDBDir scratch directory for the replay-time queue-set DB;
   *                      wiped and recreated on every start
   * @throws Exception if the inflight files cannot be read or the scratch
   *                   directory cannot be (re)created
   */
  FlumeEventQueue(EventQueueBackingStore backingStore, File inflightTakesFile,
                  File inflightPutsFile, File queueSetDBDir) throws Exception {
    // Null-check before the first dereference (getCapacity()) so a null
    // store fails with a clear message instead of an anonymous NPE.
    Preconditions.checkNotNull(backingStore, "backingStore");
    Preconditions.checkArgument(backingStore.getCapacity() > 0,
        "Capacity must be greater than zero");
    this.channelNameDescriptor = "[channel=" + backingStore.getName() + "]";
    Preconditions.checkNotNull(inflightTakesFile, "inflightTakesFile");
    Preconditions.checkNotNull(inflightPutsFile, "inflightPutsFile");
    Preconditions.checkNotNull(queueSetDBDir, "queueSetDBDir");
    this.backingStore = backingStore;
    try {
      inflightPuts = new InflightEventWrapper(inflightPutsFile);
      inflightTakes = new InflightEventWrapper(inflightTakesFile);
    } catch (Exception e) {
      LOG.error("Could not read checkpoint.", e);
      throw e;
    }
    // Always start from a clean scratch directory for the MapDB set.
    if (queueSetDBDir.isDirectory()) {
      FileUtils.deleteDirectory(queueSetDBDir);
    } else if (queueSetDBDir.isFile() && !queueSetDBDir.delete()) {
      throw new IOException("QueueSetDir " + queueSetDBDir + " is a file and"
          + " could not be deleted");
    }
    if (!queueSetDBDir.mkdirs()) {
      throw new IllegalStateException("Could not create QueueSet Dir "
          + queueSetDBDir);
    }
    File dbFile = new File(queueSetDBDir, "db");
    db = DBMaker.newFileDB(dbFile)
        .closeOnJvmShutdown()
        .transactionDisable()
        .syncOnCommitDisable()
        .deleteFilesAfterClose()
        .cacheDisable()
        .mmapFileEnableIfSupported()
        .make();
    queueSet =
        db.createHashSet("QueueSet " + " - " + backingStore.getName()).make();
    // Seed the set with everything currently on the queue.
    long start = System.currentTimeMillis();
    for (int i = 0; i < backingStore.getSize(); i++) {
      queueSet.add(get(i));
    }
    LOG.info("QueueSet population inserting " + backingStore.getSize()
        + " took " + (System.currentTimeMillis() - start));
  }

  SetMultimap<Long, Long> deserializeInflightPuts()
      throws IOException, BadCheckpointException {
    return inflightPuts.deserialize();
  }

  SetMultimap<Long, Long> deserializeInflightTakes()
      throws IOException, BadCheckpointException {
    return inflightTakes.deserialize();
  }

  synchronized long getLogWriteOrderID() {
    return backingStore.getLogWriteOrderID();
  }

  /**
   * Writes the queue and inflight state to disk if anything changed since
   * the last checkpoint (or unconditionally when {@code force} is set).
   *
   * @return true if a checkpoint was written, false if none was required
   */
  synchronized boolean checkpoint(boolean force) throws Exception {
    if (!backingStore.syncRequired()
        && !inflightTakes.syncRequired()
        && !force) { //No need to check inflight puts, since that would
      //cause elements.syncRequired() to return true.
      LOG.debug("Checkpoint not required");
      return false;
    }
    backingStore.beginCheckpoint();
    inflightPuts.serializeAndWrite();
    inflightTakes.serializeAndWrite();
    backingStore.checkpoint();
    return true;
  }

  /**
   * Retrieve and remove the head of the queue.
   *
   * @return FlumeEventPointer or null if queue is empty
   */
  synchronized FlumeEventPointer removeHead(long transactionID) {
    if (backingStore.getSize() == 0) {
      return null;
    }
    long value = remove(0, transactionID);
    Preconditions.checkState(value != EMPTY, "Empty value "
        + channelNameDescriptor);
    FlumeEventPointer ptr = FlumeEventPointer.fromLong(value);
    backingStore.decrementFileID(ptr.getFileID());
    return ptr;
  }

  /**
   * Add a FlumeEventPointer to the head of the queue.
   * Called during rollbacks.
   *
   * @param e pointer to re-insert at the head of the queue
   * @return true if space was available and pointer was
   * added to the queue
   */
  synchronized boolean addHead(FlumeEventPointer e) {
    //Called only during rollback, so should not consider inflight takes' size,
    //because normal puts through addTail method already account for these
    //events since they are in the inflight takes. So puts will not happen
    //in such a way that these takes cannot go back in. If this if returns true,
    //there is a buuuuuuuug!
    if (backingStore.getSize() == backingStore.getCapacity()) {
      LOG.error("Could not reinsert to queue, events which were taken but "
          + "not committed. Please report this issue.");
      return false;
    }
    long value = e.toLong();
    Preconditions.checkArgument(value != EMPTY);
    backingStore.incrementFileID(e.getFileID());
    add(0, value);
    return true;
  }

  /**
   * Add a FlumeEventPointer to the tail of the queue.
   *
   * @param e pointer to append to the tail of the queue
   * @return true if space was available and pointer
   * was added to the queue
   */
  synchronized boolean addTail(FlumeEventPointer e) {
    if (getSize() == backingStore.getCapacity()) {
      return false;
    }
    long value = e.toLong();
    Preconditions.checkArgument(value != EMPTY);
    backingStore.incrementFileID(e.getFileID());
    add(backingStore.getSize(), value);
    return true;
  }

  /**
   * Must be called when a put happens to the log. This ensures that put commits
   * after checkpoints will retrieve all events committed in that txn.
   *
   * @param e pointer of the uncommitted put
   * @param transactionID transaction the put belongs to
   */
  synchronized void addWithoutCommit(FlumeEventPointer e, long transactionID) {
    inflightPuts.addEvent(transactionID, e.toLong());
  }

  /**
   * Remove FlumeEventPointer from queue, will
   * only be used when recovering from a crash. It is not
   * legal to call this method after replayComplete has been
   * called.
   *
   * @param e pointer to remove
   * @return true if the FlumeEventPointer was found
   * and removed
   */
  // remove() overloads should not be split, according to checkstyle.
  // CHECKSTYLE:OFF
  synchronized boolean remove(FlumeEventPointer e) {
    long value = e.toLong();
    Preconditions.checkArgument(value != EMPTY);
    if (queueSet == null) {
      throw new IllegalStateException("QueueSet is null, thus replayComplete"
          + " has been called which is illegal");
    }
    // Fast path: the MapDB set makes absence checks O(1) instead of O(n).
    if (!queueSet.contains(value)) {
      return false;
    }
    searchCount++;
    long start = System.currentTimeMillis();
    for (int i = 0; i < backingStore.getSize(); i++) {
      if (get(i) == value) {
        remove(i, 0);
        FlumeEventPointer ptr = FlumeEventPointer.fromLong(value);
        backingStore.decrementFileID(ptr.getFileID());
        searchTime += System.currentTimeMillis() - start;
        return true;
      }
    }
    searchTime += System.currentTimeMillis() - start;
    return false;
  }
  // CHECKSTYLE:ON

  /**
   * @return a copy of the set of fileIDs which are currently on the queue
   * will be normally be used when deciding which data files can
   * be deleted
   */
  synchronized SortedSet<Integer> getFileIDs() {
    //Java implements clone pretty well. The main place this is used
    //in checkpointing and deleting old files, so best
    //to use a sorted set implementation.
    SortedSet<Integer> fileIDs =
        new TreeSet<Integer>(backingStore.getReferenceCounts());
    fileIDs.addAll(inflightPuts.getFileIDs());
    fileIDs.addAll(inflightTakes.getFileIDs());
    return fileIDs;
  }

  /** Reads the pointer at logical queue index {@code index}. */
  protected long get(int index) {
    if (index < 0 || index > backingStore.getSize() - 1) {
      throw new IndexOutOfBoundsException(String.valueOf(index)
          + channelNameDescriptor);
    }
    return backingStore.get(index);
  }

  /** Writes {@code value} at logical queue index {@code index}. */
  private void set(int index, long value) {
    if (index < 0 || index > backingStore.getSize() - 1) {
      throw new IndexOutOfBoundsException(String.valueOf(index)
          + channelNameDescriptor);
    }
    backingStore.put(index, value);
  }

  /**
   * Inserts {@code value} at {@code index}, shifting whichever half of the
   * circular queue is shorter to minimize element copies.
   */
  protected boolean add(int index, long value) {
    if (index < 0 || index > backingStore.getSize()) {
      throw new IndexOutOfBoundsException(String.valueOf(index)
          + channelNameDescriptor);
    }
    if (backingStore.getSize() == backingStore.getCapacity()) {
      return false;
    }
    backingStore.setSize(backingStore.getSize() + 1);
    if (index <= backingStore.getSize() / 2) {
      // Shift left
      backingStore.setHead(backingStore.getHead() - 1);
      if (backingStore.getHead() < 0) {
        backingStore.setHead(backingStore.getCapacity() - 1);
      }
      for (int i = 0; i < index; i++) {
        set(i, get(i + 1));
      }
    } else {
      // Sift right
      for (int i = backingStore.getSize() - 1; i > index; i--) {
        set(i, get(i - 1));
      }
    }
    set(index, value);
    if (queueSet != null) {
      queueSet.add(value);
    }
    return true;
  }

  /**
   * Must be called when a transaction is being committed or rolled back.
   *
   * @param transactionID transaction to clear from the inflight trackers
   */
  synchronized void completeTransaction(long transactionID) {
    if (!inflightPuts.completeTransaction(transactionID)) {
      inflightTakes.completeTransaction(transactionID);
    }
  }

  /**
   * Removes and returns the pointer at {@code index}, shifting whichever half
   * of the circular queue is shorter. A transactionID of 0 means we are
   * recovering from a crash and the removal is not tracked as inflight.
   */
  protected synchronized long remove(int index, long transactionID) {
    if (index < 0 || index > backingStore.getSize() - 1) {
      throw new IndexOutOfBoundsException("index = " + index
          + ", queueSize " + backingStore.getSize() + " " + channelNameDescriptor);
    }
    copyCount++;
    long start = System.currentTimeMillis();
    long value = get(index);
    if (queueSet != null) {
      queueSet.remove(value);
    }
    //if txn id = 0, we are recovering from a crash.
    if (transactionID != 0) {
      inflightTakes.addEvent(transactionID, value);
    }
    if (index > backingStore.getSize() / 2) {
      // Move tail part to left
      for (int i = index; i < backingStore.getSize() - 1; i++) {
        long rightValue = get(i + 1);
        set(i, rightValue);
      }
      set(backingStore.getSize() - 1, EMPTY);
    } else {
      // Move head part to right
      for (int i = index - 1; i >= 0; i--) {
        long leftValue = get(i);
        set(i + 1, leftValue);
      }
      set(0, EMPTY);
      backingStore.setHead(backingStore.getHead() + 1);
      if (backingStore.getHead() == backingStore.getCapacity()) {
        backingStore.setHead(0);
      }
    }
    backingStore.setSize(backingStore.getSize() - 1);
    copyTime += System.currentTimeMillis() - start;
    return value;
  }

  /** Queue size including takes that are inflight but not yet committed. */
  protected synchronized int getSize() {
    return backingStore.getSize() + inflightTakes.getSize();
  }

  /**
   * @return max capacity of the queue
   */
  public int getCapacity() {
    return backingStore.getCapacity();
  }

  synchronized void close() throws IOException {
    try {
      if (db != null) {
        db.close();
      }
    } catch (Exception ex) {
      LOG.warn("Error closing db", ex);
    }
    try {
      backingStore.close();
      inflightPuts.close();
      inflightTakes.close();
    } catch (IOException e) {
      LOG.warn("Error closing backing store", e);
    }
  }

  /**
   * Called when ReplayHandler has completed and thus remove(FlumeEventPointer)
   * will no longer be called.
   */
  synchronized void replayComplete() {
    String msg = "Search Count = " + searchCount + ", Search Time = " +
        searchTime + ", Copy Count = " + copyCount + ", Copy Time = " +
        copyTime;
    LOG.info(msg);
    if (db != null) {
      db.close();
    }
    queueSet = null;
    db = null;
  }

  @VisibleForTesting
  long getSearchCount() {
    return searchCount;
  }

  @VisibleForTesting
  long getCopyCount() {
    return copyCount;
  }

  /**
   * A representation of in flight events which have not yet been committed.
   * None of the methods are thread safe, and should be called from thread
   * safe methods only.
   */
  class InflightEventWrapper {
    private SetMultimap<Long, Long> inflightEvents = HashMultimap.create();
    // Both these are volatile for safe publication, they are never accessed by
    // more than 1 thread at a time.
    private volatile RandomAccessFile file;
    private volatile java.nio.channels.FileChannel fileChannel;
    private final MessageDigest digest;
    private final File inflightEventsFile;
    private volatile boolean syncRequired = false;
    private SetMultimap<Long, Integer> inflightFileIDs = HashMultimap.create();

    public InflightEventWrapper(File inflightEventsFile) throws Exception {
      if (!inflightEventsFile.exists()) {
        Preconditions.checkState(inflightEventsFile.createNewFile(), "Could not"
            + "create inflight events file: "
            + inflightEventsFile.getCanonicalPath());
      }
      this.inflightEventsFile = inflightEventsFile;
      file = new RandomAccessFile(inflightEventsFile, "rw");
      fileChannel = file.getChannel();
      digest = MessageDigest.getInstance("MD5");
    }

    /**
     * Complete the transaction, and remove all events from inflight list.
     *
     * @param transactionID transaction to complete
     * @return false if the transaction was not tracked here
     */
    public boolean completeTransaction(Long transactionID) {
      if (!inflightEvents.containsKey(transactionID)) {
        return false;
      }
      inflightEvents.removeAll(transactionID);
      inflightFileIDs.removeAll(transactionID);
      syncRequired = true;
      return true;
    }

    /**
     * Add an event pointer to the inflights list.
     *
     * @param transactionID owning transaction
     * @param pointer encoded FlumeEventPointer
     */
    public void addEvent(Long transactionID, Long pointer) {
      inflightEvents.put(transactionID, pointer);
      inflightFileIDs.put(transactionID,
          FlumeEventPointer.fromLong(pointer).getFileID());
      syncRequired = true;
    }

    /**
     * Serialize the set of in flights into a byte longBuffer.
     *
     * @return Returns the checksum of the buffer that is being
     * asynchronously written to disk.
     */
    public void serializeAndWrite() throws Exception {
      Collection<Long> values = inflightEvents.values();
      if (!fileChannel.isOpen()) {
        file = new RandomAccessFile(inflightEventsFile, "rw");
        fileChannel = file.getChannel();
      }
      if (values.isEmpty()) {
        file.setLength(0L);
      }
      //What is written out?
      //Checksum - 16 bytes
      //and then each key-value pair from the map:
      //transactionid numberofeventsforthistxn listofeventpointers
      try {
        int expectedFileSize = (((inflightEvents.keySet().size() * 2) //for transactionIDs and
            //events per txn ID
            + values.size()) * 8) //Event pointers
            + 16; //Checksum
        //There is no real need of filling the channel with 0s, since we
        //will write the exact number of bytes as expected file size.
        file.setLength(expectedFileSize);
        Preconditions.checkState(file.length() == expectedFileSize,
            "Expected File size of inflight events file does not match the "
                + "current file size. Checkpoint is incomplete.");
        file.seek(0);
        final ByteBuffer buffer = ByteBuffer.allocate(expectedFileSize);
        LongBuffer longBuffer = buffer.asLongBuffer();
        for (Long txnID : inflightEvents.keySet()) {
          Set<Long> pointers = inflightEvents.get(txnID);
          longBuffer.put(txnID);
          longBuffer.put((long) pointers.size());
          LOG.debug("Number of events inserted into "
              + "inflights file: " + String.valueOf(pointers.size())
              + " file: " + inflightEventsFile.getCanonicalPath());
          long[] written = ArrayUtils.toPrimitive(
              pointers.toArray(new Long[0]));
          longBuffer.put(written);
        }
        byte[] checksum = digest.digest(buffer.array());
        file.write(checksum);
        buffer.position(0);
        fileChannel.write(buffer);
        fileChannel.force(true);
        syncRequired = false;
      } catch (IOException ex) {
        LOG.error("Error while writing checkpoint to disk.", ex);
        throw ex;
      }
    }

    /**
     * Read the inflights file and return a
     * {@link com.google.common.collect.SetMultimap}
     * of transactionIDs to events that were inflight.
     *
     * @return - map of inflight events per txnID.
     */
    public SetMultimap<Long, Long> deserialize()
        throws IOException, BadCheckpointException {
      SetMultimap<Long, Long> inflights = HashMultimap.create();
      if (!fileChannel.isOpen()) {
        file = new RandomAccessFile(inflightEventsFile, "rw");
        fileChannel = file.getChannel();
      }
      if (file.length() == 0) {
        return inflights;
      }
      file.seek(0);
      byte[] checksum = new byte[16];
      // readFully: a plain read() may return fewer than 16 bytes.
      file.readFully(checksum);
      ByteBuffer buffer = ByteBuffer.allocate(
          (int) (file.length() - file.getFilePointer()));
      // A single read() is not guaranteed to fill the buffer; loop until
      // full or EOF. A short file then fails the checksum check below.
      while (buffer.hasRemaining() && fileChannel.read(buffer) >= 0) {
        // keep reading
      }
      byte[] fileChecksum = digest.digest(buffer.array());
      if (!Arrays.equals(checksum, fileChecksum)) {
        throw new BadCheckpointException("Checksum of inflights file differs"
            + " from the checksum expected.");
      }
      buffer.position(0);
      LongBuffer longBuffer = buffer.asLongBuffer();
      try {
        while (true) {
          long txnID = longBuffer.get();
          int numEvents = (int) (longBuffer.get());
          for (int i = 0; i < numEvents; i++) {
            long val = longBuffer.get();
            inflights.put(txnID, val);
          }
        }
      } catch (BufferUnderflowException ex) {
        // Normal termination: we read key/count/pointer triples until the
        // buffer is exhausted.
        LOG.debug("Reached end of inflights buffer. Long buffer position ="
            + String.valueOf(longBuffer.position()));
      }
      return inflights;
    }

    public int getSize() {
      return inflightEvents.size();
    }

    public boolean syncRequired() {
      return syncRequired;
    }

    public Collection<Integer> getFileIDs() {
      return inflightFileIDs.values();
    }

    //Needed for testing.
    public Collection<Long> getInFlightPointers() {
      return inflightEvents.values();
    }

    public void close() throws IOException {
      file.close();
    }
  }
}
| 9,734 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/WriteOrderOracle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.util.concurrent.atomic.AtomicLong;
public final class WriteOrderOracle {
private WriteOrderOracle() {
}
private static final AtomicLong WRITER_ORDERER =
new AtomicLong(System.currentTimeMillis());
public static void setSeed(long highest) {
long previous;
while (highest > (previous = WRITER_ORDERER.get())) {
WRITER_ORDERER.compareAndSet(previous, highest);
}
}
public static long next() {
return WRITER_ORDERER.incrementAndGet();
}
}
| 9,735 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/LogFileV2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Represents a single data file on disk. Has methods to write,
* read sequentially (replay), and read randomly (channel takes).
*/
@Deprecated
class LogFileV2 extends LogFile {
  protected static final Logger LOGGER =
      LoggerFactory.getLogger(LogFileV2.class);
  // The checkpoint (offset, logWriteOrderID) pair sits immediately after the
  // two int header fields: version and logFileID.
  private static final long OFFSET_CHECKPOINT = 2 * Serialization.SIZE_OF_INT;

  private LogFileV2() {
  }

  /**
   * Reads and validates the v2 header of an existing data file, then tracks
   * the last checkpoint offset/order-ID for subsequent checkpoint marking.
   */
  static class MetaDataWriter extends LogFile.MetaDataWriter {
    protected MetaDataWriter(File file, int logFileID) throws IOException {
      super(file, logFileID);
      // Close the handle on any validation failure so we don't leak it.
      boolean error = true;
      try {
        RandomAccessFile writeFileHandle = getFileHandle();
        int version = writeFileHandle.readInt();
        if (version != getVersion()) {
          throw new IOException("The version of log file: "
              + file.getCanonicalPath() + " is different from expected "
              + " version: expected = " + getVersion() + ", found = " + version);
        }
        int fid = writeFileHandle.readInt();
        if (fid != logFileID) {
          throw new IOException("The file id of log file: "
              + file.getCanonicalPath() + " is different from expected "
              + " id: expected = " + logFileID + ", found = " + fid);
        }
        setLastCheckpointOffset(writeFileHandle.readLong());
        setLastCheckpointWriteOrderID(writeFileHandle.readLong());
        LOGGER.info("File: " + file.getCanonicalPath() + " was last checkpointed "
            + "at position: " + getLastCheckpointOffset()
            + ", logWriteOrderID: " + getLastCheckpointWriteOrderID());
        error = false;
      } finally {
        if (error) {
          close();
        }
      }
    }

    @Override
    int getVersion() {
      return Serialization.VERSION_2;
    }

    /**
     * Records the checkpoint position and write-order ID in the file header
     * and forces it to disk.
     */
    @Override
    void markCheckpoint(long currentPosition, long logWriteOrderID)
        throws IOException {
      RandomAccessFile writeFileHandle = getFileHandle();
      writeFileHandle.seek(OFFSET_CHECKPOINT);
      writeFileHandle.writeLong(currentPosition);
      writeFileHandle.writeLong(logWriteOrderID);
      writeFileHandle.getChannel().force(true);
      LOGGER.info("Noted checkpoint for file: " + getFile() + ", id: "
          + getLogFileID() + ", checkpoint position: " + currentPosition
          + ", logWriteOrderID: " + logWriteOrderID);
    }
  }

  /**
   * Creates a new v2 data file: writes the header (version, logFileID and
   * two zeroed longs for the checkpoint fields) and syncs it to disk.
   */
  static class Writer extends LogFile.Writer {
    Writer(File file, int logFileID, long maxFileSize,
           long usableSpaceRefreshInterval)
        throws IOException {
      super(file, logFileID, maxFileSize, null, usableSpaceRefreshInterval,
          true, 0);
      RandomAccessFile writeFileHandle = getFileHandle();
      writeFileHandle.writeInt(getVersion());
      writeFileHandle.writeInt(logFileID);
      // checkpoint marker
      writeFileHandle.writeLong(0L);
      // timestamp placeholder
      writeFileHandle.writeLong(0L);
      getFileChannel().force(true);
    }

    @Override
    int getVersion() {
      return Serialization.VERSION_2;
    }
  }

  /** Random-access reader used for channel takes (no cipher in v2). */
  static class RandomReader extends LogFile.RandomReader {
    RandomReader(File file) throws IOException {
      super(file, null, true);
    }

    @Override
    int getVersion() {
      return Serialization.VERSION_2;
    }

    @Override
    protected TransactionEventRecord doGet(RandomAccessFile fileHandle) throws IOException {
      return TransactionEventRecord.fromDataInputV2(fileHandle);
    }
  }

  /**
   * Sequential reader used during replay; validates the v2 header then hands
   * back records one at a time.
   */
  static class SequentialReader extends LogFile.SequentialReader {
    SequentialReader(File file) throws EOFException, IOException {
      super(file, null);
      RandomAccessFile fileHandle = getFileHandle();
      int version = fileHandle.readInt();
      if (version != getVersion()) {
        throw new IOException("Version is " + Integer.toHexString(version) +
            " expected " + Integer.toHexString(getVersion())
            + " file: " + file.getCanonicalPath());
      }
      setLogFileID(fileHandle.readInt());
      setLastCheckpointPosition(fileHandle.readLong());
      setLastCheckpointWriteOrderID(fileHandle.readLong());
    }

    @Override
    public int getVersion() {
      return Serialization.VERSION_2;
    }

    @Override
    LogRecord doNext(int offset) throws IOException {
      TransactionEventRecord event =
          TransactionEventRecord.fromDataInputV2(getFileHandle());
      return new LogRecord(getLogFileID(), offset, event);
    }
  }
}
| 9,736 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/EventQueueBackingStoreFileV3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
final class EventQueueBackingStoreFileV3 extends EventQueueBackingStoreFile {
  private static final Logger LOG = LoggerFactory.getLogger(EventQueueBackingStoreFileV3.class);

  /** Protobuf metadata file that accompanies the checkpoint file. */
  private final File metaDataFile;

  EventQueueBackingStoreFileV3(
      File checkpointFile, int capacity, String name, FileChannelCounter counter
  ) throws IOException, BadCheckpointException {
    this(checkpointFile, capacity, name, counter, null, false, false);
  }

  /**
   * Opens (or creates) a V3 checkpoint backing store. If the protobuf
   * metadata file exists it is read and validated against the checkpoint
   * file; otherwise a fresh metadata file is written from current state.
   *
   * @throws BadCheckpointException if the metadata file is missing/empty
   *         while a backup exists, has a wrong version, is unparsable, or
   *         disagrees with the checkpoint's write order id
   */
  EventQueueBackingStoreFileV3(File checkpointFile, int capacity,
      String name, FileChannelCounter counter, File checkpointBackupDir,
      boolean backupCheckpoint, boolean compressBackup)
      throws IOException, BadCheckpointException {
    super(capacity, name, counter, checkpointFile, checkpointBackupDir, backupCheckpoint,
        compressBackup);
    Preconditions.checkArgument(capacity > 0,
        "capacity must be greater than 0 " + capacity);
    metaDataFile = Serialization.getMetaDataFile(checkpointFile);
    LOG.info("Starting up with " + checkpointFile + " and " + metaDataFile);
    if (metaDataFile.exists()) {
      FileInputStream inputStream = new FileInputStream(metaDataFile);
      try {
        LOG.info("Reading checkpoint metadata from " + metaDataFile);
        ProtosFactory.Checkpoint checkpoint =
            ProtosFactory.Checkpoint.parseDelimitedFrom(inputStream);
        if (checkpoint == null) {
          throw new BadCheckpointException("The checkpoint metadata file does "
              + "not exist or has zero length");
        }
        int version = checkpoint.getVersion();
        if (version != getVersion()) {
          throw new BadCheckpointException("Invalid version: " + version +
              " " + name + ", expected " + getVersion());
        }
        long logWriteOrderID = checkpoint.getWriteOrderID();
        // The metadata and checkpoint files are written together; differing
        // write order ids mean one of them is stale or torn.
        if (logWriteOrderID != getCheckpointLogWriteOrderID()) {
          String msg = "Checkpoint and Meta files have differing " +
              "logWriteOrderIDs " + getCheckpointLogWriteOrderID() + ", and "
              + logWriteOrderID;
          LOG.warn(msg);
          throw new BadCheckpointException(msg);
        }
        WriteOrderOracle.setSeed(logWriteOrderID);
        setLogWriteOrderID(logWriteOrderID);
        setSize(checkpoint.getQueueSize());
        setHead(checkpoint.getQueueHead());
        // Restore per-log-file reference counts recorded at checkpoint time.
        for (ProtosFactory.ActiveLog activeLog : checkpoint.getActiveLogsList()) {
          Integer logFileID = activeLog.getLogFileID();
          Integer count = activeLog.getCount();
          logFileIDReferenceCounts.put(logFileID, new AtomicInteger(count));
        }
      } catch (InvalidProtocolBufferException ex) {
        throw new BadCheckpointException("Checkpoint metadata file is invalid. "
            + "The agent might have been stopped while it was being "
            + "written", ex);
      } finally {
        try {
          inputStream.close();
        } catch (IOException e) {
          LOG.warn("Unable to close " + metaDataFile, e);
        }
      }
    } else {
      if (backupExists(checkpointBackupDir) && shouldBackup) {
        // If a backup exists, then throw an exception to recover checkpoint
        throw new BadCheckpointException("The checkpoint metadata file does " +
            "not exist, but a backup exists");
      }
      // No metadata yet: persist the current (fresh) state.
      ProtosFactory.Checkpoint.Builder checkpointBuilder =
          ProtosFactory.Checkpoint.newBuilder();
      checkpointBuilder.setVersion(getVersion());
      checkpointBuilder.setQueueHead(getHead());
      checkpointBuilder.setQueueSize(getSize());
      checkpointBuilder.setWriteOrderID(getLogWriteOrderID());
      writeDelimitedCheckpoint(checkpointBuilder.build(), metaDataFile);
    }
  }

  File getMetaDataFile() {
    return metaDataFile;
  }

  @Override
  protected int getVersion() {
    return Serialization.VERSION_3;
  }

  /**
   * Serializes the current queue state and all non-zero log-file reference
   * counts to the protobuf metadata file, fsync'ing before returning.
   */
  @Override
  protected void writeCheckpointMetaData() throws IOException {
    ProtosFactory.Checkpoint.Builder checkpointBuilder =
        ProtosFactory.Checkpoint.newBuilder();
    checkpointBuilder.setVersion(getVersion());
    checkpointBuilder.setQueueHead(getHead());
    checkpointBuilder.setQueueSize(getSize());
    checkpointBuilder.setWriteOrderID(getLogWriteOrderID());
    for (Integer logFileID : logFileIDReferenceCounts.keySet()) {
      int count = logFileIDReferenceCounts.get(logFileID).get();
      if (count != 0) {
        ProtosFactory.ActiveLog.Builder activeLogBuilder =
            ProtosFactory.ActiveLog.newBuilder();
        activeLogBuilder.setLogFileID(logFileID);
        activeLogBuilder.setCount(count);
        checkpointBuilder.addActiveLogs(activeLogBuilder.build());
      }
    }
    writeDelimitedCheckpoint(checkpointBuilder.build(), metaDataFile);
  }

  /**
   * Upgrades a V2 store in place: writes a V3 protobuf metadata file from the
   * V2 state, then stamps VERSION_3 into the checkpoint file itself.
   */
  static void upgrade(EventQueueBackingStoreFileV2 backingStoreV2,
                      File checkpointFile, File metaDataFile)
      throws IOException {
    int head = backingStoreV2.getHead();
    int size = backingStoreV2.getSize();
    long writeOrderID = backingStoreV2.getLogWriteOrderID();
    Map<Integer, AtomicInteger> referenceCounts =
        backingStoreV2.logFileIDReferenceCounts;
    ProtosFactory.Checkpoint.Builder checkpointBuilder =
        ProtosFactory.Checkpoint.newBuilder();
    checkpointBuilder.setVersion(Serialization.VERSION_3);
    checkpointBuilder.setQueueHead(head);
    checkpointBuilder.setQueueSize(size);
    checkpointBuilder.setWriteOrderID(writeOrderID);
    for (Integer logFileID : referenceCounts.keySet()) {
      int count = referenceCounts.get(logFileID).get();
      if (count > 0) {
        ProtosFactory.ActiveLog.Builder activeLogBuilder =
            ProtosFactory.ActiveLog.newBuilder();
        activeLogBuilder.setLogFileID(logFileID);
        activeLogBuilder.setCount(count);
        checkpointBuilder.addActiveLogs(activeLogBuilder.build());
      }
    }
    writeDelimitedCheckpoint(checkpointBuilder.build(), metaDataFile);
    // Mark the checkpoint file itself as V3.
    RandomAccessFile checkpointFileHandle =
        new RandomAccessFile(checkpointFile, "rw");
    try {
      checkpointFileHandle.seek(INDEX_VERSION * Serialization.SIZE_OF_LONG);
      checkpointFileHandle.writeLong(Serialization.VERSION_3);
      checkpointFileHandle.getChannel().force(true);
    } finally {
      try {
        checkpointFileHandle.close();
      } catch (IOException e) {
        LOG.warn("Unable to close " + checkpointFile, e);
      }
    }
  }

  /**
   * Writes a length-delimited Checkpoint message to {@code file} and forces
   * it to disk. Close failures are logged, not thrown. Shared by the
   * constructor, {@link #writeCheckpointMetaData()} and
   * {@link #upgrade}, which previously duplicated this logic.
   */
  private static void writeDelimitedCheckpoint(
      ProtosFactory.Checkpoint checkpoint, File file) throws IOException {
    FileOutputStream outputStream = new FileOutputStream(file);
    try {
      checkpoint.writeDelimitedTo(outputStream);
      outputStream.getChannel().force(true);
    } finally {
      try {
        outputStream.close();
      } catch (IOException e) {
        LOG.warn("Unable to close " + file, e);
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Preconditions;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
final class EventQueueBackingStoreFileV2 extends EventQueueBackingStoreFile {
  // Fixed slot indexes within the checkpoint elements buffer (V2 layout).
  private static final int INDEX_SIZE = 2;
  private static final int INDEX_HEAD = 3;
  private static final int INDEX_ACTIVE_LOG = 5;
  // V2 reserves a fixed number of slots for active-log reference counters.
  private static final int MAX_ACTIVE_LOGS = 1024;

  /**
   * Loads queue state (write order id, size, head) and per-log-file
   * reference counts from an existing V2 checkpoint buffer.
   */
  EventQueueBackingStoreFileV2(
      File checkpointFile, int capacity, String name, FileChannelCounter counter
  ) throws IOException, BadCheckpointException {
    super(capacity, name, counter, checkpointFile);
    Preconditions.checkArgument(capacity > 0,
        "capacity must be greater than 0 " + capacity);
    setLogWriteOrderID(elementsBuffer.get(INDEX_WRITE_ORDER_ID));
    setSize((int) elementsBuffer.get(INDEX_SIZE));
    setHead((int) elementsBuffer.get(INDEX_HEAD));
    int indexMaxLog = INDEX_ACTIVE_LOG + MAX_ACTIVE_LOGS;
    for (int i = INDEX_ACTIVE_LOG; i < indexMaxLog; i++) {
      long nextFileCode = elementsBuffer.get(i);
      if (nextFileCode != EMPTY) {
        Pair<Integer, Integer> idAndCount =
            decodeActiveLogCounter(nextFileCode);
        logFileIDReferenceCounts.put(idAndCount.getLeft(),
            new AtomicInteger(idAndCount.getRight()));
      }
    }
  }

  @Override
  protected int getVersion() {
    return Serialization.VERSION_2;
  }

  /** Increments the count and enforces the V2 fixed active-log limit. */
  @Override
  protected void incrementFileID(int fileID) {
    super.incrementFileID(fileID);
    Preconditions.checkState(logFileIDReferenceCounts.size() < MAX_ACTIVE_LOGS,
        "Too many active logs ");
  }

  /** Splits an encoded slot value into (logFileID, referenceCount). */
  private Pair<Integer, Integer> decodeActiveLogCounter(long value) {
    int fileId = (int) (value >>> 32);
    int count = (int) value;
    return Pair.of(fileId, count);
  }

  /** Packs a logFileID (high 32 bits) with its referenceCount (low 32 bits). */
  private long encodeActiveLogCounter(int fileId, int count) {
    return ((long) fileId << 32) + (long) count;
  }

  /**
   * Writes queue size/head plus all active-log counters back into the
   * checkpoint buffer, zero-filling the unused slots so stale counters
   * from a previous checkpoint are not left behind.
   */
  @Override
  protected void writeCheckpointMetaData() {
    elementsBuffer.put(INDEX_SIZE, getSize());
    elementsBuffer.put(INDEX_HEAD, getHead());
    List<Long> fileIdAndCountEncoded = new ArrayList<Long>();
    for (Integer fileId : logFileIDReferenceCounts.keySet()) {
      Integer count = logFileIDReferenceCounts.get(fileId).get();
      fileIdAndCountEncoded.add(encodeActiveLogCounter(fileId, count));
    }
    int emptySlots = MAX_ACTIVE_LOGS - fileIdAndCountEncoded.size();
    for (int i = 0; i < emptySlots; i++) {
      fileIdAndCountEncoded.add(0L);
    }
    for (int i = 0; i < MAX_ACTIVE_LOGS; i++) {
      elementsBuffer.put(i + INDEX_ACTIVE_LOG, fileIdAndCountEncoded.get(i));
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
/**
* Pointer to an Event on disk. This is represented in memory
* as a long. As such there are methods to convert from this
* object to a long and from a long to this object.
*/
/**
 * Pointer to an Event on disk, encoded in memory as a single long with the
 * file id in the high 32 bits and the byte offset in the low 32 bits.
 * Instances are immutable.
 */
class FlumeEventPointer {
  private final int fileID;
  private final int offset;

  /**
   * @param fileID id of the log file containing the event
   * @param offset byte offset of the event within that file; must be >= 0
   * @throws IllegalArgumentException if offset is negative
   */
  FlumeEventPointer(int fileID, int offset) {
    this.fileID = fileID;
    this.offset = offset;
    /*
     * Log files used to have a header, now metadata is in
     * a separate file so data starts at offset 0.
     */
    if (offset < 0) {
      throw new IllegalArgumentException("offset = " + offset + "(" +
          Integer.toHexString(offset) + ")" + ", fileID = " + fileID
          + "(" + Integer.toHexString(fileID) + ")");
    }
  }

  int getFileID() {
    return fileID;
  }

  int getOffset() {
    return offset;
  }

  /**
   * Encodes this pointer as a long: fileID in the high 32 bits, offset in
   * the low 32 bits. The constructor guarantees offset is non-negative, so
   * adding it cannot disturb the fileID bits.
   */
  public long toLong() {
    return ((long) fileID << 32) + (long) offset;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + fileID;
    result = prime * result + offset;
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    FlumeEventPointer other = (FlumeEventPointer) obj;
    return fileID == other.fileID && offset == other.offset;
  }

  @Override
  public String toString() {
    return "FlumeEventPointer [fileID=" + fileID + ", offset=" + offset + "]";
  }

  /** Inverse of {@link #toLong()}. */
  public static FlumeEventPointer fromLong(long value) {
    int fileID = (int) (value >>> 32);
    int offset = (int) value;
    return new FlumeEventPointer(fileID, offset);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Event;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.security.Key;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
/**
* Stores FlumeEvents on disk and pointers to the events in a in memory queue.
* Once a log object is created the replay method should be called to reconcile
* the on disk write ahead log with the last checkpoint of the queue.
* <p>
* Before calling any of commitPut/commitTake/get/put/rollback/take
* {@linkplain org.apache.flume.channel.file.Log#lockShared()}
* should be called. After
* the operation and any additional modifications of the
* FlumeEventQueue, the Log.unlockShared method should be called.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Log {
public static final String PREFIX = "log-";
private static final Logger LOGGER = LoggerFactory.getLogger(Log.class);
private static final int MIN_NUM_LOGS = 2;
public static final String FILE_LOCK = "in_use.lock";
public static final String QUEUE_SET = "queueset";
// for reader
private final Map<Integer, LogFile.RandomReader> idLogFileMap = Collections
.synchronizedMap(new HashMap<Integer, LogFile.RandomReader>());
private final AtomicInteger nextFileID = new AtomicInteger(0);
private final File checkpointDir;
private final File backupCheckpointDir;
private final File[] logDirs;
private final int queueCapacity;
private final AtomicReferenceArray<LogFile.Writer> logFiles;
private final ScheduledExecutorService workerExecutor;
private volatile boolean open;
private FlumeEventQueue queue;
private long checkpointInterval;
private long maxFileSize;
private final boolean useFastReplay;
private final long minimumRequiredSpace;
private final Map<String, FileLock> locks;
private final ReentrantReadWriteLock checkpointLock =
new ReentrantReadWriteLock(true);
/**
* Set of files that should be excluded from backup and restores.
*/
public static final Set<String> EXCLUDES = Sets.newHashSet(FILE_LOCK,
QUEUE_SET);
/**
* Shared lock
*/
private final ReadLock checkpointReadLock = checkpointLock.readLock();
/**
* Exclusive lock
*/
private final WriteLock checkpointWriterLock = checkpointLock.writeLock();
private final String channelNameDescriptor;
private boolean useLogReplayV1;
private KeyProvider encryptionKeyProvider;
private String encryptionCipherProvider;
private String encryptionKeyAlias;
private Key encryptionKey;
private final long usableSpaceRefreshInterval;
private boolean didFastReplay = false;
private boolean didFullReplayDueToBadCheckpointException = false;
private final boolean useDualCheckpoints;
private final boolean compressBackupCheckpoint;
private volatile boolean backupRestored = false;
private final boolean fsyncPerTransaction;
private final int fsyncInterval;
private final boolean checkpointOnClose;
private int readCount;
private int putCount;
private int takeCount;
private int committedCount;
private int rollbackCount;
private final List<File> pendingDeletes = Lists.newArrayList();
private final FileChannelCounter channelCounter;
static class Builder {
private long bCheckpointInterval;
private long bMinimumRequiredSpace;
private long bMaxFileSize;
private int bQueueCapacity;
private File bCheckpointDir;
private File[] bLogDirs;
private String bName;
private boolean useLogReplayV1;
private boolean useFastReplay;
private KeyProvider bEncryptionKeyProvider;
private String bEncryptionKeyAlias;
private String bEncryptionCipherProvider;
private long bUsableSpaceRefreshInterval = 15L * 1000L;
private boolean bUseDualCheckpoints = false;
private boolean bCompressBackupCheckpoint = false;
private File bBackupCheckpointDir = null;
private boolean fsyncPerTransaction = true;
private int fsyncInterval;
private boolean checkpointOnClose = true;
private FileChannelCounter channelCounter;
boolean isFsyncPerTransaction() {
return fsyncPerTransaction;
}
void setFsyncPerTransaction(boolean fsyncPerTransaction) {
this.fsyncPerTransaction = fsyncPerTransaction;
}
int getFsyncInterval() {
return fsyncInterval;
}
void setFsyncInterval(int fsyncInterval) {
this.fsyncInterval = fsyncInterval;
}
Builder setUsableSpaceRefreshInterval(long usableSpaceRefreshInterval) {
bUsableSpaceRefreshInterval = usableSpaceRefreshInterval;
return this;
}
Builder setCheckpointInterval(long interval) {
bCheckpointInterval = interval;
return this;
}
Builder setMaxFileSize(long maxSize) {
bMaxFileSize = maxSize;
return this;
}
Builder setQueueSize(int capacity) {
bQueueCapacity = capacity;
return this;
}
Builder setCheckpointDir(File cpDir) {
bCheckpointDir = cpDir;
return this;
}
Builder setLogDirs(File[] dirs) {
bLogDirs = dirs;
return this;
}
Builder setChannelName(String name) {
bName = name;
return this;
}
Builder setMinimumRequiredSpace(long minimumRequiredSpace) {
bMinimumRequiredSpace = minimumRequiredSpace;
return this;
}
Builder setUseLogReplayV1(boolean useLogReplayV1) {
this.useLogReplayV1 = useLogReplayV1;
return this;
}
Builder setUseFastReplay(boolean useFastReplay) {
this.useFastReplay = useFastReplay;
return this;
}
Builder setEncryptionKeyProvider(KeyProvider encryptionKeyProvider) {
bEncryptionKeyProvider = encryptionKeyProvider;
return this;
}
Builder setEncryptionKeyAlias(String encryptionKeyAlias) {
bEncryptionKeyAlias = encryptionKeyAlias;
return this;
}
Builder setEncryptionCipherProvider(String encryptionCipherProvider) {
bEncryptionCipherProvider = encryptionCipherProvider;
return this;
}
Builder setUseDualCheckpoints(boolean UseDualCheckpoints) {
this.bUseDualCheckpoints = UseDualCheckpoints;
return this;
}
Builder setCompressBackupCheckpoint(boolean compressBackupCheckpoint) {
this.bCompressBackupCheckpoint = compressBackupCheckpoint;
return this;
}
Builder setBackupCheckpointDir(File backupCheckpointDir) {
this.bBackupCheckpointDir = backupCheckpointDir;
return this;
}
Builder setCheckpointOnClose(boolean enableCheckpointOnClose) {
this.checkpointOnClose = enableCheckpointOnClose;
return this;
}
Builder setChannelCounter(FileChannelCounter channelCounter) {
this.channelCounter = channelCounter;
return this;
}
Log build() throws IOException {
return new Log(bCheckpointInterval, bMaxFileSize, bQueueCapacity,
bUseDualCheckpoints, bCompressBackupCheckpoint, bCheckpointDir,
bBackupCheckpointDir, bName, useLogReplayV1, useFastReplay,
bMinimumRequiredSpace, bEncryptionKeyProvider, bEncryptionKeyAlias,
bEncryptionCipherProvider, bUsableSpaceRefreshInterval,
fsyncPerTransaction, fsyncInterval, checkpointOnClose, channelCounter, bLogDirs);
}
}
  // Validates configuration, acquires exclusive directory locks, wires up
  // optional encryption, and schedules the periodic background checkpoint
  // worker. replay() must still be called before the Log is usable.
  private Log(long checkpointInterval, long maxFileSize, int queueCapacity,
      boolean useDualCheckpoints, boolean compressBackupCheckpoint,
      File checkpointDir, File backupCheckpointDir,
      String name, boolean useLogReplayV1, boolean useFastReplay,
      long minimumRequiredSpace, @Nullable KeyProvider encryptionKeyProvider,
      @Nullable String encryptionKeyAlias,
      @Nullable String encryptionCipherProvider,
      long usableSpaceRefreshInterval, boolean fsyncPerTransaction,
      int fsyncInterval, boolean checkpointOnClose, FileChannelCounter channelCounter,
      File... logDirs)
      throws IOException {
    // --- argument validation; directories are created on demand ---
    Preconditions.checkArgument(checkpointInterval > 0,
        "checkpointInterval <= 0");
    Preconditions.checkArgument(queueCapacity > 0, "queueCapacity <= 0");
    Preconditions.checkArgument(maxFileSize > 0, "maxFileSize <= 0");
    Preconditions.checkNotNull(checkpointDir, "checkpointDir");
    Preconditions.checkArgument(usableSpaceRefreshInterval > 0,
        "usableSpaceRefreshInterval <= 0");
    Preconditions.checkArgument(
        checkpointDir.isDirectory() || checkpointDir.mkdirs(), "CheckpointDir "
        + checkpointDir + " could not be created");
    if (useDualCheckpoints) {
      // Dual checkpointing requires a usable backup directory.
      Preconditions.checkNotNull(backupCheckpointDir, "backupCheckpointDir is" +
          " null while dual checkpointing is enabled.");
      Preconditions.checkArgument(
          backupCheckpointDir.isDirectory() || backupCheckpointDir.mkdirs(),
          "Backup CheckpointDir " + backupCheckpointDir +
              " could not be created");
    }
    Preconditions.checkNotNull(logDirs, "logDirs");
    Preconditions.checkArgument(logDirs.length > 0, "logDirs empty");
    Preconditions.checkArgument(name != null && !name.trim().isEmpty(),
        "channel name should be specified");
    Preconditions.checkNotNull(channelCounter, "ChannelCounter must be not null");
    this.channelNameDescriptor = "[channel=" + name + "]";
    this.useLogReplayV1 = useLogReplayV1;
    this.useFastReplay = useFastReplay;
    this.minimumRequiredSpace = minimumRequiredSpace;
    this.usableSpaceRefreshInterval = usableSpaceRefreshInterval;
    for (File logDir : logDirs) {
      Preconditions.checkArgument(logDir.isDirectory() || logDir.mkdirs(),
          "LogDir " + logDir + " could not be created");
    }
    // --- acquire file locks so no other channel instance uses these dirs;
    // on failure, release whatever was already locked before rethrowing ---
    locks = Maps.newHashMap();
    try {
      lock(checkpointDir);
      if (useDualCheckpoints) {
        lock(backupCheckpointDir);
      }
      for (File logDir : logDirs) {
        lock(logDir);
      }
    } catch (IOException e) {
      unlock(checkpointDir);
      for (File logDir : logDirs) {
        unlock(logDir);
      }
      throw e;
    }
    // --- encryption: all three settings must be provided together or not
    // at all; a partial configuration is rejected ---
    if (encryptionKeyProvider != null && encryptionKeyAlias != null &&
        encryptionCipherProvider != null) {
      LOGGER.info("Encryption is enabled with encryptionKeyProvider = " +
          encryptionKeyProvider + ", encryptionKeyAlias = " + encryptionKeyAlias
          + ", encryptionCipherProvider = " + encryptionCipherProvider);
      this.encryptionKeyProvider = encryptionKeyProvider;
      this.encryptionKeyAlias = encryptionKeyAlias;
      this.encryptionCipherProvider = encryptionCipherProvider;
      this.encryptionKey = encryptionKeyProvider.getKey(encryptionKeyAlias);
    } else if (encryptionKeyProvider == null && encryptionKeyAlias == null &&
        encryptionCipherProvider == null) {
      LOGGER.info("Encryption is not enabled");
    } else {
      throw new IllegalArgumentException("Encryption configuration must all " +
          "null or all not null: encryptionKeyProvider = " +
          encryptionKeyProvider + ", encryptionKeyAlias = " +
          encryptionKeyAlias + ", encryptionCipherProvider = " +
          encryptionCipherProvider);
    }
    open = false;
    // Checkpoint interval is clamped to a 1 second minimum.
    this.checkpointInterval = Math.max(checkpointInterval, 1000);
    this.maxFileSize = maxFileSize;
    this.queueCapacity = queueCapacity;
    this.useDualCheckpoints = useDualCheckpoints;
    this.compressBackupCheckpoint = compressBackupCheckpoint;
    this.checkpointDir = checkpointDir;
    this.backupCheckpointDir = backupCheckpointDir;
    this.logDirs = logDirs;
    this.fsyncPerTransaction = fsyncPerTransaction;
    this.fsyncInterval = fsyncInterval;
    this.checkpointOnClose = checkpointOnClose;
    this.channelCounter = channelCounter;
    logFiles = new AtomicReferenceArray<LogFile.Writer>(this.logDirs.length);
    // Single background thread periodically writes checkpoints.
    workerExecutor = Executors.newSingleThreadScheduledExecutor(new
        ThreadFactoryBuilder().setNameFormat("Log-BackgroundWorker-" + name)
        .build());
    workerExecutor.scheduleWithFixedDelay(new BackgroundWorker(this),
        this.checkpointInterval, this.checkpointInterval,
        TimeUnit.MILLISECONDS);
  }
/**
* Read checkpoint and data files from disk replaying them to the state
* directly before the shutdown or crash.
*
* @throws IOException
*/
  void replay() throws IOException {
    Preconditions.checkState(!open, "Cannot replay after Log has been opened");
    // Replay must not race with checkpoint writes.
    lockExclusive();
    try {
      /*
       * First we are going to look through the data directories
       * and find all log files. We will store the highest file id
       * (at the end of the filename) we find and use that when we
       * create additional log files.
       *
       * Also store up the list of files so we can replay them later.
       */
      LOGGER.info("Replay started");
      nextFileID.set(0);
      List<File> dataFiles = Lists.newArrayList();
      for (File logDir : logDirs) {
        for (File file : LogUtils.getLogs(logDir)) {
          int id = LogUtils.getIDForFile(file);
          dataFiles.add(file);
          nextFileID.set(Math.max(nextFileID.get(), id));
          idLogFileMap.put(id, LogFileFactory.getRandomReader(new File(logDir,
              PREFIX + id), encryptionKeyProvider, fsyncPerTransaction));
        }
      }
      LOGGER.info("Found NextFileID " + nextFileID +
          ", from " + dataFiles);
      /*
       * sort the data files by file id so we can replay them by file id
       * which should approximately give us sequential events
       */
      LogUtils.sort(dataFiles);
      boolean shouldFastReplay = this.useFastReplay;
      /*
       * Read the checkpoint (in memory queue) from one of two alternating
       * locations. We will read the last one written to disk.
       */
      File checkpointFile = new File(checkpointDir, "checkpoint");
      // Fast replay is only used when no checkpoint exists; otherwise the
      // checkpoint is authoritative and a normal replay is performed.
      if (shouldFastReplay) {
        if (checkpointFile.exists()) {
          LOGGER.debug("Disabling fast full replay because checkpoint " +
              "exists: " + checkpointFile);
          shouldFastReplay = false;
        } else {
          LOGGER.debug("Not disabling fast full replay because checkpoint " +
              " does not exist: " + checkpointFile);
        }
      }
      File inflightTakesFile = new File(checkpointDir, "inflighttakes");
      File inflightPutsFile = new File(checkpointDir, "inflightputs");
      File queueSetDir = new File(checkpointDir, QUEUE_SET);
      EventQueueBackingStore backingStore = null;
      try {
        backingStore =
            EventQueueBackingStoreFactory.get(checkpointFile,
                backupCheckpointDir, queueCapacity, channelNameDescriptor,
                channelCounter, true, this.useDualCheckpoints,
                this.compressBackupCheckpoint);
        queue = new FlumeEventQueue(backingStore, inflightTakesFile,
            inflightPutsFile, queueSetDir);
        LOGGER.info("Last Checkpoint " + new Date(checkpointFile.lastModified())
            + ", queue depth = " + queue.getSize());
        /*
         * We now have everything we need to actually replay the log files
         * the queue, the timestamp the queue was written to disk, and
         * the list of data files.
         *
         * This will throw if and only if checkpoint file was fine,
         * but the inflights were not. If the checkpoint was bad, the backing
         * store factory would have thrown.
         */
        doReplay(queue, dataFiles, encryptionKeyProvider, shouldFastReplay);
      } catch (BadCheckpointException ex) {
        // Recovery path: try restoring the backup checkpoint (if dual
        // checkpointing), otherwise wipe the checkpoint dir and fully replay.
        backupRestored = false;
        if (useDualCheckpoints) {
          LOGGER.warn("Checkpoint may not have completed successfully. "
              + "Restoring checkpoint and starting up.", ex);
          if (EventQueueBackingStoreFile.backupExists(backupCheckpointDir)) {
            backupRestored = EventQueueBackingStoreFile.restoreBackup(
                checkpointDir, backupCheckpointDir);
          }
        }
        if (!backupRestored) {
          LOGGER.warn("Checkpoint may not have completed successfully. "
              + "Forcing full replay, this may take a while.", ex);
          if (!Serialization.deleteAllFiles(checkpointDir, EXCLUDES)) {
            throw new IOException("Could not delete files in checkpoint " +
                "directory to recover from a corrupt or incomplete checkpoint");
          }
        }
        backingStore = EventQueueBackingStoreFactory.get(
            checkpointFile, backupCheckpointDir, queueCapacity,
            channelNameDescriptor, channelCounter, true, useDualCheckpoints,
            compressBackupCheckpoint);
        queue = new FlumeEventQueue(backingStore, inflightTakesFile,
            inflightPutsFile, queueSetDir);
        // If the checkpoint was deleted due to BadCheckpointException, then
        // trigger fast replay if the channel is configured to.
        shouldFastReplay = this.useFastReplay;
        doReplay(queue, dataFiles, encryptionKeyProvider, shouldFastReplay);
        if (!shouldFastReplay) {
          didFullReplayDueToBadCheckpointException = true;
        }
      }
      // Roll every log dir so writes after replay go to fresh files.
      for (int index = 0; index < logDirs.length; index++) {
        LOGGER.info("Rolling " + logDirs[index]);
        roll(index);
      }
      /*
       * Now that we have replayed, write the current queue to disk
       */
      writeCheckpoint(true);
      open = true;
    } catch (Exception ex) {
      LOGGER.error("Failed to initialize Log on " + channelNameDescriptor, ex);
      if (ex instanceof IOException) {
        throw (IOException) ex;
      }
      Throwables.propagate(ex);
    } finally {
      unlockExclusive();
    }
  }
@SuppressWarnings("deprecation")
private void doReplay(FlumeEventQueue queue, List<File> dataFiles,
KeyProvider encryptionKeyProvider,
boolean useFastReplay) throws Exception {
CheckpointRebuilder rebuilder = new CheckpointRebuilder(dataFiles,
queue, fsyncPerTransaction);
if (useFastReplay && rebuilder.rebuild()) {
didFastReplay = true;
LOGGER.info("Fast replay successful.");
} else {
ReplayHandler replayHandler = new ReplayHandler(queue,
encryptionKeyProvider, fsyncPerTransaction);
if (useLogReplayV1) {
LOGGER.info("Replaying logs with v1 replay logic");
replayHandler.replayLogv1(dataFiles);
} else {
LOGGER.info("Replaying logs with v2 replay logic");
replayHandler.replayLog(dataFiles);
}
readCount = replayHandler.getReadCount();
putCount = replayHandler.getPutCount();
takeCount = replayHandler.getTakeCount();
rollbackCount = replayHandler.getRollbackCount();
committedCount = replayHandler.getCommitCount();
}
}
  /** Whether the last replay used the fast checkpoint-rebuild path. */
  @VisibleForTesting
  boolean didFastReplay() {
    return didFastReplay;
  }
  /** Number of records read during the last full replay. */
  @VisibleForTesting
  public int getReadCount() {
    return readCount;
  }
  /** Number of put records seen during the last full replay. */
  @VisibleForTesting
  public int getPutCount() {
    return putCount;
  }
  /** Number of take records seen during the last full replay. */
  @VisibleForTesting
  public int getTakeCount() {
    return takeCount;
  }
  /** Number of commit records seen during the last full replay. */
  @VisibleForTesting
  public int getCommittedCount() {
    return committedCount;
  }
  /** Number of rollback records seen during the last full replay. */
  @VisibleForTesting
  public int getRollbackCount() {
    return rollbackCount;
  }
  /**
   * Was a checkpoint backup used to replay?
   * Set during replay's BadCheckpointException recovery path when dual
   * checkpointing is enabled.
   *
   * @return true if a checkpoint backup was used to replay.
   */
  @VisibleForTesting
  boolean backupRestored() {
    return backupRestored;
  }
@VisibleForTesting
boolean didFullReplayDueToBadCheckpointException() {
return didFullReplayDueToBadCheckpointException;
}
int getNextFileID() {
Preconditions.checkState(open, "Log is closed");
return nextFileID.get();
}
FlumeEventQueue getFlumeEventQueue() {
Preconditions.checkState(open, "Log is closed");
return queue;
}
/**
 * Return the FlumeEvent for an event pointer. This method is
 * non-transactional. It is assumed the client has obtained this
 * FlumeEventPointer via FlumeEventQueue.
 *
 * @param pointer pointer identifying the data file id and offset of the event
 * @return the FlumeEvent stored at the pointer
 * @throws IOException if the read fails, or a corrupt event is found while
 *         fsync-per-transaction is enabled (the log is closed in that case)
 * @throws InterruptedException
 */
FlumeEvent get(FlumeEventPointer pointer) throws IOException,
    InterruptedException, NoopRecordException, CorruptEventException {
  Preconditions.checkState(open, "Log is closed");
  int id = pointer.getFileID();
  LogFile.RandomReader logFile = idLogFileMap.get(id);
  Preconditions.checkNotNull(logFile, "LogFile is null for id " + id);
  try {
    return logFile.get(pointer.getOffset());
  } catch (CorruptEventException ex) {
    // With fsync per transaction the data should never be corrupt, so a
    // corrupt event indicates real damage: close the channel and escalate.
    if (fsyncPerTransaction) {
      open = false;
      throw new IOException("Corrupt event found. Please run File Channel " +
          "Integrity tool.", ex);
    }
    throw ex;
  }
}
/**
 * Log a put of an event
 * <p>
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction the put belongs to
 * @param event event to persist
 * @return pointer to the persisted event (file id + offset)
 * @throws IOException if disk space is exhausted or the write fails
 */
FlumeEventPointer put(long transactionID, Event event)
    throws IOException {
  Preconditions.checkState(open, "Log is closed");
  FlumeEvent flumeEvent = new FlumeEvent(
      event.getHeaders(), event.getBody());
  Put put = new Put(transactionID, WriteOrderOracle.next(), flumeEvent);
  ByteBuffer buffer = TransactionEventRecord.toByteBuffer(put);
  // Writes for a given transaction always go to the same data file.
  int logFileIndex = nextLogWriter(transactionID);
  long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
  long requiredSpace = minimumRequiredSpace + buffer.limit();
  if (usableSpace <= requiredSpace) {
    throw new IOException("Usable space exhausted, only " + usableSpace +
        " bytes remaining, required " + requiredSpace + " bytes");
  }
  boolean error = true;
  try {
    try {
      FlumeEventPointer ptr = logFiles.get(logFileIndex).put(buffer);
      error = false;
      return ptr;
    } catch (LogFileRetryableIOException e) {
      if (!open) {
        throw e;
      }
      // Retryable failure (e.g. file full): roll to a new file and retry once.
      roll(logFileIndex, buffer);
      FlumeEventPointer ptr = logFiles.get(logFileIndex).put(buffer);
      error = false;
      return ptr;
    }
  } finally {
    // On any unexpected failure, roll so the next write starts a clean file.
    if (error && open) {
      roll(logFileIndex);
    }
  }
}
/**
 * Log a take of an event, pointer points at the corresponding put
 * <p>
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction the take belongs to
 * @param pointer pointer to the put record being taken
 * @throws IOException if disk space is exhausted or the write fails
 */
void take(long transactionID, FlumeEventPointer pointer)
    throws IOException {
  Preconditions.checkState(open, "Log is closed");
  Take take = new Take(transactionID, WriteOrderOracle.next(),
      pointer.getOffset(), pointer.getFileID());
  ByteBuffer buffer = TransactionEventRecord.toByteBuffer(take);
  // Writes for a given transaction always go to the same data file.
  int logFileIndex = nextLogWriter(transactionID);
  long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
  long requiredSpace = minimumRequiredSpace + buffer.limit();
  if (usableSpace <= requiredSpace) {
    throw new IOException("Usable space exhausted, only " + usableSpace +
        " bytes remaining, required " + requiredSpace + " bytes");
  }
  boolean error = true;
  try {
    try {
      logFiles.get(logFileIndex).take(buffer);
      error = false;
    } catch (LogFileRetryableIOException e) {
      if (!open) {
        throw e;
      }
      // Retryable failure (e.g. file full): roll to a new file and retry once.
      roll(logFileIndex, buffer);
      logFiles.get(logFileIndex).take(buffer);
      error = false;
    }
  } finally {
    // On any unexpected failure, roll so the next write starts a clean file.
    if (error && open) {
      roll(logFileIndex);
    }
  }
}
/**
 * Log a rollback of a transaction
 * <p>
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction to roll back
 * @throws IOException if disk space is exhausted or the write fails
 */
void rollback(long transactionID) throws IOException {
  Preconditions.checkState(open, "Log is closed");
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("Rolling back " + transactionID);
  }
  Rollback rollback = new Rollback(transactionID, WriteOrderOracle.next());
  ByteBuffer buffer = TransactionEventRecord.toByteBuffer(rollback);
  // Writes for a given transaction always go to the same data file.
  int logFileIndex = nextLogWriter(transactionID);
  long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
  long requiredSpace = minimumRequiredSpace + buffer.limit();
  if (usableSpace <= requiredSpace) {
    throw new IOException("Usable space exhausted, only " + usableSpace +
        " bytes remaining, required " + requiredSpace + " bytes");
  }
  boolean error = true;
  try {
    try {
      logFiles.get(logFileIndex).rollback(buffer);
      error = false;
    } catch (LogFileRetryableIOException e) {
      if (!open) {
        throw e;
      }
      // Retryable failure (e.g. file full): roll to a new file and retry once.
      roll(logFileIndex, buffer);
      logFiles.get(logFileIndex).rollback(buffer);
      error = false;
    }
  } finally {
    // On any unexpected failure, roll so the next write starts a clean file.
    if (error && open) {
      roll(logFileIndex);
    }
  }
}
/**
 * Log commit of put, we need to know which type of commit
 * so we know if the pointers corresponding to the events
 * should be added or removed from the flume queue. We
 * could infer but it's best to be explicit.
 * <p>
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction whose puts are being committed
 * @throws IOException if the commit record cannot be written
 * @throws InterruptedException
 */
void commitPut(long transactionID) throws IOException,
    InterruptedException {
  Preconditions.checkState(open, "Log is closed");
  commit(transactionID, TransactionEventRecord.Type.PUT.get());
}

/**
 * Log commit of take, we need to know which type of commit
 * so we know if the pointers corresponding to the events
 * should be added or removed from the flume queue. We
 * could infer but it's best to be explicit.
 * <p>
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction whose takes are being committed
 * @throws IOException if the commit record cannot be written
 * @throws InterruptedException
 */
void commitTake(long transactionID) throws IOException,
    InterruptedException {
  Preconditions.checkState(open, "Log is closed");
  commit(transactionID, TransactionEventRecord.Type.TAKE.get());
}
/** Releases the exclusive (checkpoint-writer) side of the checkpoint lock. */
private void unlockExclusive() {
  checkpointWriterLock.unlock();
}

/** Acquires the shared (reader) side of the checkpoint lock. */
void lockShared() {
  checkpointReadLock.lock();
}

/** Releases the shared (reader) side of the checkpoint lock. */
void unlockShared() {
  checkpointReadLock.unlock();
}

/** Acquires the exclusive (checkpoint-writer) side of the checkpoint lock. */
private void lockExclusive() {
  checkpointWriterLock.lock();
}
/**
 * Closes the log: writes a final checkpoint (if enabled), shuts down the
 * background worker, closes all data-file writers and readers, closes the
 * queue, and releases all directory locks.
 * <p>
 * Synchronization not required since this method gets the write lock,
 * so checkpoint and this method cannot run at the same time.
 *
 * @throws IOException if closing a writer or reader fails
 */
void close() throws IOException {
  lockExclusive();
  try {
    open = false;
    try {
      if (checkpointOnClose) {
        // Checkpoint now so the next start can avoid a full replay.
        writeCheckpoint(true);
      }
    } catch (Exception err) {
      // Best effort only: a failed checkpoint just makes the next start slower.
      LOGGER.warn("Failed creating checkpoint on close of channel " +
          channelNameDescriptor +
          ". Replay will take longer next time channel is started.", err);
    }
    shutdownWorker();
    if (logFiles != null) {
      for (int index = 0; index < logFiles.length(); index++) {
        LogFile.Writer writer = logFiles.get(index);
        if (writer != null) {
          writer.close();
        }
      }
    }
    synchronized (idLogFileMap) {
      for (Integer logId : idLogFileMap.keySet()) {
        LogFile.RandomReader reader = idLogFileMap.get(logId);
        if (reader != null) {
          reader.close();
        }
      }
    }
    queue.close();
    // Unlock failures are logged but do not abort the close: the remaining
    // directories should still be unlocked.
    try {
      unlock(checkpointDir);
    } catch (IOException ex) {
      LOGGER.warn("Error unlocking " + checkpointDir, ex);
    }
    if (useDualCheckpoints) {
      try {
        unlock(backupCheckpointDir);
      } catch (IOException ex) {
        // Fixed: previously logged checkpointDir instead of the backup dir.
        LOGGER.warn("Error unlocking " + backupCheckpointDir, ex);
      }
    }
    for (File logDir : logDirs) {
      try {
        unlock(logDir);
      } catch (IOException ex) {
        LOGGER.warn("Error unlocking " + logDir, ex);
      }
    }
  } finally {
    unlockExclusive();
  }
}
/**
 * Shuts down the background checkpoint worker, waiting up to ten seconds
 * for in-flight work to complete.
 */
void shutdownWorker() {
  // Removed stray System.out.println debug residue; the logger suffices.
  LOGGER.info("Attempting to shutdown background worker.");
  workerExecutor.shutdown();
  try {
    workerExecutor.awaitTermination(10, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    LOGGER.error("Interrupted while waiting for worker to die.");
    // Preserve the interrupt status for callers further up the stack.
    Thread.currentThread().interrupt();
  }
}
/** Sets the interval between automatic checkpoints. */
void setCheckpointInterval(long checkpointInterval) {
  this.checkpointInterval = checkpointInterval;
}

/** Sets the maximum size of a data file before it is rolled. */
void setMaxFileSize(long maxFileSize) {
  this.maxFileSize = maxFileSize;
}
/**
 * Synchronization not required as this method is atomic
 *
 * @param transactionID transaction being committed
 * @param type record type being committed (PUT or TAKE)
 * @throws IOException if disk space is exhausted or the write fails
 */
private void commit(long transactionID, short type) throws IOException {
  Preconditions.checkState(open, "Log is closed");
  Commit commit = new Commit(transactionID, WriteOrderOracle.next(), type);
  ByteBuffer buffer = TransactionEventRecord.toByteBuffer(commit);
  // Writes for a given transaction always go to the same data file.
  int logFileIndex = nextLogWriter(transactionID);
  long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
  long requiredSpace = minimumRequiredSpace + buffer.limit();
  if (usableSpace <= requiredSpace) {
    throw new IOException("Usable space exhausted, only " + usableSpace +
        " bytes remaining, required " + requiredSpace + " bytes");
  }
  boolean error = true;
  try {
    try {
      LogFile.Writer logFileWriter = logFiles.get(logFileIndex);
      // If multiple transactions are committing at the same time,
      // this ensures that the number of actual fsyncs is small and a
      // number of them are grouped together into one.
      logFileWriter.commit(buffer);
      logFileWriter.sync();
      error = false;
    } catch (LogFileRetryableIOException e) {
      if (!open) {
        throw e;
      }
      // Retryable failure (e.g. file full): roll to a new file and retry once.
      roll(logFileIndex, buffer);
      LogFile.Writer logFileWriter = logFiles.get(logFileIndex);
      logFileWriter.commit(buffer);
      logFileWriter.sync();
      error = false;
    }
  } finally {
    // On any unexpected failure, roll so the next write starts a clean file.
    if (error && open) {
      roll(logFileIndex);
    }
  }
}
/**
 * Atomic so not synchronization required.
 *
 * @return index of the data-file writer assigned to this transaction;
 *         the mapping is stable for the life of the transaction since it
 *         depends only on the transaction id and the number of log dirs
 */
private int nextLogWriter(long transactionID) {
  return (int) Math.abs(transactionID % (long) logFiles.length());
}

/**
 * Unconditionally roll
 * Synchronization done internally
 *
 * @param index index of the log directory/writer to roll
 * @throws IOException
 */
private void roll(int index) throws IOException {
  roll(index, null);
}
/**
 * Roll a log if needed. Roll always occurs if the log at the index
 * does not exist (typically on startup), or buffer is null. Otherwise
 * LogFile.Writer.isRollRequired is checked again to ensure we don't
 * have threads pile up on this log resulting in multiple successive
 * rolls
 * <p>
 * Synchronization required since both synchronized and unsynchronized
 * methods call this method, and this method acquires only a
 * read lock. The synchronization guarantees that multiple threads don't
 * roll at the same time.
 *
 * @param index index of the log directory/writer to roll
 * @param buffer pending record used to decide whether a roll is still
 *        required; null forces an unconditional roll
 * @throws IOException
 */
private synchronized void roll(int index, ByteBuffer buffer)
    throws IOException {
  // Shared lock only: rolling may proceed concurrently with reads but not
  // while a checkpoint holds the exclusive lock.
  lockShared();
  try {
    LogFile.Writer oldLogFile = logFiles.get(index);
    // check to make sure a roll is actually required due to
    // the possibility of multiple writes waiting on lock
    if (oldLogFile == null || buffer == null ||
        oldLogFile.isRollRequired(buffer)) {
      try {
        LOGGER.info("Roll start " + logDirs[index]);
        int fileID = nextFileID.incrementAndGet();
        File file = new File(logDirs[index], PREFIX + fileID);
        LogFile.Writer writer = LogFileFactory.getWriter(file, fileID,
            maxFileSize, encryptionKey, encryptionKeyAlias,
            encryptionCipherProvider, usableSpaceRefreshInterval,
            fsyncPerTransaction, fsyncInterval);
        // Register a reader for the new file before publishing the writer.
        idLogFileMap.put(fileID, LogFileFactory.getRandomReader(file,
            encryptionKeyProvider, fsyncPerTransaction));
        // writer from this point on will get new reference
        logFiles.set(index, writer);
        // close out old log
        if (oldLogFile != null) {
          oldLogFile.close();
        }
      } finally {
        LOGGER.info("Roll end");
      }
    }
  } finally {
    unlockShared();
  }
}
/**
 * Writes a checkpoint without forcing; a no-op if the queue decides no
 * checkpoint is currently needed.
 */
private boolean writeCheckpoint() throws Exception {
  return writeCheckpoint(false);
}

/**
 * Write the current checkpoint object and then swap objects so that
 * the next checkpoint occurs on the other checkpoint directory.
 * <p>
 * Synchronization is not required because this method acquires a
 * write lock. So this method gets exclusive access to all the
 * data structures this method accesses.
 *
 * @param force a flag to force the writing of checkpoint
 * @throws IOException if we are unable to write the checkpoint out to disk
 */
private boolean writeCheckpoint(boolean force) throws Exception {
  // Changed boxed Boolean parameter/return to primitives; the method is
  // private and every call site passes a boolean literal.
  boolean checkpointCompleted = false;
  long usableSpace = checkpointDir.getUsableSpace();
  if (usableSpace <= minimumRequiredSpace) {
    throw new IOException("Usable space exhausted, only " + usableSpace +
        " bytes remaining, required " + minimumRequiredSpace + " bytes");
  }
  lockExclusive();
  SortedSet<Integer> logFileRefCountsAll = null;
  SortedSet<Integer> logFileRefCountsActive = null;
  try {
    if (queue.checkpoint(force)) {
      long logWriteOrderID = queue.getLogWriteOrderID();
      //Since the active files might also be in the queue's fileIDs,
      //we need to either move each one to a new set or remove each one
      //as we do here. Otherwise we cannot make sure every element in
      //fileID set from the queue have been updated.
      //Since clone is smarter than insert, better to make
      //a copy of the set first so that we can use it later.
      logFileRefCountsAll = queue.getFileIDs();
      logFileRefCountsActive = new TreeSet<Integer>(logFileRefCountsAll);
      // Stamp the checkpoint position/order id on every active data file.
      int numFiles = logFiles.length();
      for (int i = 0; i < numFiles; i++) {
        LogFile.Writer logWriter = logFiles.get(i);
        int logFileID = logWriter.getLogFileID();
        File logFile = logWriter.getFile();
        LogFile.MetaDataWriter writer =
            LogFileFactory.getMetaDataWriter(logFile, logFileID);
        try {
          writer.markCheckpoint(logWriter.position(), logWriteOrderID);
        } finally {
          writer.close();
        }
        logFileRefCountsAll.remove(logFileID);
        LOGGER.info("Updated checkpoint for file: " + logFile + " position: "
            + logWriter.position() + " logWriteOrderID: " + logWriteOrderID);
      }
      // Update any inactive data files as well
      Iterator<Integer> idIterator = logFileRefCountsAll.iterator();
      while (idIterator.hasNext()) {
        int id = idIterator.next();
        LogFile.RandomReader reader = idLogFileMap.remove(id);
        File file = reader.getFile();
        reader.close();
        LogFile.MetaDataWriter writer =
            LogFileFactory.getMetaDataWriter(file, id);
        try {
          writer.markCheckpoint(logWriteOrderID);
        } finally {
          // Re-open and re-register a reader even if the stamp failed.
          reader = LogFileFactory.getRandomReader(file,
              encryptionKeyProvider, fsyncPerTransaction);
          idLogFileMap.put(id, reader);
          writer.close();
        }
        // Fixed malformed debug message (missing spaces around the id).
        LOGGER.debug("Updated checkpoint for file: " + file
            + " logWriteOrderID: " + logWriteOrderID);
        idIterator.remove();
      }
      Preconditions.checkState(logFileRefCountsAll.size() == 0,
          "Could not update all data file timestamps: " + logFileRefCountsAll);
      //Add files from all log directories
      for (int index = 0; index < logDirs.length; index++) {
        logFileRefCountsActive.add(logFiles.get(index).getLogFileID());
      }
      checkpointCompleted = true;
    }
  } finally {
    unlockExclusive();
  }
  //Do the deletes outside the checkpointWriterLock
  //Delete logic is expensive.
  if (open && checkpointCompleted) {
    removeOldLogs(logFileRefCountsActive);
  }
  //Since the exception is not caught, this will not be returned if
  //an exception is thrown from the try.
  return true;
}
/**
 * Deletes data files no longer referenced by the queue. Deletion is
 * deferred by one checkpoint cycle via {@code pendingDeletes} (see below).
 *
 * @param fileIDs file ids still referenced by the current checkpoint
 */
private void removeOldLogs(SortedSet<Integer> fileIDs) {
  Preconditions.checkState(open, "Log is closed");
  // To maintain a single code path for deletes, if backup of checkpoint is
  // enabled or not, we will track the files which can be deleted after the
  // current checkpoint (since the one which just got backed up still needs
  // these files) and delete them only after the next (since the current
  // checkpoint will become the backup at that time,
  // and thus these files are no longer needed).
  for (File fileToDelete : pendingDeletes) {
    LOGGER.info("Removing old file: " + fileToDelete);
    FileUtils.deleteQuietly(fileToDelete);
  }
  pendingDeletes.clear();
  // we will find the smallest fileID currently in use and
  // won't delete any files with an id larger than the min
  int minFileID = fileIDs.first();
  LOGGER.debug("Files currently in use: " + fileIDs);
  for (File logDir : logDirs) {
    List<File> logs = LogUtils.getLogs(logDir);
    // sort oldset to newest
    LogUtils.sort(logs);
    // ensure we always keep two logs per dir
    int size = logs.size() - MIN_NUM_LOGS;
    for (int index = 0; index < size; index++) {
      File logFile = logs.get(index);
      int logFileID = LogUtils.getIDForFile(logFile);
      if (logFileID < minFileID) {
        // Stop serving reads from the file before scheduling its deletion.
        LogFile.RandomReader reader = idLogFileMap.remove(logFileID);
        if (reader != null) {
          reader.close();
        }
        File metaDataFile = Serialization.getMetaDataFile(logFile);
        pendingDeletes.add(logFile);
        pendingDeletes.add(metaDataFile);
      }
    }
  }
}
/**
 * Lock storage to provide exclusive access.
 * <p>
 * <p> Locking is not supported by all file systems.
 * E.g., NFS does not consistently support exclusive locks.
 * <p>
 * <p> If locking is supported we guarantee exclusive access to the
 * storage directory. Otherwise, no guarantee is given.
 *
 * @param dir directory to lock
 * @throws IOException if locking fails
 */
private void lock(File dir) throws IOException {
  FileLock lock = tryLock(dir);
  if (lock == null) {
    String msg = "Cannot lock " + dir
        + ". The directory is already locked. "
        + channelNameDescriptor;
    LOGGER.info(msg);
    throw new IOException(msg);
  }
  // A second successful lock on the same file means the file system does
  // not actually enforce locking (e.g. some NFS mounts); warn the operator.
  FileLock secondLock = tryLock(dir);
  if (secondLock != null) {
    LOGGER.warn("Directory " + dir + " does not support locking");
    secondLock.release();
    secondLock.channel().close();
  }
  // Remember the lock so unlock(dir) can release it later.
  locks.put(dir.getAbsolutePath(), lock);
}
/**
 * Attempts to acquire an exclusive lock on the directory.
 *
 * @return A lock object representing the newly-acquired lock or
 * <code>null</code> if directory is already locked.
 * @throws IOException if locking fails.
 */
@SuppressWarnings("resource")
private FileLock tryLock(File dir) throws IOException {
  File lockF = new File(dir, FILE_LOCK);
  lockF.deleteOnExit();
  RandomAccessFile file = new RandomAccessFile(lockF, "rws");
  FileLock res = null;
  try {
    res = file.getChannel().tryLock();
  } catch (OverlappingFileLockException oe) {
    // Lock already held within this JVM.
    file.close();
    return null;
  } catch (IOException e) {
    LOGGER.error("Cannot create lock on " + lockF, e);
    file.close();
    throw e;
  }
  if (res == null) {
    // Lock held by another process: tryLock() returned null without
    // throwing, so close the file handle to avoid leaking it (previously
    // this handle was leaked on this path).
    file.close();
  }
  // When the lock was acquired the file must stay open: closing it would
  // release the lock.
  return res;
}
/**
 * Unlock directory. A no-op if this Log holds no lock for the directory.
 *
 * @param dir directory whose lock should be released
 * @throws IOException if releasing the lock or closing its channel fails
 */
private void unlock(File dir) throws IOException {
  FileLock lock = locks.remove(dir.getAbsolutePath());
  if (lock == null) {
    return;
  }
  lock.release();
  lock.channel().close();
  // Removed the dead "lock = null" assignment: nulling a local about to go
  // out of scope has no effect.
}
/**
 * Periodic task that writes a checkpoint on the owning Log's schedule.
 * Errors are counted on the channel's counter and logged but never
 * propagated, so the scheduled executor keeps running.
 */
static class BackgroundWorker implements Runnable {
  private static final Logger LOG = LoggerFactory
      .getLogger(BackgroundWorker.class);
  // The Log this worker checkpoints.
  private final Log log;

  public BackgroundWorker(Log log) {
    this.log = log;
  }

  @Override
  public void run() {
    try {
      // Skip silently once the log has been closed.
      if (log.open) {
        log.writeCheckpoint();
      }
    } catch (IOException e) {
      log.channelCounter.incrementCheckpointWriteErrorCount();
      LOG.error("Error doing checkpoint", e);
    } catch (Throwable e) {
      // Catch everything: an escaping Throwable would cancel the
      // scheduled task permanently.
      log.channelCounter.incrementCheckpointWriteErrorCount();
      LOG.error("General error in checkpoint worker", e);
    }
  }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Process-wide generator of monotonically increasing transaction ids.
 * The counter starts at the current wall-clock time in milliseconds and can
 * only move forward, even when {@link #setSeed(long)} races with readers.
 */
public final class TransactionIDOracle {

  /** Last issued id; seeded with the current time so ids survive restarts. */
  private static final AtomicLong TRANSACTION_ID =
      new AtomicLong(System.currentTimeMillis());

  private TransactionIDOracle() {
    // Static-only utility; never instantiated.
  }

  /**
   * Raises the counter to {@code highest} if it is currently lower.
   * Seeds that are not greater than the current value are ignored, so the
   * sequence never moves backwards.
   */
  public static void setSeed(long highest) {
    for (long current = TRANSACTION_ID.get();
         current < highest;
         current = TRANSACTION_ID.get()) {
      // CAS may lose a race; the loop re-reads and retries until the
      // counter is at least as large as the requested seed.
      TRANSACTION_ID.compareAndSet(current, highest);
    }
  }

  /** Returns the next transaction id (strictly increasing). */
  public static long next() {
    return TRANSACTION_ID.incrementAndGet();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import com.google.protobuf.GeneratedMessage;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.channel.file.encryption.CipherProvider;
import org.apache.flume.channel.file.encryption.CipherProviderFactory;
import org.apache.flume.channel.file.encryption.DecryptionFailureException;
import org.apache.flume.channel.file.encryption.KeyProvider;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.Key;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
/**
* Represents a single data file on disk. Has methods to write,
* read sequentially (replay), and read randomly (channel takes).
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LogFileV3 extends LogFile {
protected static final Logger LOGGER =
LoggerFactory.getLogger(LogFileV3.class);
// Not instantiable: LogFileV3 is a container for the v3 reader/writer types.
private LogFileV3() {
}
/**
 * Updates the checkpoint position/write-order id stored in a v3 data
 * file's sidecar metadata file. The previous checkpoint values are kept
 * as backup fields so replay can fall back to a backup checkpoint.
 */
static class MetaDataWriter extends LogFile.MetaDataWriter {
  // Last metadata read from / written to disk for this data file.
  private ProtosFactory.LogFileMetaData logFileMetaData;
  private final File metaDataFile;

  protected MetaDataWriter(File logFile, int logFileID) throws IOException {
    super(logFile, logFileID);
    metaDataFile = Serialization.getMetaDataFile(logFile);
    MetaDataReader metaDataReader = new MetaDataReader(logFile, logFileID);
    logFileMetaData = metaDataReader.read();
    // Refuse to touch files written by a different on-disk format version.
    int version = logFileMetaData.getVersion();
    if (version != getVersion()) {
      throw new IOException("Version is " + Integer.toHexString(version) +
          " expected " + Integer.toHexString(getVersion())
          + " file: " + logFile);
    }
    setLastCheckpointOffset(logFileMetaData.getCheckpointPosition());
    setLastCheckpointWriteOrderID(logFileMetaData.getCheckpointWriteOrderID());
  }

  @Override
  int getVersion() {
    return Serialization.VERSION_3;
  }

  @Override
  void markCheckpoint(long currentPosition, long logWriteOrderID)
      throws IOException {
    ProtosFactory.LogFileMetaData.Builder metaDataBuilder =
        ProtosFactory.LogFileMetaData.newBuilder(logFileMetaData);
    metaDataBuilder.setCheckpointPosition(currentPosition);
    metaDataBuilder.setCheckpointWriteOrderID(logWriteOrderID);
    /*
     * Set the previous checkpoint position and write order id so that it
     * would be possible to recover from a backup.
     */
    metaDataBuilder.setBackupCheckpointPosition(logFileMetaData
        .getCheckpointPosition());
    metaDataBuilder.setBackupCheckpointWriteOrderID(logFileMetaData
        .getCheckpointWriteOrderID());
    logFileMetaData = metaDataBuilder.build();
    // Atomic write (tmp file + rename) via writeDelimitedTo.
    writeDelimitedTo(logFileMetaData, metaDataFile);
  }
}
/**
 * Reads and validates the protobuf metadata sidecar file of a v3 data
 * file, checking that it belongs to the expected log file id.
 */
static class MetaDataReader {
  private final File logFile;
  private final File metaDataFile;
  // The file id the metadata is expected to declare.
  private final int logFileID;

  protected MetaDataReader(File logFile, int logFileID) throws IOException {
    this.logFile = logFile;
    metaDataFile = Serialization.getMetaDataFile(logFile);
    this.logFileID = logFileID;
  }

  /**
   * @return the parsed metadata
   * @throws IOException if the metadata is missing or declares a
   *         different log file id than expected
   */
  ProtosFactory.LogFileMetaData read() throws IOException {
    FileInputStream inputStream = new FileInputStream(metaDataFile);
    try {
      ProtosFactory.LogFileMetaData metaData = Preconditions.checkNotNull(
          ProtosFactory.LogFileMetaData.parseDelimitedFrom(inputStream),
          "Metadata cannot be null");
      if (metaData.getLogFileID() != logFileID) {
        throw new IOException("The file id of log file: "
            + logFile + " is different from expected "
            + " id: expected = " + logFileID + ", found = "
            + metaData.getLogFileID());
      }
      return metaData;
    } finally {
      try {
        inputStream.close();
      } catch (IOException e) {
        // Close failure must not mask the result/exception of read().
        LOGGER.warn("Unable to close " + metaDataFile, e);
      }
    }
  }
}
/**
 * Writes a GeneratedMessage to a temp file, synchronizes it to disk
 * and then renames the file over file.
 *
 * @param msg GeneratedMessage to write to the file
 * @param file destination file
 * @throws IOException if a write error occurs or the File.renameTo
 * method returns false meaning the file could not be overwritten.
 */
public static void writeDelimitedTo(GeneratedMessage msg, File file)
    throws IOException {
  File tmp = Serialization.getMetaDataTempFile(file);
  FileOutputStream outputStream = new FileOutputStream(tmp);
  boolean closed = false;
  try {
    msg.writeDelimitedTo(outputStream);
    // Force to disk before the rename so a crash cannot leave a valid
    // name pointing at unsynced content.
    outputStream.getChannel().force(true);
    outputStream.close();
    closed = true;
    if (!tmp.renameTo(file)) {
      //Some platforms don't support moving over an existing file.
      //So:
      //log.meta -> log.meta.old
      //log.meta.tmp -> log.meta
      //delete log.meta.old
      File oldFile = Serialization.getOldMetaDataFile(file);
      if (!file.renameTo(oldFile)) {
        throw new IOException("Unable to rename " + file + " to " + oldFile);
      }
      if (!tmp.renameTo(file)) {
        throw new IOException("Unable to rename " + tmp + " over " + file);
      }
      oldFile.delete();
    }
  } finally {
    if (!closed) {
      // Only reached when an exception skipped the normal close above.
      try {
        outputStream.close();
      } catch (IOException e) {
        LOGGER.warn("Unable to close " + tmp, e);
      }
    }
  }
}
/**
 * v3 data-file writer. On construction it also writes the initial
 * protobuf metadata sidecar file, including encryption parameters when
 * an encryption key is supplied.
 */
static class Writer extends LogFile.Writer {
  Writer(File file, int logFileID, long maxFileSize,
      @Nullable Key encryptionKey,
      @Nullable String encryptionKeyAlias,
      @Nullable String encryptionCipherProvider,
      long usableSpaceRefreshInterval, boolean fsyncPerTransaction,
      int fsyncInterval) throws IOException {
    super(file, logFileID, maxFileSize,
        CipherProviderFactory.getEncrypter(encryptionCipherProvider, encryptionKey),
        usableSpaceRefreshInterval, fsyncPerTransaction, fsyncInterval);
    ProtosFactory.LogFileMetaData.Builder metaDataBuilder =
        ProtosFactory.LogFileMetaData.newBuilder();
    if (encryptionKey != null) {
      // Encryption requires alias and provider so readers can decrypt.
      Preconditions.checkNotNull(encryptionKeyAlias, "encryptionKeyAlias");
      Preconditions.checkNotNull(encryptionCipherProvider,
          "encryptionCipherProvider");
      ProtosFactory.LogFileEncryption.Builder logFileEncryptionBuilder =
          ProtosFactory.LogFileEncryption.newBuilder();
      logFileEncryptionBuilder.setCipherProvider(encryptionCipherProvider);
      logFileEncryptionBuilder.setKeyAlias(encryptionKeyAlias);
      logFileEncryptionBuilder.setParameters(
          ByteString.copyFrom(getEncryptor().getParameters()));
      metaDataBuilder.setEncryption(logFileEncryptionBuilder);
    }
    metaDataBuilder.setVersion(getVersion());
    metaDataBuilder.setLogFileID(logFileID);
    // A new file starts with zeroed checkpoint markers.
    metaDataBuilder.setCheckpointPosition(0L);
    metaDataBuilder.setCheckpointWriteOrderID(0L);
    metaDataBuilder.setBackupCheckpointPosition(0L);
    metaDataBuilder.setBackupCheckpointWriteOrderID(0L);
    File metaDataFile = Serialization.getMetaDataFile(file);
    writeDelimitedTo(metaDataBuilder.build(), metaDataFile);
  }

  @Override
  int getVersion() {
    return Serialization.VERSION_3;
  }
}
/**
 * v3 random-access reader used for channel takes. Metadata (including
 * encryption settings) is loaded lazily on first read, since readers are
 * opened while the file is still empty. Decryptors are pooled because a
 * RandomReader is shared by many threads.
 */
static class RandomReader extends LogFile.RandomReader {
  // Guarded by synchronized(this) in doGet(); volatile for safe reads.
  private volatile boolean initialized;
  private volatile boolean encryptionEnabled;
  private volatile Key key;
  private volatile String cipherProvider;
  private volatile byte[] parameters;
  // Pool of reusable decryptors, one checked out per in-flight read.
  private BlockingQueue<CipherProvider.Decryptor> decryptors =
      new LinkedBlockingDeque<CipherProvider.Decryptor>();

  RandomReader(File file, @Nullable KeyProvider encryptionKeyProvider,
      boolean fsyncPerTransaction) throws IOException {
    super(file, encryptionKeyProvider, fsyncPerTransaction);
  }

  /** Loads the metadata sidecar: version check plus encryption settings. */
  private void initialize() throws IOException {
    File metaDataFile = Serialization.getMetaDataFile(getFile());
    FileInputStream inputStream = new FileInputStream(metaDataFile);
    try {
      ProtosFactory.LogFileMetaData metaData = Preconditions.checkNotNull(
          ProtosFactory.LogFileMetaData.parseDelimitedFrom(inputStream),
          "MetaData cannot be null");
      int version = metaData.getVersion();
      if (version != getVersion()) {
        throw new IOException("Version is " + Integer.toHexString(version) +
            " expected " + Integer.toHexString(getVersion())
            + " file: " + getFile().getCanonicalPath());
      }
      encryptionEnabled = false;
      if (metaData.hasEncryption()) {
        if (getKeyProvider() == null) {
          throw new IllegalStateException("Data file is encrypted but no " +
              " provider was specified");
        }
        ProtosFactory.LogFileEncryption encryption = metaData.getEncryption();
        key = getKeyProvider().getKey(encryption.getKeyAlias());
        cipherProvider = encryption.getCipherProvider();
        parameters = encryption.getParameters().toByteArray();
        encryptionEnabled = true;
      }
    } finally {
      try {
        inputStream.close();
      } catch (IOException e) {
        LOGGER.warn("Unable to close " + metaDataFile, e);
      }
    }
  }

  /** Takes a pooled decryptor, creating a new one if the pool is empty. */
  private CipherProvider.Decryptor getDecryptor() {
    CipherProvider.Decryptor decryptor = decryptors.poll();
    if (decryptor == null) {
      decryptor = CipherProviderFactory.getDecrypter(cipherProvider, key,
          parameters);
    }
    return decryptor;
  }

  @Override
  int getVersion() {
    return Serialization.VERSION_3;
  }

  @Override
  protected TransactionEventRecord doGet(RandomAccessFile fileHandle)
      throws IOException, CorruptEventException {
    // readers are opened right when the file is created and thus
    // empty. As such we wait to initialize until there is some
    // data before we we initialize
    synchronized (this) {
      if (!initialized) {
        initialized = true;
        initialize();
      }
    }
    boolean success = false;
    CipherProvider.Decryptor decryptor = null;
    try {
      byte[] buffer = readDelimitedBuffer(fileHandle);
      if (encryptionEnabled) {
        decryptor = getDecryptor();
        buffer = decryptor.decrypt(buffer);
      }
      TransactionEventRecord event = TransactionEventRecord.fromByteArray(buffer);
      success = true;
      return event;
    } catch (DecryptionFailureException ex) {
      throw new CorruptEventException("Error decrypting event", ex);
    } finally {
      // Return the decryptor to the pool only on success; a failed decrypt
      // may have left it in an unusable state.
      if (success && encryptionEnabled && decryptor != null) {
        decryptors.offer(decryptor);
      }
    }
  }
}
/**
 * v3 sequential reader used during replay. Metadata (file id, checkpoint
 * positions, encryption settings) is read eagerly in the constructor.
 * When fsync-per-transaction is disabled, corrupt or undecryptable
 * trailing records are tolerated and treated as end-of-data.
 */
public static class SequentialReader extends LogFile.SequentialReader {
  // Non-null only when the file is encrypted.
  private CipherProvider.Decryptor decryptor;
  private final boolean fsyncPerTransaction;

  public SequentialReader(File file, @Nullable KeyProvider
      encryptionKeyProvider, boolean fsyncPerTransaction) throws EOFException,
      IOException {
    super(file, encryptionKeyProvider);
    this.fsyncPerTransaction = fsyncPerTransaction;
    File metaDataFile = Serialization.getMetaDataFile(file);
    FileInputStream inputStream = new FileInputStream(metaDataFile);
    try {
      ProtosFactory.LogFileMetaData metaData = Preconditions.checkNotNull(
          ProtosFactory.LogFileMetaData.parseDelimitedFrom(inputStream),
          "MetaData cannot be null");
      int version = metaData.getVersion();
      if (version != getVersion()) {
        throw new IOException("Version is " + Integer.toHexString(version) +
            " expected " + Integer.toHexString(getVersion())
            + " file: " + file.getCanonicalPath());
      }
      if (metaData.hasEncryption()) {
        if (getKeyProvider() == null) {
          throw new IllegalStateException("Data file is encrypted but no " +
              " provider was specified");
        }
        ProtosFactory.LogFileEncryption encryption = metaData.getEncryption();
        Key key = getKeyProvider().getKey(encryption.getKeyAlias());
        decryptor = CipherProviderFactory.getDecrypter(
            encryption.getCipherProvider(), key, encryption.getParameters().toByteArray());
      }
      // Expose both the current and the backup checkpoint markers so replay
      // can recover from a backup checkpoint when needed.
      setLogFileID(metaData.getLogFileID());
      setLastCheckpointPosition(metaData.getCheckpointPosition());
      setLastCheckpointWriteOrderID(metaData.getCheckpointWriteOrderID());
      setPreviousCheckpointPosition(metaData.getBackupCheckpointPosition());
      setPreviousCheckpointWriteOrderID(
          metaData.getBackupCheckpointWriteOrderID());
    } finally {
      try {
        inputStream.close();
      } catch (IOException e) {
        LOGGER.warn("Unable to close " + metaDataFile, e);
      }
    }
  }

  @Override
  public int getVersion() {
    return Serialization.VERSION_3;
  }

  @Override
  LogRecord doNext(int offset) throws IOException, CorruptEventException,
      DecryptionFailureException {
    byte[] buffer = null;
    TransactionEventRecord event = null;
    try {
      buffer = readDelimitedBuffer(getFileHandle());
      if (decryptor != null) {
        buffer = decryptor.decrypt(buffer);
      }
      event = TransactionEventRecord.fromByteArray(buffer);
    } catch (CorruptEventException ex) {
      LOGGER.warn("Corrupt file found. File id: log-" + this.getLogFileID(),
          ex);
      // Return null so that replay handler thinks all events in this file
      // have been taken.
      if (!fsyncPerTransaction) {
        return null;
      }
      throw ex;
    } catch (DecryptionFailureException ex) {
      if (!fsyncPerTransaction) {
        LOGGER.warn("Could not decrypt even read from channel. Skipping " +
            "event.", ex);
        return null;
      }
      throw ex;
    }
    return new LogRecord(getLogFileID(), offset, event);
  }
}
}
| 9,742 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/Put.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
import com.google.common.annotations.VisibleForTesting;
import org.apache.flume.channel.file.proto.ProtosFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.protobuf.ByteString;
/**
 * Represents a Put on disk: a {@link FlumeEvent} committed to the channel,
 * serialized either in the legacy Writable (v2) format or as a delimited
 * protobuf message (v3) carrying a CRC32 checksum of the event body.
 */
class Put extends TransactionEventRecord {
  private FlumeEvent event;
  // Should we move this to a higher level to not make multiple instances?
  // Doing that might cause performance issues, since access to this would
  // need to be synchronized (the whole reset-update-getValue cycle would
  // need to be).
  private final Checksum checksum = new CRC32();

  @VisibleForTesting
  Put(Long transactionID, Long logWriteOrderID) {
    this(transactionID, logWriteOrderID, null);
  }

  Put(Long transactionID, Long logWriteOrderID, FlumeEvent event) {
    super(transactionID, logWriteOrderID);
    this.event = event;
  }

  FlumeEvent getEvent() {
    return event;
  }

  /** Deserializes the event from the legacy (v2) Writable format. */
  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    event = FlumeEvent.from(in);
  }

  /** Serializes the event in the legacy (v2) Writable format. */
  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    event.write(out);
  }

  /**
   * Serializes the event as a delimited protobuf message (v3), including a
   * CRC32 checksum over the event body.
   */
  @Override
  void writeProtos(OutputStream out) throws IOException {
    ProtosFactory.Put.Builder putBuilder = ProtosFactory.Put.newBuilder();
    ProtosFactory.FlumeEvent.Builder eventBuilder =
        ProtosFactory.FlumeEvent.newBuilder();
    Map<String, String> headers = event.getHeaders();
    ProtosFactory.FlumeEventHeader.Builder headerBuilder =
        ProtosFactory.FlumeEventHeader.newBuilder();
    if (headers != null) {
      // Iterate over entrySet instead of keySet + get(key): one map lookup
      // per header instead of two, and idiomatic for key/value traversal.
      for (Map.Entry<String, String> header : headers.entrySet()) {
        headerBuilder.clear();
        headerBuilder.setKey(header.getKey());
        if (header.getValue() != null) {
          headerBuilder.setValue(header.getValue());
        }
        eventBuilder.addHeaders(headerBuilder.build());
      }
    }
    eventBuilder.setBody(ByteString.copyFrom(event.getBody()));
    ProtosFactory.FlumeEvent protoEvent = eventBuilder.build();
    putBuilder.setEvent(protoEvent);
    putBuilder.setChecksum(calculateChecksum(event.getBody()));
    putBuilder.build().writeDelimitedTo(out);
  }

  /**
   * Deserializes a delimited protobuf message (v3) and, when a checksum is
   * present, verifies it against the checksum computed over the body.
   *
   * @throws CorruptEventException if the stored (expected) checksum does not
   *         match the checksum computed over the deserialized body
   */
  @Override
  void readProtos(InputStream in) throws IOException, CorruptEventException {
    ProtosFactory.Put put = Preconditions.checkNotNull(
        ProtosFactory.Put.parseDelimitedFrom(in), "Put cannot be null");
    Map<String, String> headers = Maps.newHashMap();
    ProtosFactory.FlumeEvent protosEvent = put.getEvent();
    for (ProtosFactory.FlumeEventHeader header : protosEvent.getHeadersList()) {
      headers.put(header.getKey(), header.getValue());
    }
    byte[] eventBody = protosEvent.getBody().toByteArray();
    if (put.hasChecksum()) {
      long eventBodyChecksum = calculateChecksum(eventBody);
      if (eventBodyChecksum != put.getChecksum()) {
        // The stored checksum is the expected value and the freshly computed
        // one is the actual; the previous message reported them swapped.
        throw new CorruptEventException("Expected checksum for event was " +
            put.getChecksum() + " but the checksum of the event is " + eventBodyChecksum);
      }
    }
    // TODO when we remove v2, remove FlumeEvent and use EventBuilder here
    event = new FlumeEvent(headers, eventBody);
  }

  /**
   * Computes a CRC32 checksum over {@code body}. Uses the shared per-instance
   * Checksum, so this method is not thread-safe.
   */
  protected long calculateChecksum(byte[] body) {
    checksum.reset();
    checksum.update(body, 0, body.length);
    return checksum.getValue();
  }

  @Override
  public short getRecordType() {
    return Type.PUT.get();
  }

  @Override
  public String toString() {
    StringBuilder builder = new StringBuilder();
    builder.append("Put [event=");
    builder.append(event);
    builder.append(", getLogWriteOrderID()=");
    builder.append(getLogWriteOrderID());
    builder.append(", getTransactionID()=");
    builder.append(getTransactionID());
    builder.append("]");
    return builder.toString();
  }
}
| 9,743 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/BadCheckpointException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.channel.file;
import org.apache.flume.FlumeException;
/**
 * Exception thrown when the checkpoint directory contains invalid data,
 * probably due to the channel stopping while the checkpoint was written.
 */
public class BadCheckpointException extends FlumeException {
  private static final long serialVersionUID = -5038652693746472779L;

  /** @param msg description of the checkpoint inconsistency detected */
  public BadCheckpointException(String msg) {
    super(msg);
  }

  /**
   * @param msg description of the checkpoint inconsistency detected
   * @param t underlying cause, preserved for the stack trace
   */
  public BadCheckpointException(String msg, Throwable t) {
    super(msg, t);
  }
}
| 9,744 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/EventQueueBackingStoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.io.Files;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
/**
 * Static factory that opens (or creates) the checkpoint backing store for a
 * file channel, choosing between v2 and v3 formats based on which files
 * exist on disk, and optionally upgrading v2 checkpoints to v3.
 */
class EventQueueBackingStoreFactory {
  private static final Logger LOG = LoggerFactory.getLogger(EventQueueBackingStoreFactory.class);

  // Utility class; not meant to be instantiated.
  private EventQueueBackingStoreFactory() {
  }

  /** Convenience overload: upgrades v2 checkpoints and takes no backups. */
  static EventQueueBackingStore get(
      File checkpointFile, int capacity, String name, FileChannelCounter counter
  ) throws Exception {
    return get(checkpointFile, capacity, name, counter, true);
  }

  /** Convenience overload: no backup checkpoint directory or compression. */
  static EventQueueBackingStore get(
      File checkpointFile, int capacity, String name, FileChannelCounter counter, boolean upgrade
  ) throws Exception {
    return get(checkpointFile, null, capacity, name, counter, upgrade, false, false);
  }

  /**
   * Opens or creates the backing store.
   *
   * Decision order: (1) metadata without a usable checkpoint file is a
   * {@link BadCheckpointException} forcing full replay; (2) no checkpoint at
   * all creates a fresh v3 store; (3) checkpoint plus metadata is v3;
   * (4) checkpoint only is inspected for a v2 version header and optionally
   * upgraded; anything else is a BadCheckpointException.
   *
   * @param upgrade whether a v2 checkpoint should be converted to v3
   * @param shouldBackup whether checkpoints are backed up to backupCheckpointDir
   */
  static EventQueueBackingStore get(
      File checkpointFile, File backupCheckpointDir, int capacity, String name,
      FileChannelCounter counter, boolean upgrade, boolean shouldBackup, boolean compressBackup
  ) throws Exception {
    File metaDataFile = Serialization.getMetaDataFile(checkpointFile);
    RandomAccessFile checkpointFileHandle = null;
    try {
      boolean checkpointExists = checkpointFile.exists();
      boolean metaDataExists = metaDataFile.exists();
      if (metaDataExists) {
        // if we have a metadata file but no checkpoint file, we have a problem
        // delete everything in the checkpoint directory and force
        // a full replay.
        if (!checkpointExists || checkpointFile.length() == 0) {
          LOG.warn("MetaData file for checkpoint "
              + " exists but checkpoint does not. Checkpoint = " + checkpointFile
              + ", metaDataFile = " + metaDataFile);
          throw new BadCheckpointException(
              "The last checkpoint was not completed correctly, " +
                  "since Checkpoint file does not exist while metadata " +
                  "file does.");
        }
      }
      // brand new, use v3
      if (!checkpointExists) {
        if (!checkpointFile.createNewFile()) {
          throw new IOException("Cannot create " + checkpointFile);
        }
        return new EventQueueBackingStoreFileV3(checkpointFile,
            capacity, name, counter, backupCheckpointDir, shouldBackup, compressBackup);
      }
      // v3 due to meta file, version will be checked by backing store
      if (metaDataExists) {
        return new EventQueueBackingStoreFileV3(checkpointFile, capacity,
            name, counter, backupCheckpointDir, shouldBackup, compressBackup);
      }
      // No metadata: read the version header directly from the checkpoint
      // file to detect a legacy v2 store.
      checkpointFileHandle = new RandomAccessFile(checkpointFile, "r");
      int version = (int) checkpointFileHandle.readLong();
      if (Serialization.VERSION_2 == version) {
        if (upgrade) {
          return upgrade(checkpointFile, capacity, name, backupCheckpointDir,
              shouldBackup, compressBackup, counter);
        }
        return new EventQueueBackingStoreFileV2(checkpointFile, capacity, name, counter);
      }
      LOG.error("Found version " + Integer.toHexString(version) + " in " +
          checkpointFile);
      throw new BadCheckpointException("Checkpoint file exists with " +
          Serialization.VERSION_3 + " but no metadata file found.");
    } finally {
      // Close the read handle opened for version detection, if any.
      if (checkpointFileHandle != null) {
        try {
          checkpointFileHandle.close();
        } catch (IOException e) {
          LOG.warn("Unable to close " + checkpointFile, e);
        }
      }
    }
  }

  /**
   * Upgrades a v2 checkpoint to v3 in place, first copying the original
   * checkpoint to a timestamped "-backup-" sibling file.
   */
  private static EventQueueBackingStore upgrade(
      File checkpointFile, int capacity, String name, File backupCheckpointDir,
      boolean shouldBackup, boolean compressBackup, FileChannelCounter counter
  ) throws Exception {
    LOG.info("Attempting upgrade of " + checkpointFile + " for " + name);
    EventQueueBackingStoreFileV2 backingStoreV2 =
        new EventQueueBackingStoreFileV2(checkpointFile, capacity, name, counter);
    String backupName = checkpointFile.getName() + "-backup-"
        + System.currentTimeMillis();
    Files.copy(checkpointFile,
        new File(checkpointFile.getParentFile(), backupName));
    File metaDataFile = Serialization.getMetaDataFile(checkpointFile);
    EventQueueBackingStoreFileV3.upgrade(backingStoreV2, checkpointFile,
        metaDataFile);
    return new EventQueueBackingStoreFileV3(checkpointFile, capacity, name, counter,
        backupCheckpointDir, shouldBackup, compressBackup);
  }
}
| 9,745 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/NoopRecordException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
/**
 * Signals that a record read from a data file was a no-op record.
 * NOTE(review): presumably these are filler records written by the channel
 * rather than event data — confirm against the LogFile record types.
 */
public class NoopRecordException extends Exception {
  private static final long serialVersionUID = -7394180633208889738L;

  public NoopRecordException() {
    super();
  }

  public NoopRecordException(String msg) {
    super(msg);
  }

  /** Variant preserving the underlying cause. */
  public NoopRecordException(String msg, Throwable th) {
    super(msg, th);
  }
}
| 9,746 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/LogUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.File;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Pattern;
import com.google.common.collect.Lists;
/**
 * Helpers for locating and ordering file-channel data files, whose names
 * are Log.PREFIX followed by a numeric id.
 */
public class LogUtils {
  // Matches exactly "<PREFIX><digits>"; compiled once and reused.
  private static final Pattern pattern =
      Pattern.compile("^" + Log.PREFIX + "\\d+$");

  /**
   * Sort a list of files by the number after Log.PREFIX, ascending.
   */
  static void sort(List<File> logs) {
    Collections.sort(logs, new Comparator<File>() {
      @Override
      public int compare(File file1, File file2) {
        // Integer.compare expresses the three-way comparison directly,
        // replacing the hand-rolled if/else-if chain.
        return Integer.compare(getIDForFile(file1), getIDForFile(file2));
      }
    });
  }

  /**
   * Get the id after the Log.PREFIX
   *
   * @throws NumberFormatException if the name suffix is not a valid integer
   */
  static int getIDForFile(File file) {
    return Integer.parseInt(file.getName().substring(Log.PREFIX.length()));
  }

  /**
   * Find all log files within a directory
   *
   * @param logDir directory to search
   * @return List of data files within logDir
   * @throws IllegalStateException if logDir cannot be listed (missing,
   *         not a directory, or unreadable)
   */
  static List<File> getLogs(File logDir) {
    List<File> result = Lists.newArrayList();
    File[] files = logDir.listFiles();
    if (files == null) {
      // Include diagnostic state in the message to aid debugging the cause.
      String msg = logDir + ".listFiles() returned null: ";
      msg += "File = " + logDir.isFile() + ", ";
      msg += "Exists = " + logDir.exists() + ", ";
      msg += "Writable = " + logDir.canWrite();
      throw new IllegalStateException(msg);
    }
    for (File file : files) {
      String name = file.getName();
      if (pattern.matcher(name).matches()) {
        result.add(file);
      }
    }
    return result;
  }
}
| 9,747 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/WritableUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.channel.file;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Util methods copied from org.apache.hadoop.io.WritableUtils.
 *
 * Implements zero-compressed (variable-length) encoding of ints and longs:
 * small values fit in one byte, larger values get a header byte describing
 * the sign and byte count, followed by the magnitude bytes in
 * high-non-zero-byte-first order.
 */
class WritableUtils {

  /**
   * Serializes an integer to a binary stream with zero-compressed encoding,
   * using the same wire format as {@link #writeVLong(DataOutput, long)}.
   *
   * @param stream Binary output stream
   * @param i Integer to be serialized
   * @throws java.io.IOException
   */
  public static void writeVInt(DataOutput stream, int i) throws IOException {
    writeVLong(stream, i);
  }

  /**
   * Serializes a long to a binary stream with zero-compressed encoding.
   * Values in [-112, 127] are written as a single byte holding the value.
   * Otherwise the header byte encodes sign and length: values -113..-120
   * mean a positive long with -(header+112) magnitude bytes following;
   * values -121..-128 mean a negative long with -(header+120) bytes.
   *
   * @param stream Binary output stream
   * @param i Long to be serialized
   * @throws java.io.IOException
   */
  public static void writeVLong(DataOutput stream, long i) throws IOException {
    // Single-byte fast path for small values.
    if (i >= -112 && i <= 127) {
      stream.writeByte((byte) i);
      return;
    }
    boolean negative = i < 0;
    // Negative values are stored as their one's complement so the magnitude
    // is always non-negative.
    long magnitude = negative ? ~i : i;
    // Count the bytes needed to hold the magnitude.
    int numBytes = 0;
    for (long rest = magnitude; rest != 0; rest >>>= 8) {
      numBytes++;
    }
    // Header byte encodes both sign and byte count.
    stream.writeByte((byte) ((negative ? -120 : -112) - numBytes));
    // Emit magnitude bytes, most significant first.
    for (int idx = numBytes - 1; idx >= 0; idx--) {
      stream.writeByte((byte) (magnitude >>> (idx * 8)));
    }
  }

  /**
   * Reads a zero-compressed encoded long from input stream and returns it.
   * @param stream Binary input stream
   * @return deserialized long from stream.
   * @throws java.io.IOException
   */
  public static long readVLong(DataInput stream) throws IOException {
    byte header = stream.readByte();
    int size = decodeVIntSize(header);
    if (size == 1) {
      // Single-byte encoding: the header byte is the value itself.
      return header;
    }
    // Accumulate the magnitude bytes, most significant first.
    long accumulated = 0;
    for (int remaining = size - 1; remaining > 0; remaining--) {
      accumulated = (accumulated << 8) | (stream.readByte() & 0xFF);
    }
    // Undo the one's complement applied to negative values on write.
    return isNegativeVInt(header) ? ~accumulated : accumulated;
  }

  /**
   * Reads a zero-compressed encoded integer from input stream and returns it.
   * @param stream Binary input stream
   * @return deserialized integer from stream.
   * @throws java.io.IOException if the decoded value does not fit in an int
   */
  public static int readVInt(DataInput stream) throws IOException {
    long raw = readVLong(stream);
    if (raw > Integer.MAX_VALUE || raw < Integer.MIN_VALUE) {
      throw new IOException("value too long to fit in integer");
    }
    return (int) raw;
  }

  /**
   * Given the first byte of a vint/vlong, determine the sign
   * @param value the first byte
   * @return is the value negative
   */
  public static boolean isNegativeVInt(byte value) {
    if (value >= 0) {
      // Non-negative single-byte values are never negative.
      return false;
    }
    // Headers in [-128, -121] mark negatives, as do single-byte values in
    // [-112, -1]; headers in [-120, -113] mark positives.
    return value < -120 || value >= -112;
  }

  /**
   * Parse the first byte of a vint/vlong to determine the number of bytes
   * @param value the first byte of the vint/vlong
   * @return the total number of bytes (1 to 9)
   */
  public static int decodeVIntSize(byte value) {
    if (value >= -112) {
      return 1;
    }
    return value < -120 ? -119 - value : -111 - value;
  }
}
| 9,748 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/KeyProviderType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
/**
 * Known key-provider types, mapping a configuration name to the builder
 * class that constructs the provider. OTHER (null builder) indicates the
 * configuration names a custom class instead of an enum constant.
 */
public enum KeyProviderType {
  JCEKSFILE(JCEFileKeyProvider.Builder.class),
  OTHER(null);

  // Builder class for this provider type; null for OTHER.
  private final Class<? extends KeyProvider.Builder> keyProviderClass;

  KeyProviderType(Class<? extends KeyProvider.Builder> keyStoreProviderClass) {
    this.keyProviderClass = keyStoreProviderClass;
  }

  /** Returns the builder class for this type, or null for OTHER. */
  public Class<? extends KeyProvider.Builder> getBuilderClass() {
    return keyProviderClass;
  }
}
| 9,749 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/AESCTRNoPaddingProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.nio.ByteBuffer;
import java.security.Key;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Throwables;
/**
 * CipherProvider for the "AES/CTR/NoPadding" transformation. Encryptors
 * generate a fresh 16-byte IV per instance; decryptors reconstruct the
 * cipher from the IV stored alongside the encrypted data.
 */
public class AESCTRNoPaddingProvider extends CipherProvider {
  private static final Logger LOG = LoggerFactory
      .getLogger(AESCTRNoPaddingProvider.class);
  // JCE transformation string for this provider.
  static final String TYPE = "AES/CTR/NoPadding";

  public Encryptor.Builder<AESCTRNoPaddingEncryptor> newEncryptorBuilder() {
    return new EncryptorBuilder();
  }

  public Decryptor.Builder<AESCTRNoPaddingDecryptor> newDecryptorBuilder() {
    return new DecryptorBuilder();
  }

  public static class EncryptorBuilder
      extends CipherProvider.Encryptor.Builder<AESCTRNoPaddingEncryptor> {
    @Override
    public AESCTRNoPaddingEncryptor build() {
      // Build a 16-byte IV: 12 random bytes followed by a 4-byte
      // big-endian int initialized to 1.
      ByteBuffer buffer = ByteBuffer.allocate(16);
      byte[] seed = new byte[12];
      SecureRandom random = new SecureRandom();
      random.nextBytes(seed);
      buffer.put(seed).putInt(1);
      return new AESCTRNoPaddingEncryptor(key, buffer.array());
    }
  }

  public static class DecryptorBuilder
      extends CipherProvider.Decryptor.Builder<AESCTRNoPaddingDecryptor> {
    @Override
    public AESCTRNoPaddingDecryptor build() {
      // parameters holds the IV recorded when the data was encrypted.
      return new AESCTRNoPaddingDecryptor(key, parameters);
    }
  }

  private static class AESCTRNoPaddingEncryptor extends Encryptor {
    // IV used by this encryptor; exposed so it can be persisted for
    // later decryption.
    private byte[] parameters;
    private Cipher cipher;

    private AESCTRNoPaddingEncryptor(Key key, byte[] parameters) {
      this.parameters = parameters;
      cipher = getCipher(key, Cipher.ENCRYPT_MODE, parameters);
    }

    @Override
    public byte[] getParameters() {
      return parameters;
    }

    @Override
    public String getCodec() {
      return TYPE;
    }

    @Override
    public byte[] encrypt(byte[] clearText) {
      return doFinal(cipher, clearText);
    }
  }

  private static class AESCTRNoPaddingDecryptor extends Decryptor {
    private Cipher cipher;

    private AESCTRNoPaddingDecryptor(Key key, byte[] parameters) {
      cipher = getCipher(key, Cipher.DECRYPT_MODE, parameters);
    }

    @Override
    public byte[] decrypt(byte[] cipherText) {
      return doFinal(cipher, cipherText);
    }

    @Override
    public String getCodec() {
      return TYPE;
    }
  }

  // Runs the cipher over the whole input. Any failure (in either the
  // encrypt or decrypt direction) is rethrown as DecryptionFailureException.
  private static byte[] doFinal(Cipher cipher, byte[] input) throws DecryptionFailureException {
    try {
      return cipher.doFinal(input);
    } catch (Exception e) {
      String msg = "Unable to encrypt or decrypt data " + TYPE
          + " input.length " + input.length;
      LOG.error(msg, e);
      throw new DecryptionFailureException(msg, e);
    }
  }

  // Creates and initializes a cipher for the given mode with the IV in
  // parameters. On InvalidKeyException, augments the message with a hint
  // about the JCE unlimited-strength policy files when the allowed key
  // length is below 256 bits.
  private static Cipher getCipher(Key key, int mode, byte[] parameters) {
    try {
      Cipher cipher = Cipher.getInstance(TYPE);
      cipher.init(mode, key, new IvParameterSpec(parameters));
      return cipher;
    } catch (Exception e) {
      String msg = "Unable to load key using transformation: " + TYPE;
      if (e instanceof InvalidKeyException) {
        try {
          int maxAllowedLen = Cipher.getMaxAllowedKeyLength(TYPE);
          if (maxAllowedLen < 256) {
            msg += "; Warning: Maximum allowed key length = " + maxAllowedLen
                + " with the available JCE security policy files. Have you"
                + " installed the JCE unlimited strength jurisdiction policy"
                + " files?";
          }
        } catch (NoSuchAlgorithmException ex) {
          msg += "; Unable to find specified algorithm?";
        }
      }
      LOG.error(msg, e);
      throw Throwables.propagate(e);
    }
  }
}
| 9,750 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/DecryptionFailureException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import org.apache.flume.FlumeException;
/**
 * Exception that is thrown when the channel is unable to decrypt an event
 * read from the channel.
 */
public class DecryptionFailureException extends FlumeException {
  private static final long serialVersionUID = 6646810195384793646L;

  public DecryptionFailureException(String msg) {
    super(msg);
  }

  /** Variant preserving the underlying cause of the decryption failure. */
  public DecryptionFailureException(String msg, Throwable th) {
    super(msg, th);
  }
}
| 9,751 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/CipherProviderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.security.Key;
import java.util.Locale;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
 * Static factory for {@link CipherProvider} encryptors and decryptors.
 * Provider types are resolved from the {@link CipherProviderType} enum or,
 * for custom providers, from a fully qualified class name.
 */
public class CipherProviderFactory {
  private static final Logger logger =
      LoggerFactory.getLogger(CipherProviderFactory.class);

  /**
   * Creates an encryptor for the given provider type and key.
   *
   * @return a configured encryptor, or null if cipherProviderType is null
   */
  public static CipherProvider.Encryptor getEncrypter(String cipherProviderType,
      Key key) {
    if (cipherProviderType == null) {
      return null;
    }
    CipherProvider provider = getProvider(cipherProviderType);
    return provider.newEncryptorBuilder().setKey(key).build();
  }

  /**
   * Creates a decryptor for the given provider type, key and the cipher
   * parameters recorded when the data was encrypted.
   *
   * @return a configured decryptor, or null if cipherProviderType is null
   */
  public static CipherProvider.Decryptor getDecrypter(String cipherProviderType,
      Key key, byte[] parameters) {
    if (cipherProviderType == null) {
      return null;
    }
    CipherProvider provider = getProvider(cipherProviderType);
    return provider.newDecryptorBuilder().setKey(key).setParameters(parameters)
        .build();
  }

  /**
   * Resolves and instantiates a provider.
   *
   * @throws FlumeException if the named class cannot be found, does not
   *         implement CipherProvider, or cannot be instantiated
   */
  @SuppressWarnings("unchecked")
  private static CipherProvider getProvider(String cipherProviderType) {
    Preconditions.checkNotNull(cipherProviderType,
        "cipher provider type must not be null");
    // try to find builder class in enum of known providers
    CipherProviderType type;
    try {
      type = CipherProviderType.valueOf(cipherProviderType.toUpperCase(Locale.ENGLISH));
    } catch (IllegalArgumentException e) {
      logger.debug("Not in enum, loading provider class: {}", cipherProviderType);
      type = CipherProviderType.OTHER;
    }
    Class<? extends CipherProvider> providerClass = type.getProviderClass();
    // handle the case where they have specified their own builder in the config
    if (providerClass == null) {
      try {
        // Wildcard type instead of a raw Class reference.
        Class<?> c = Class.forName(cipherProviderType);
        if (c != null && CipherProvider.class.isAssignableFrom(c)) {
          providerClass = (Class<? extends CipherProvider>) c;
        } else {
          String errMessage = "Unable to instantiate provider from " +
              cipherProviderType;
          logger.error(errMessage);
          throw new FlumeException(errMessage);
        }
      } catch (ClassNotFoundException ex) {
        logger.error("Class not found: " + cipherProviderType, ex);
        throw new FlumeException(ex);
      }
    }
    try {
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance(), which silently rethrows checked exceptions
      // thrown by the constructor. Any failure is still wrapped in
      // FlumeException by the catch below, so caller-visible behavior is
      // unchanged.
      return providerClass.getDeclaredConstructor().newInstance();
    } catch (Exception ex) {
      String errMessage = "Cannot instantiate provider: " + cipherProviderType;
      logger.error(errMessage, ex);
      throw new FlumeException(errMessage, ex);
    }
  }
}
| 9,752 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/EncryptionConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
public class EncryptionConfiguration {

  private EncryptionConfiguration() {
    // Constants holder; never instantiated.
  }

  /** Prefix under which every file-channel encryption option lives. */
  public static final String ENCRYPTION_PREFIX = "encryption";

  /** Name of the encryption key provider; there is no default. */
  public static final String KEY_PROVIDER = "keyProvider";

  /** Alias of the key used to encrypt newly written data; no default. */
  public static final String ACTIVE_KEY = "activeKey";

  /** Name of the encryption cipher provider; there is no default. */
  public static final String CIPHER_PROVIDER = "cipherProvider";

  /**
   * Space separated list of key aliases needed to read the current set of
   * logs, in addition to the alias named by {@link #ACTIVE_KEY}.
   */
  public static final String JCE_FILE_KEYS = "keys";

  /**
   * Suffix for a per-key password file property, i.e. the full property is:
   * keys.aliasName.passwordFile
   */
  public static final String JCE_FILE_KEY_PASSWORD_FILE = "passwordFile";

  /** Path to a jceks key store. */
  public static final String JCE_FILE_KEY_STORE_FILE = "keyStoreFile";

  /** Path to the password file protecting the jceks key store. */
  public static final String JCE_FILE_KEY_STORE_PASSWORD_FILE = "keyStorePasswordFile";
}
| 9,753 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/KeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.security.Key;
import org.apache.flume.Context;
/**
 * Supplies encryption keys to the file channel, looked up by alias.
 *
 * <p>Implementations are created through their nested {@link Builder}, which
 * is instantiated reflectively and therefore must have a no-arg constructor.
 */
public abstract class KeyProvider {

  /**
   * Resolves the key registered under the given alias.
   *
   * @param alias name of the key to look up
   * @return a non-null {@link Key}
   */
  public abstract Key getKey(String alias);

  /** Factory for {@link KeyProvider} instances. */
  public interface Builder {
    // "public abstract" is implicit on interface methods; redundant
    // modifiers removed per convention.
    KeyProvider build(Context context);
  }
}
| 9,754 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/CipherProviderType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
/**
 * Known cipher providers for file-channel encryption. {@link #OTHER} marks a
 * user-supplied provider named by fully qualified class in the configuration,
 * and therefore carries no provider class of its own.
 */
public enum CipherProviderType {
  AESCTRNOPADDING(AESCTRNoPaddingProvider.class),
  OTHER(null);

  // Concrete provider implementation backing this constant; null for OTHER.
  private final Class<? extends CipherProvider> clazz;

  CipherProviderType(Class<? extends CipherProvider> clazz) {
    this.clazz = clazz;
  }

  public Class<? extends CipherProvider> getProviderClass() {
    return clazz;
  }
}
| 9,755 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/JCEFileKeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.io.File;
import java.io.FileInputStream;
import java.security.Key;
import java.security.KeyStore;
import java.util.Map;
import org.apache.flume.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
/**
 * {@link KeyProvider} backed by a JCEKS key store on the local file system.
 *
 * <p>The key store password is read from a password file. Individual keys may
 * have their own password files (via {@code aliasPasswordFileMap}); aliases
 * without an entry fall back to the key store password.
 */
public class JCEFileKeyProvider extends KeyProvider {
  private static final Logger logger =
      LoggerFactory.getLogger(JCEFileKeyProvider.class);

  // alias -> per-key password file; absent aliases use the key store password
  private Map<String, File> aliasPasswordFileMap;
  private KeyStore ks;
  private char[] keyStorePassword;
  private File keyStorePasswordFile;

  public JCEFileKeyProvider(File keyStoreFile, File keyStorePasswordFile,
                            Map<String, File> aliasPasswordFileMap) {
    super();
    this.aliasPasswordFileMap = aliasPasswordFileMap;
    this.keyStorePasswordFile = keyStorePasswordFile;
    try {
      ks = KeyStore.getInstance("jceks");
      keyStorePassword = Files.toString(keyStorePasswordFile, Charsets.UTF_8)
          .trim().toCharArray();
      // Fix: the input stream was previously never closed, leaking a file
      // handle; try-with-resources closes it even if load() throws.
      try (FileInputStream keyStoreStream = new FileInputStream(keyStoreFile)) {
        ks.load(keyStoreStream, keyStorePassword);
      }
    } catch (Exception ex) {
      throw Throwables.propagate(ex);
    }
  }

  /**
   * Resolves {@code alias} from the key store, using the per-key password if
   * one was configured and the key store password otherwise.
   *
   * @throws RuntimeException wrapping any key store failure; the message
   *         includes the alias and the password file consulted
   */
  @Override
  public Key getKey(String alias) {
    String passwordFile = keyStorePasswordFile.getAbsolutePath();
    try {
      char[] keyPassword = keyStorePassword;
      if (aliasPasswordFileMap.containsKey(alias)) {
        File keyPasswordFile = aliasPasswordFileMap.get(alias);
        keyPassword = Files.toString(keyPasswordFile,
            Charsets.UTF_8).trim().toCharArray();
        passwordFile = keyPasswordFile.getAbsolutePath();
      }
      Key key = ks.getKey(alias, keyPassword);
      if (key == null) {
        throw new IllegalStateException("KeyStore returned null for " + alias);
      }
      return key;
    } catch (Exception e) {
      String msg = e.getClass().getName() + ": " + e.getMessage() + ". " +
          "Key = " + alias + ", passwordFile = " + passwordFile;
      throw new RuntimeException(msg, e);
    }
  }

  /** Builds a {@link JCEFileKeyProvider} from the channel {@link Context}. */
  public static class Builder implements KeyProvider.Builder {
    @Override
    public KeyProvider build(Context context) {
      String keyStoreFileName = context.getString(
          EncryptionConfiguration.JCE_FILE_KEY_STORE_FILE);
      String keyStorePasswordFileName = context.getString(
          EncryptionConfiguration.JCE_FILE_KEY_STORE_PASSWORD_FILE);
      Preconditions.checkState(!Strings.isNullOrEmpty(keyStoreFileName),
          "KeyStore file not specified");
      Preconditions.checkState(!Strings.isNullOrEmpty(keyStorePasswordFileName),
          "KeyStore password file not specified");
      Map<String, File> aliasPasswordFileMap = Maps.newHashMap();
      String passwordProtectedKeys = context.getString(
          EncryptionConfiguration.JCE_FILE_KEYS);
      Preconditions.checkState(!Strings.isNullOrEmpty(passwordProtectedKeys),
          "Keys available to KeyStore was not specified or empty");
      // Each listed alias may declare keys.<alias>.passwordFile; when the
      // property is absent the key store password file is used instead.
      for (String passwordName : passwordProtectedKeys.trim().split("\\s+")) {
        String propertyName = Joiner.on(".").join(EncryptionConfiguration.JCE_FILE_KEYS,
            passwordName, EncryptionConfiguration.JCE_FILE_KEY_PASSWORD_FILE);
        String passwordFileName = context.getString(propertyName,
            keyStorePasswordFileName);
        File passwordFile = new File(passwordFileName.trim());
        if (passwordFile.isFile()) {
          aliasPasswordFileMap.put(passwordName, passwordFile);
        } else {
          logger.warn("Password file for alias " + passwordName +
              " does not exist");
        }
      }
      File keyStoreFile = new File(keyStoreFileName.trim());
      File keyStorePasswordFile = new File(keyStorePasswordFileName.trim());
      return new JCEFileKeyProvider(keyStoreFile, keyStorePasswordFile,
          aliasPasswordFileMap);
    }
  }
}
| 9,756 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/KeyProviderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.util.Locale;
import org.apache.flume.Context;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
 * Creates {@link KeyProvider} instances either from the {@link KeyProviderType}
 * enum or, for unknown names, by treating the configured value as a fully
 * qualified {@link KeyProvider.Builder} class name.
 */
public class KeyProviderFactory {
  private static final Logger logger =
      LoggerFactory.getLogger(KeyProviderFactory.class);

  /**
   * Instantiates a key provider.
   *
   * @param keyProviderType enum constant name or a Builder class name
   * @param context configuration passed to the builder
   * @return the built {@link KeyProvider}
   * @throws FlumeException if the class cannot be found, is not a Builder,
   *         or cannot be instantiated
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public static KeyProvider getInstance(String keyProviderType, Context context) {
    Preconditions.checkNotNull(keyProviderType,
        "key provider type must not be null");
    // try to find builder class in enum of known providers
    KeyProviderType type;
    try {
      type = KeyProviderType.valueOf(keyProviderType.toUpperCase(Locale.ENGLISH));
    } catch (IllegalArgumentException e) {
      logger.debug("Not in enum, loading provider class: {}", keyProviderType);
      type = KeyProviderType.OTHER;
    }
    Class<? extends KeyProvider.Builder> providerClass =
        type.getBuilderClass();
    // handle the case where they have specified their own builder in the config
    if (providerClass == null) {
      try {
        Class c = Class.forName(keyProviderType);
        if (c != null && KeyProvider.Builder.class.isAssignableFrom(c)) {
          providerClass = (Class<? extends KeyProvider.Builder>) c;
        } else {
          String errMessage = "Unable to instantiate Builder from " +
              keyProviderType;
          logger.error(errMessage);
          throw new FlumeException(errMessage);
        }
      } catch (ClassNotFoundException ex) {
        logger.error("Class not found: " + keyProviderType, ex);
        throw new FlumeException(ex);
      }
    }
    // build the builder
    KeyProvider.Builder provider;
    try {
      provider = providerClass.newInstance();
    } catch (InstantiationException | IllegalAccessException ex) {
      // both reflective failures had byte-identical handling; collapsed to
      // a single multi-catch block
      String errMessage = "Cannot instantiate builder: " + keyProviderType;
      logger.error(errMessage, ex);
      throw new FlumeException(errMessage, ex);
    }
    return provider.build(context);
  }
}
| 9,757 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/encryption/CipherProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.encryption;
import java.security.Key;
import com.google.common.base.Preconditions;
/**
 * Factory for matched {@link Encryptor}/{@link Decryptor} builder pairs used
 * to protect file-channel log data.
 */
public abstract class CipherProvider {

  /** @return a fresh builder for this provider's encryptor. */
  public abstract Encryptor.Builder<?> newEncryptorBuilder();

  /** @return a fresh builder for this provider's decryptor. */
  public abstract Decryptor.Builder<?> newDecryptorBuilder();

  /** Turns clear text into cipher text. */
  public abstract static class Encryptor {

    /** Encrypts the given clear text. */
    public abstract byte[] encrypt(byte[] clearText);

    /** @return provider-specific parameters needed later to decrypt. */
    public abstract byte[] getParameters();

    /** @return the codec name identifying this encryption scheme. */
    public abstract String getCodec();

    /** Builder implementations MUST have a no-arg constructor */
    public abstract static class Builder<T extends Encryptor> {
      protected Key key;

      /** Sets the required, non-null encryption key. */
      public Builder<T> setKey(Key key) {
        Preconditions.checkNotNull(key, "key cannot be null");
        this.key = key;
        return this;
      }

      public abstract T build();
    }
  }

  /** Turns cipher text back into clear text. */
  public abstract static class Decryptor {

    /** Decrypts the given cipher text. */
    public abstract byte[] decrypt(byte[] cipherText);

    /** @return the codec name identifying this encryption scheme. */
    public abstract String getCodec();

    /** Builder implementations MUST have a no-arg constructor */
    public abstract static class Builder<T extends Decryptor> {
      protected byte[] parameters;
      protected Key key;

      /** Sets the required, non-null decryption key. */
      public Builder<T> setKey(Key key) {
        Preconditions.checkNotNull(key, "key cannot be null");
        this.key = key;
        return this;
      }

      /** Sets provider-specific parameters captured at encryption time. */
      public Builder<T> setParameters(byte[] parameters) {
        this.parameters = parameters;
        return this;
      }

      public abstract T build();
    }
  }
}
| 9,758 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/instrumentation/FileChannelCounterMBean.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.instrumentation;
import org.apache.flume.instrumentation.ChannelCounterMBean;
/**
 * JMX view of the file channel's counters: standard channel metrics plus
 * file-channel specific error counters and health flags.
 */
public interface FileChannelCounterMBean extends ChannelCounterMBean {

  /** @return true while the channel is open. */
  boolean isOpen();

  /** Numeric inverse of {@link #isOpen()}: 0 when open, 1 when closed. */
  int getClosed();

  /**
   * Health flag: 0 while the channel is starting up (i.e. the replay is
   * running) or has already started successfully; 1 if the channel is in a
   * permanently failed state because the replay threw an exception during
   * startup.
   *
   * Once the channel has started successfully, use the *ErrorCount counters
   * (or the ratio of *AttemptCount to *SuccessCount) to judge whether it is
   * functioning properly.
   *
   * Note: a configuration failure is not reported here, because the
   * ChannelCounter may not have been instantiated/started at that point.
   */
  int getUnhealthy();

  /**
   * Number of IOExceptions encountered while trying to put() onto the channel.
   *
   * @see org.apache.flume.channel.file.FileChannel.FileBackedTransaction#doPut(org.apache.flume.Event)
   */
  long getEventPutErrorCount();

  /**
   * Number of errors encountered while trying to take() from the channel,
   * including IOExceptions and corruption-related errors.
   *
   * @see org.apache.flume.channel.file.FileChannel.FileBackedTransaction#doTake()
   */
  long getEventTakeErrorCount();

  /**
   * Number of errors (any Throwable) encountered while writing checkpoints.
   *
   * @see org.apache.flume.channel.file.Log.BackgroundWorker#run()
   */
  long getCheckpointWriteErrorCount();

  /**
   * Number of errors (any Throwable) encountered while writing backup
   * checkpoints.
   *
   * @see org.apache.flume.channel.file.EventQueueBackingStoreFile#startBackupThread()
   */
  long getCheckpointBackupWriteErrorCount();
}
| 9,759 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file | Create_ds/flume/flume-ng-channels/flume-file-channel/src/main/java/org/apache/flume/channel/file/instrumentation/FileChannelCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file.instrumentation;
import org.apache.flume.instrumentation.ChannelCounter;
/**
 * Counter implementation for the file channel: standard channel metrics plus
 * put/take/checkpoint error counters and open/unhealthy flags.
 */
public class FileChannelCounter extends ChannelCounter implements FileChannelCounterMBean {

  // volatile: these flags are set by channel lifecycle threads and read via
  // the MBean interface (presumably from JMX management threads); volatile
  // guarantees readers see the latest value. TODO confirm call paths.
  private volatile boolean open;
  private volatile int unhealthy;

  private static final String EVENT_PUT_ERROR_COUNT = "channel.file.event.put.error";
  private static final String EVENT_TAKE_ERROR_COUNT = "channel.file.event.take.error";
  private static final String CHECKPOINT_WRITE_ERROR_COUNT = "channel.file.checkpoint.write.error";
  private static final String CHECKPOINT_BACKUP_WRITE_ERROR_COUNT
      = "channel.file.checkpoint.backup.write.error";

  public FileChannelCounter(String name) {
    super(name, new String[] {
        EVENT_PUT_ERROR_COUNT, EVENT_TAKE_ERROR_COUNT,
        CHECKPOINT_WRITE_ERROR_COUNT, CHECKPOINT_BACKUP_WRITE_ERROR_COUNT
      }
    );
  }

  @Override
  public boolean isOpen() {
    return open;
  }

  public void setOpen(boolean open) {
    this.open = open;
  }

  /** Numeric inverse of the open flag: 0 when open, 1 when closed. */
  @Override
  public int getClosed() {
    return open ? 0 : 1;
  }

  @Override
  public int getUnhealthy() {
    return unhealthy;
  }

  public void setUnhealthy(int unhealthy) {
    this.unhealthy = unhealthy;
  }

  @Override
  public long getEventPutErrorCount() {
    return get(EVENT_PUT_ERROR_COUNT);
  }

  public void incrementEventPutErrorCount() {
    increment(EVENT_PUT_ERROR_COUNT);
  }

  @Override
  public long getEventTakeErrorCount() {
    return get(EVENT_TAKE_ERROR_COUNT);
  }

  public void incrementEventTakeErrorCount() {
    increment(EVENT_TAKE_ERROR_COUNT);
  }

  @Override
  public long getCheckpointWriteErrorCount() {
    return get(CHECKPOINT_WRITE_ERROR_COUNT);
  }

  public void incrementCheckpointWriteErrorCount() {
    increment(CHECKPOINT_WRITE_ERROR_COUNT);
  }

  @Override
  public long getCheckpointBackupWriteErrorCount() {
    return get(CHECKPOINT_BACKUP_WRITE_ERROR_COUNT);
  }

  public void incrementCheckpointBackupWriteErrorCount() {
    increment(CHECKPOINT_BACKUP_WRITE_ERROR_COUNT);
  }
}
| 9,760 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink/hbase2/TestHBase2Sink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.base.Charsets;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.primitives.Longs;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.Sink.Status;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import static org.mockito.Mockito.*;
public class TestHBase2Sink {
private static final Logger logger =
LoggerFactory.getLogger(TestHBase2Sink.class);
private static final String tableName = "TestHbaseSink";
private static final String columnFamily = "TestColumnFamily";
private static final String inColumn = "iCol";
private static final String plCol = "pCol";
private static final String valBase = "testing hbase sink: jham";
private static HBaseTestingUtility testUtility;
private Configuration conf;
@BeforeClass
public static void setUpOnce() throws Exception {
  // Report which HBase version the mini cluster runs against.
  String hbaseVer = org.apache.hadoop.hbase.util.VersionInfo.getVersion();
  System.out.println("HBASE VERSION:" + hbaseVer);
  Configuration clusterConf = HBaseConfiguration.create();
  // Random ports keep concurrent test runs from colliding.
  clusterConf.setBoolean("hbase.localcluster.assign.random.ports", true);
  testUtility = new HBaseTestingUtility(clusterConf);
  testUtility.startMiniCluster();
}
@AfterClass
public static void tearDownOnce() throws Exception {
  // Tear the shared mini cluster down once every test has finished.
  testUtility.shutdownMiniCluster();
}
/**
 * Per-test setup: snapshot the cluster configuration and create the test
 * table used by {@link SimpleHBase2EventSerializer}-based tests.
 */
@Before
public void setUp() throws IOException {
  conf = new Configuration(testUtility.getConfiguration());
  TableName table = TableName.valueOf(tableName);
  testUtility.createTable(table, columnFamily.getBytes());
}
@After
public void tearDown() throws IOException {
  // Drop the test table so the next test starts from a clean slate.
  testUtility.deleteTable(TableName.valueOf(tableName));
}
/**
 * Builds a {@link Context} wired for {@link SimpleHBase2EventSerializer},
 * including explicit payload and increment columns.
 */
private Context getContextForSimpleHBase2EventSerializer() {
  Context context = new Context();
  context.put("table", tableName);
  context.put("columnFamily", columnFamily);
  context.put("serializer", SimpleHBase2EventSerializer.class.getName());
  context.put("serializer.payloadColumn", plCol);
  context.put("serializer.incrementColumn", inColumn);
  return context;
}
/**
 * Builds a {@link Context} wired for {@link IncrementHBase2Serializer}.
 */
private Context getContextForIncrementHBaseSerializer() {
  Context context = new Context();
  context.put("table", tableName);
  context.put("columnFamily", columnFamily);
  context.put("serializer", IncrementHBase2Serializer.class.getName());
  return context;
}
/**
 * Builds a {@link Context} for {@link SimpleHBase2EventSerializer} that
 * deliberately omits the payload and increment column settings, so the sink
 * exercises its defaults.
 */
private Context getContextWithoutIncrementHBaseSerializer() {
  Context context = new Context();
  context.put("table", tableName);
  context.put("columnFamily", columnFamily);
  context.put("serializer", SimpleHBase2EventSerializer.class.getName());
  return context;
}
@Test
public void testOneEventWithDefaults() throws Exception {
  // No payload/increment column overrides: the serializer uses its defaults.
  Context ctx = getContextWithoutIncrementHBaseSerializer();
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();

  // Stage a single event on the channel.
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event event = EventBuilder.withBody(Bytes.toBytes(valBase));
  channel.put(event);
  txn.commit();
  txn.close();

  sink.process();
  sink.stop();

  // The payload must land unchanged and the increment column must reach 1.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 1);
    Assert.assertArrayEquals(event.getBody(), results[0]);
    Assert.assertArrayEquals(Longs.toByteArray(1), results[1]);
  }
}
@Test
public void testOneEvent() throws Exception {
  // Explicitly configured payload and increment columns.
  Context ctx = getContextForSimpleHBase2EventSerializer();
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();

  // Stage a single event on the channel.
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event event = EventBuilder.withBody(Bytes.toBytes(valBase));
  channel.put(event);
  txn.commit();
  txn.close();

  sink.process();
  sink.stop();

  // The payload must land unchanged and the increment column must reach 1.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 1);
    Assert.assertArrayEquals(event.getBody(), results[0]);
    Assert.assertArrayEquals(Longs.toByteArray(1), results[1]);
  }
}
@Test
public void testThreeEvents() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  ctx.put("batchSize", "3");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();

  // Stage three distinct events in a single transaction.
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int i = 0; i < 3; i++) {
    channel.put(EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i)));
  }
  txn.commit();
  txn.close();

  sink.process();
  sink.stop();

  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 3);
    // Every staged payload must appear somewhere among the scanned rows.
    int found = 0;
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
          found++;
          break;
        }
      }
    }
    Assert.assertEquals(3, found);
    // The increment column counts all three events.
    Assert.assertArrayEquals(Longs.toByteArray(3), results[3]);
  }
}
/**
 * Three events with batchSize 2 must be drained in exactly two non-empty
 * process() calls before the sink reports BACKOFF.
 */
@Test
public void testMultipleBatches() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  ctx.put("batchSize", "2");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  //Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  int count = 0;
  while (sink.process() != Status.BACKOFF) {
    count++;
  }
  sink.stop();
  Assert.assertEquals(2, count);
  // Fix: the Table was previously obtained inside the try block but never
  // closed; include it in try-with-resources like the sibling tests do.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 3);
    int found = 0;
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
          found++;
          break;
        }
      }
    }
    Assert.assertEquals(3, found);
    byte[] out = results[3];
    Assert.assertArrayEquals(Longs.toByteArray(3), out);
  }
}
/**
 * Deletes the table created by setUp() and verifies that starting the sink
 * against the missing table throws a FlumeException (checked via the
 * {@code expected} attribute). The finally block re-creates the table so
 * tearDown() can delete it without failing.
 */
@Test(expected = FlumeException.class)
public void testMissingTable() throws Exception {
  logger.info("Running testMissingTable()");
  Context ctx = getContextForSimpleHBase2EventSerializer();
  // setUp() will create the table, so we delete it.
  logger.info("Deleting table {}", tableName);
  testUtility.deleteTable(TableName.valueOf(tableName));
  ctx.put("batchSize", "2");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  //Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  logger.info("Writing data into channel");
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  logger.info("Starting sink and processing events");
  try {
    logger.info("Calling sink.start()");
    sink.start(); // This method will throw.
    // We never get here, but we log in case the behavior changes.
    logger.error("Unexpected error: Calling sink.process()");
    sink.process();
    logger.error("Unexpected error: Calling sink.stop()");
    sink.stop();
  } finally {
    // Re-create the table so tearDown() doesn't throw.
    testUtility.createTable(TableName.valueOf(tableName), columnFamily.getBytes());
  }
  // FIXME: The test should never get here, the below code doesn't run.
  Assert.fail();
}
// TODO: Move this test to a different class and run it stand-alone.
/**
 * Writes one batch successfully, shuts the mini cluster down, and expects the
 * next process() call to throw EventDeliveryException.
 *
 * This test must run last - it shuts down the minicluster :D
 *
 * @throws Exception
 */
@Ignore("For dev builds only:" +
    "This test takes too long, and this has to be run after all other" +
    "tests, since it shuts down the minicluster. " +
    "Comment out all other tests" +
    "and uncomment this annotation to run this test.")
@Test(expected = EventDeliveryException.class)
public void testHBaseFailure() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  ctx.put("batchSize", "2");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  //Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  // Stage three events; with batchSize 2 the first process() takes only two.
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  sink.process();
  // Verify the first batch of two landed before the cluster goes down.
  try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 2);
    byte[] out;
    int found = 0;
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
          found++;
          break;
        }
      }
    }
    Assert.assertEquals(2, found);
    out = results[2];
    Assert.assertArrayEquals(Longs.toByteArray(2), out);
  }
  // With the cluster gone, the next process() must throw (checked by the
  // @Test "expected" attribute).
  testUtility.shutdownMiniCluster();
  sink.process();
  sink.stop();
}
/**
 * Makes HBase scans to get rows in the payload column and increment column
 * in the table given. Expensive, so tread lightly.
 * Calling this function multiple times for the same result set is a bad
 * idea. Cache the result set once it is returned by this function.
 *
 * @param table     the table to scan
 * @param numEvents Number of events inserted into the table
 * @return array of byte arrays: numEvents payload values followed by the
 *         increment-column value(s) in the trailing slot
 * @throws IOException if either scan fails
 */
private byte[][] getResults(Table table, int numEvents) throws IOException {
  byte[][] results = new byte[numEvents + 1][];
  Scan scan = new Scan();
  scan.addColumn(columnFamily.getBytes(), plCol.getBytes());
  scan.withStartRow(Bytes.toBytes("default"));
  int i = 0;
  // Payload column: expect exactly numEvents rows.
  // try-with-resources guarantees the scanner is closed on every path,
  // including the over-count throw below (the old code closed it twice).
  try (ResultScanner rs = table.getScanner(scan)) {
    for (Result r = rs.next(); r != null; r = rs.next()) {
      byte[] out = r.getValue(columnFamily.getBytes(), plCol.getBytes());
      if (i >= results.length - 1) {
        throw new FlumeException("More results than expected in the table." +
            "Expected = " + numEvents + ". Found = " + i);
      }
      results[i++] = out;
      // Print the readable payload, not the byte[] object identity.
      System.out.println(Bytes.toString(out));
    }
  }
  // JUnit convention: expected value first, actual second.
  Assert.assertEquals(results.length - 1, i);
  // Increment column: the single counter row.
  scan = new Scan();
  scan.addColumn(columnFamily.getBytes(), inColumn.getBytes());
  scan.withStartRow(Bytes.toBytes("incRow"));
  try (ResultScanner rs = table.getScanner(scan)) {
    for (Result r = rs.next(); r != null; r = rs.next()) {
      byte[] out = r.getValue(columnFamily.getBytes(), inColumn.getBytes());
      results[i++] = out;
      System.out.println(Bytes.toString(out));
    }
  }
  return results;
}
/**
 * Verifies that a ChannelException thrown from take() rolls the transaction
 * back cleanly (counted in SinkCounter.channelReadFail) and that a later
 * retry delivers the event to HBase.
 */
@Test
public void testTransactionStateOnChannelException() throws Exception {
  Context sinkContext = getContextForSimpleHBase2EventSerializer();
  sinkContext.put("batchSize", "1");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, sinkContext);
  Channel channel = spy(new MemoryChannel());
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();

  // Commit a single event into the channel.
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event event = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + 0));
  channel.put(event);
  txn.commit();
  txn.close();

  // First take() blows up; the sink must surface it and count the failure.
  doThrow(new ChannelException("Mock Exception")).when(channel).take();
  try {
    sink.process();
    Assert.fail("take() method should throw exception");
  } catch (ChannelException ex) {
    Assert.assertEquals("Mock Exception", ex.getMessage());
    SinkCounter sinkCounter = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
    Assert.assertEquals(1, sinkCounter.getChannelReadFail());
  }

  // Let the retry succeed, then verify the row and counter landed in HBase.
  doReturn(event).when(channel).take();
  sink.process();
  sink.stop();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 1);
    Assert.assertArrayEquals(event.getBody(), results[0]);
    Assert.assertArrayEquals(Longs.toByteArray(1), results[1]);
  }
}
/**
 * Verifies that a FlumeException thrown by the serializer rolls the
 * transaction back (counted in SinkCounter.eventWriteFail) and that the
 * event is still delivered once the serializer recovers.
 */
@Test
public void testTransactionStateOnSerializationException() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  ctx.put("batchSize", "1");
  ctx.put(HBase2SinkConfigurationConstants.CONFIG_SERIALIZER,
          "org.apache.flume.sink.hbase2.MockSimpleHBase2EventSerializer");
  HBase2Sink sink = new HBase2Sink(conf);
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + 0));
  channel.put(e);
  tx.commit();
  tx.close();
  try {
    MockSimpleHBase2EventSerializer.throwException = true;
    sink.process();
    Assert.fail("FlumeException expected from serializer");
  } catch (FlumeException ex) {
    Assert.assertEquals("Exception for testing", ex.getMessage());
    SinkCounter sinkCounter = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
    Assert.assertEquals(1, sinkCounter.getEventWriteFail());
  } finally {
    // Always clear the static flag: if this test fails while the flag is
    // still set, every later test sharing the mock serializer would break.
    MockSimpleHBase2EventSerializer.throwException = false;
  }
  sink.process();
  sink.stop();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 1);
    byte[] out = results[0];
    Assert.assertArrayEquals(e.getBody(), out);
    out = results[1];
    Assert.assertArrayEquals(Longs.toByteArray(1), out);
  }
}
/**
 * Exercises the no-arg sink constructor: ZooKeeper quorum and znode parent
 * are supplied purely through the Flume context. Three events are drained
 * until the sink backs off, then the table contents are validated.
 */
@Test
public void testWithoutConfigurationObject() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  Context sinkContext = new Context(ctx.getParameters());
  sinkContext.put("batchSize", "2");
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_QUORUM,
      ZKConfig.getZKQuorumServersString(conf));
  System.out.print(ctx.getString(HBase2SinkConfigurationConstants.ZK_QUORUM));
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_ZNODE_PARENT,
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
          HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT));
  HBase2Sink sink = new HBase2Sink();
  Configurables.configure(sink, sinkContext);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();

  // Commit three events in a single transaction.
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int i = 0; i < 3; i++) {
    channel.put(EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i)));
  }
  txn.commit();
  txn.close();

  // Drain the channel until the sink reports BACKOFF.
  Status status;
  do {
    status = sink.process();
  } while (status != Status.BACKOFF);
  sink.stop();

  // All three payloads plus the increment counter must be present.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    byte[][] results = getResults(table, 3);
    int found = 0;
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
          found++;
          break;
        }
      }
    }
    Assert.assertEquals(3, found);
    Assert.assertArrayEquals(Longs.toByteArray(3), results[3]);
  }
}
/**
 * Verifies that a comma-separated host:port quorum string from the context
 * is split into the hbase.zookeeper.quorum host list and one client port.
 */
@Test
public void testZKQuorum() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  Context sinkContext = new Context(ctx.getParameters());
  String quorum = "zk1.flume.apache.org:3342, zk2.flume.apache.org:3342, " +
      "zk3.flume.apache.org:3342";
  sinkContext.put("batchSize", "2");
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_QUORUM, quorum);
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_ZNODE_PARENT,
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
          HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT));
  HBase2Sink sink = new HBase2Sink();
  Configurables.configure(sink, sinkContext);
  // Hosts collapse into one list; the shared port is extracted separately.
  Assert.assertEquals("zk1.flume.apache.org,zk2.flume.apache.org," +
      "zk3.flume.apache.org", sink.getConfig().get(HConstants.ZOOKEEPER_QUORUM));
  Assert.assertEquals(String.valueOf(3342),
      sink.getConfig().get(HConstants.ZOOKEEPER_CLIENT_PORT));
}
/**
 * A quorum whose members listen on different ports is invalid; configuring
 * the sink must raise a FlumeException.
 */
@Test(expected = FlumeException.class)
public void testZKQuorumIncorrectPorts() throws Exception {
  Context ctx = getContextForSimpleHBase2EventSerializer();
  Context sinkContext = new Context(ctx.getParameters());
  String quorum = "zk1.flume.apache.org:3345, zk2.flume.apache.org:3342, " +
      "zk3.flume.apache.org:3342";
  sinkContext.put("batchSize", "2");
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_QUORUM, quorum);
  sinkContext.put(HBase2SinkConfigurationConstants.ZK_ZNODE_PARENT,
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
          HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT));
  HBase2Sink sink = new HBase2Sink();
  // Mixed ports in the quorum string must be rejected right here.
  Configurables.configure(sink, sinkContext);
  Assert.fail();
}
/**
 * With coalescing enabled, all increments for the same row/qualifier must
 * be merged into a single Increment; CoalesceValidator asserts the merged
 * counts match the expected totals.
 */
@Test
public void testCoalesce() throws EventDeliveryException {
  Context ctx = getContextForIncrementHBaseSerializer();
  ctx.put("batchSize", "100");
  ctx.put(HBase2SinkConfigurationConstants.CONFIG_COALESCE_INCREMENTS,
      String.valueOf(true));

  final Map<String, Long> expectedCounts = Maps.newHashMap();
  expectedCounts.put("r1:c1", 10L);
  expectedCounts.put("r1:c2", 20L);
  expectedCounts.put("r2:c1", 7L);
  expectedCounts.put("r2:c3", 63L);
  HBase2Sink.DebugIncrementsCallback callback = new CoalesceValidator(expectedCounts);

  HBase2Sink sink = new HBase2Sink(testUtility.getConfiguration(), callback);
  Configurables.configure(sink, ctx);
  Channel channel = createAndConfigureMemoryChannel(sink);
  List<Event> events = Lists.newLinkedList();
  generateEvents(events, expectedCounts);
  putEvents(channel, events);

  sink.start();
  sink.process(); // Triggers the CoalesceValidator callback.
  sink.stop();
}
/**
 * Without coalescing enabled the per-event increments are not merged, so
 * the validator's expected totals cannot match and an AssertionError is
 * raised from the callback.
 */
@Test(expected = AssertionError.class)
public void negativeTestCoalesce() throws EventDeliveryException {
  Context ctx = getContextForIncrementHBaseSerializer();
  ctx.put("batchSize", "10");

  final Map<String, Long> expectedCounts = Maps.newHashMap();
  expectedCounts.put("r1:c1", 10L);
  HBase2Sink.DebugIncrementsCallback callback = new CoalesceValidator(expectedCounts);

  HBase2Sink sink = new HBase2Sink(testUtility.getConfiguration(), callback);
  Configurables.configure(sink, ctx);
  Channel channel = createAndConfigureMemoryChannel(sink);
  List<Event> events = Lists.newLinkedList();
  generateEvents(events, expectedCounts);
  putEvents(channel, events);

  sink.start();
  sink.process(); // CoalesceValidator fails its count assertion here.
  sink.stop();
}
/**
 * Serializers implementing BatchAware must receive onBatchStart() exactly
 * once per process() call.
 */
@Test
public void testBatchAware() throws EventDeliveryException {
  logger.info("Running testBatchAware()");
  Context ctx = getContextForIncrementHBaseSerializer();
  HBase2Sink sink = new HBase2Sink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = createAndConfigureMemoryChannel(sink);
  sink.start();
  final int batchCount = 3;
  int processed = 0;
  while (processed < batchCount) {
    sink.process();
    processed++;
  }
  sink.stop();
  Assert.assertEquals(batchCount,
      ((IncrementHBase2Serializer) sink.getSerializer()).getNumBatchesStarted());
}
/**
 * The sink must refuse configuration when the server reports an HBase 1.x
 * version string.
 */
@Test (expected = ConfigurationException.class)
public void testHBaseVersionCheck() throws Exception {
  Context ctx = getContextWithoutIncrementHBaseSerializer();
  HBase2Sink sink = mock(HBase2Sink.class);
  // Stub a 1.x version, then run the real configure() against it.
  when(sink.getHBbaseVersionString()).thenReturn("1.0.0");
  doCallRealMethod().when(sink).configure(any());
  Configurables.configure(sink, ctx);
}
/**
 * A version string that cannot be parsed as a number must also fail the
 * version check with a ConfigurationException.
 */
@Test (expected = ConfigurationException.class)
public void testHBaseVersionCheckNotANumber() throws Exception {
  Context ctx = getContextWithoutIncrementHBaseSerializer();
  HBase2Sink sink = mock(HBase2Sink.class);
  // Stub an unparseable version, then run the real configure() against it.
  when(sink.getHBbaseVersionString()).thenReturn("Dummy text");
  doCallRealMethod().when(sink).configure(any());
  Configurables.configure(sink, ctx);
}
/**
 * For testing that the rows coalesced, serialized by
 * {@link IncrementHBase2Serializer}, are of the expected batch size.
 */
private static class CoalesceValidator
    implements HBase2Sink.DebugIncrementsCallback {

  /** Expected "row:qualifier" -> count pairs, supplied by the test. */
  private final Map<String, Long> expectedCounts;

  public CoalesceValidator(Map<String, Long> expectedCounts) {
    this.expectedCounts = expectedCounts;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void onAfterCoalesce(Iterable<Increment> increments) {
    for (Increment inc : increments) {
      byte[] row = inc.getRow();
      Map<byte[], NavigableMap<byte[], Long>> families;
      try {
        families = inc.getFamilyMapOfLongs();
      } catch (Exception e) {
        // Surface extraction problems as test failures with the cause kept
        // (Throwables.propagate is deprecated and would lose the intent).
        throw new AssertionError(e);
      }
      // Use a JUnit assertion: a plain 'assert' is a no-op unless the JVM
      // runs with -ea, silently skipping this check.
      Assert.assertNotNull(families);
      // Iterate entries directly instead of keySet() + get() lookups.
      for (Map.Entry<byte[], NavigableMap<byte[], Long>> family : families.entrySet()) {
        for (Map.Entry<byte[], Long> entry : family.getValue().entrySet()) {
          String key = new String(row, Charsets.UTF_8) +
              ':' +
              new String(entry.getKey(), Charsets.UTF_8);
          Assert.assertEquals("Expected counts don't match observed for " + key,
              expectedCounts.get(key), entry.getValue());
        }
      }
    }
  }
}
/**
 * Add number of Events corresponding to counts to the events list.
 * @param events Destination list.
 * @param counts How many events to generate for each row:qualifier pair.
 */
private void generateEvents(List<Event> events, Map<String, Long> counts) {
  // Iterate entries directly rather than keySet() followed by get() lookups.
  for (Map.Entry<String, Long> entry : counts.entrySet()) {
    for (long i = 0; i < entry.getValue(); i++) {
      // The event body is the "row:qualifier" key itself.
      events.add(EventBuilder.withBody(entry.getKey(), Charsets.UTF_8));
    }
  }
}
/**
 * Builds a started MemoryChannel (capacity 1000, transaction capacity 1000)
 * and wires it into the given sink.
 *
 * @param sink the sink that should drain the channel
 * @return the started channel
 */
private Channel createAndConfigureMemoryChannel(HBase2Sink sink) {
  Context channelContext = new Context();
  channelContext.put("capacity", String.valueOf(1000L));
  channelContext.put("transactionCapacity", String.valueOf(1000L));
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, channelContext);
  sink.setChannel(memoryChannel);
  memoryChannel.start();
  return memoryChannel;
}
/**
 * Commits all given events to the channel inside a single transaction.
 *
 * @param channel destination channel
 * @param events  events to commit
 */
private void putEvents(Channel channel, Iterable<Event> events) {
  Transaction txn = channel.getTransaction();
  txn.begin();
  events.forEach(channel::put);
  txn.commit();
  txn.close();
}
}
| 9,761 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink/hbase2/TestHBase2SinkCreation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import org.apache.flume.FlumeException;
import org.apache.flume.Sink;
import org.apache.flume.SinkFactory;
import org.apache.flume.sink.DefaultSinkFactory;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestHBase2SinkCreation {

  private SinkFactory sinkFactory;

  @Before
  public void setUp() {
    sinkFactory = new DefaultSinkFactory();
  }

  /**
   * Asserts that the factory resolves the "hbase2" short type name to a
   * non-null sink instance of the expected class.
   */
  private void verifySinkCreation(Class<?> expectedType) throws FlumeException {
    Sink created = sinkFactory.create("hbase2-sink", "hbase2");
    Assert.assertNotNull(created);
    Assert.assertTrue(expectedType.isInstance(created));
  }

  @Test
  public void testSinkCreation() {
    verifySinkCreation(HBase2Sink.class);
  }
}
| 9,762 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink/hbase2/IncrementHBase2Serializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import java.util.Collections;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.conf.ComponentConfiguration;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Row;
import java.util.List;
/**
* For Increment-related unit tests.
*/
/**
 * For Increment-related unit tests.
 */
class IncrementHBase2Serializer implements HBase2EventSerializer, BatchAware {
  private Event event;
  private byte[] family;
  private int numBatchesStarted = 0;

  @Override
  public void configure(Context context) {
    // Nothing to configure for tests.
  }

  @Override
  public void configure(ComponentConfiguration conf) {
    // Nothing to configure for tests.
  }

  @Override
  public void close() {
    // No resources to release.
  }

  @Override
  public void initialize(Event event, byte[] columnFamily) {
    this.event = event;
    this.family = columnFamily;
  }

  // This serializer only ever emits Increments, never Puts/Deletes.
  @Override
  public List<Row> getActions() {
    return Collections.emptyList();
  }

  // The event body is interpreted as a "row:qualifier" string.
  @Override
  public List<Increment> getIncrements() {
    String[] parts = new String(event.getBody(), Charsets.UTF_8).split(":");
    Increment increment = new Increment(parts[0].getBytes(Charsets.UTF_8));
    increment.addColumn(family, parts[1].getBytes(Charsets.UTF_8), 1L);
    return Lists.newArrayList(increment);
  }

  @Override
  public void onBatchStart() {
    numBatchesStarted++;
  }

  /** @return how many times onBatchStart() has been invoked. */
  @VisibleForTesting
  public int getNumBatchesStarted() {
    return numBatchesStarted;
  }
}
| 9,763 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink/hbase2/MockSimpleHBase2EventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import java.util.List;
import org.apache.flume.FlumeException;
import org.apache.hadoop.hbase.client.Row;
class MockSimpleHBase2EventSerializer extends SimpleHBase2EventSerializer {
public static boolean throwException = false;
@Override
public List<Row> getActions() throws FlumeException {
if (throwException) {
throw new FlumeException("Exception for testing");
}
return super.getActions();
}
} | 9,764 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/test/java/org/apache/flume/sink/hbase2/TestRegexHBase2EventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.collect.Maps;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestRegexHBase2EventSerializer {

  /**
   * Ensure that when no config is specified, a catch-all regex is used
   * with the default column name: the whole event body ends up as a single
   * cell under COLUMN_NAME_DEFAULT in the configured column family.
   */
  @Test
  public void testDefaultBehavior() throws Exception {
    RegexHBase2EventSerializer s = new RegexHBase2EventSerializer();
    Context context = new Context();
    s.configure(context);
    String logMsg = "The sky is falling!";
    Event e = EventBuilder.withBody(Bytes.toBytes(logMsg));
    s.initialize(e, "CF".getBytes());
    List<Row> actions = s.getActions();
    assertTrue(actions.size() == 1);
    assertTrue(actions.get(0) instanceof Put);
    Put put = (Put) actions.get(0);
    assertTrue(put.getFamilyCellMap().containsKey(s.cf));
    List<Cell> cells = put.getFamilyCellMap().get(s.cf);
    assertTrue(cells.size() == 1);
    // Collect qualifier -> value pairs to make the assertions readable.
    Map<String, String> resultMap = Maps.newHashMap();
    for (Cell cell : cells) {
      resultMap.put(new String(CellUtil.cloneQualifier(cell)),
          new String(CellUtil.cloneValue(cell)));
    }
    assertTrue(resultMap.containsKey(
        RegexHBase2EventSerializer.COLUMN_NAME_DEFAULT));
    assertEquals("The sky is falling!",
        resultMap.get(RegexHBase2EventSerializer.COLUMN_NAME_DEFAULT));
  }

  /**
   * When rowKeyIndex selects one of the regex capture groups, that group
   * becomes the HBase row key and is not stored as a regular column
   * (only the other two groups appear as cells).
   */
  @Test
  public void testRowIndexKey() throws Exception {
    RegexHBase2EventSerializer s = new RegexHBase2EventSerializer();
    Context context = new Context();
    context.put(RegexHBase2EventSerializer.REGEX_CONFIG,"^([^\t]+)\t([^\t]+)\t" + "([^\t]+)$");
    context.put(RegexHBase2EventSerializer.COL_NAME_CONFIG, "col1,col2,ROW_KEY");
    context.put("rowKeyIndex", "2");
    s.configure(context);
    String body = "val1\tval2\trow1";
    Event e = EventBuilder.withBody(Bytes.toBytes(body));
    s.initialize(e, "CF".getBytes());
    List<Row> actions = s.getActions();
    Put put = (Put)actions.get(0);
    List<Cell> cells = put.getFamilyCellMap().get(s.cf);
    // Two cells only: the third group became the row key.
    assertTrue(cells.size() == 2);
    Map<String, String> resultMap = Maps.newHashMap();
    for (Cell cell : cells) {
      resultMap.put(new String(CellUtil.cloneQualifier(cell)),
          new String(CellUtil.cloneValue(cell)));
    }
    assertEquals("val1", resultMap.get("col1"));
    assertEquals("val2", resultMap.get("col2"));
    assertEquals("row1", Bytes.toString(put.getRow()));
  }

  /**
   * Test a common case where regex is used to parse apache log format:
   * each capture group is stored under its configured column name, and no
   * increments are produced.
   */
  @Test
  public void testApacheRegex() throws Exception {
    RegexHBase2EventSerializer s = new RegexHBase2EventSerializer();
    Context context = new Context();
    context.put(RegexHBase2EventSerializer.REGEX_CONFIG,
        "([^ ]*) ([^ ]*) ([^ ]*) (-|\\[[^\\]]*\\]) \"([^ ]+) ([^ ]+)" +
        " ([^\"]+)\" (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\"[^\"]*\")" +
        " ([^ \"]*|\"[^\"]*\"))?");
    context.put(RegexHBase2EventSerializer.COL_NAME_CONFIG,
        "host,identity,user,time,method,request,protocol,status,size," +
        "referer,agent");
    s.configure(context);
    String logMsg = "33.22.11.00 - - [20/May/2011:07:01:19 +0000] " +
        "\"GET /wp-admin/css/install.css HTTP/1.0\" 200 813 " +
        "\"http://www.cloudera.com/wp-admin/install.php\" \"Mozilla/5.0 (comp" +
        "atible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)\"";
    Event e = EventBuilder.withBody(Bytes.toBytes(logMsg));
    s.initialize(e, "CF".getBytes());
    List<Row> actions = s.getActions();
    assertEquals(1, s.getActions().size());
    assertTrue(actions.get(0) instanceof Put);
    Put put = (Put) actions.get(0);
    assertTrue(put.getFamilyCellMap().containsKey(s.cf));
    List<Cell> cells = put.getFamilyCellMap().get(s.cf);
    // One cell per configured column name (11 capture groups).
    assertTrue(cells.size() == 11);
    Map<String, String> resultMap = Maps.newHashMap();
    for (Cell cell : cells) {
      resultMap.put(new String(CellUtil.cloneQualifier(cell)),
          new String(CellUtil.cloneValue(cell)));
    }
    assertEquals("33.22.11.00", resultMap.get("host"));
    assertEquals("-", resultMap.get("identity"));
    assertEquals("-", resultMap.get("user"));
    assertEquals("[20/May/2011:07:01:19 +0000]", resultMap.get("time"));
    assertEquals("GET", resultMap.get("method"));
    assertEquals("/wp-admin/css/install.css", resultMap.get("request"));
    assertEquals("HTTP/1.0", resultMap.get("protocol"));
    assertEquals("200", resultMap.get("status"));
    assertEquals("813", resultMap.get("size"));
    assertEquals("\"http://www.cloudera.com/wp-admin/install.php\"",
        resultMap.get("referer"));
    assertEquals("\"Mozilla/5.0 (compatible; Yahoo! Slurp; " +
        "http://help.yahoo.com/help/us/ysearch/slurp)\"",
        resultMap.get("agent"));
    List<Increment> increments = s.getIncrements();
    assertEquals(0, increments.size());
  }

  /**
   * Verifies generated row keys follow the "timestamp-randomKey-nonce"
   * pattern, with the nonce shared (and monotonically increasing) across
   * serializer instances.
   */
  @Test
  public void testRowKeyGeneration() {
    Context context = new Context();
    RegexHBase2EventSerializer s1 = new RegexHBase2EventSerializer();
    s1.configure(context);
    RegexHBase2EventSerializer s2 = new RegexHBase2EventSerializer();
    s2.configure(context);
    // Reset shared nonce to zero
    RegexHBase2EventSerializer.nonce.set(0);
    String randomString = RegexHBase2EventSerializer.randomKey;
    Event e1 = EventBuilder.withBody(Bytes.toBytes("body"));
    Event e2 = EventBuilder.withBody(Bytes.toBytes("body"));
    Event e3 = EventBuilder.withBody(Bytes.toBytes("body"));
    // Mock the clock so the timestamp component is deterministic.
    Calendar cal = mock(Calendar.class);
    when(cal.getTimeInMillis()).thenReturn(1L);
    s1.initialize(e1, "CF".getBytes());
    String rk1 = new String(s1.getRowKey(cal));
    assertEquals("1-" + randomString + "-0", rk1);
    when(cal.getTimeInMillis()).thenReturn(10L);
    s1.initialize(e2, "CF".getBytes());
    String rk2 = new String(s1.getRowKey(cal));
    assertEquals("10-" + randomString + "-1", rk2);
    when(cal.getTimeInMillis()).thenReturn(100L);
    s2.initialize(e3, "CF".getBytes());
    String rk3 = new String(s2.getRowKey(cal));
    // Nonce keeps incrementing even on a different serializer instance.
    assertEquals("100-" + randomString + "-2", rk3);
  }

  /**
   * Test depositing of the header information: with depositHeaders enabled,
   * every event header becomes an extra cell, and qualifiers/values are
   * encoded with the configured charset (KOI8-R here, exercising non-ASCII
   * header names and values).
   */
  @Test
  public void testDepositHeaders() throws Exception {
    Charset charset = Charset.forName("KOI8-R");
    RegexHBase2EventSerializer s = new RegexHBase2EventSerializer();
    Context context = new Context();
    context.put(RegexHBase2EventSerializer.DEPOSIT_HEADERS_CONFIG,
        "true");
    context.put(RegexHBase2EventSerializer.CHARSET_CONFIG,
        charset.toString());
    s.configure(context);
    String body = "body";
    Map<String, String> headers = Maps.newHashMap();
    headers.put("header1", "value1");
    headers.put("заголовок2", "значение2");
    Event e = EventBuilder.withBody(Bytes.toBytes(body), headers);
    s.initialize(e, "CF".getBytes());
    List<Row> actions = s.getActions();
    assertEquals(1, s.getActions().size());
    assertTrue(actions.get(0) instanceof Put);
    Put put = (Put) actions.get(0);
    assertTrue(put.getFamilyCellMap().containsKey(s.cf));
    List<Cell> cells = put.getFamilyCellMap().get(s.cf);
    // Body cell plus one cell per header.
    assertTrue(cells.size() == 3);
    Map<String, byte[]> resultMap = Maps.newHashMap();
    for (Cell cell : cells) {
      resultMap.put(new String(CellUtil.cloneQualifier(cell), charset),
          CellUtil.cloneValue(cell));
    }
    assertEquals(body,
        new String(resultMap.get(RegexHBase2EventSerializer.COLUMN_NAME_DEFAULT),
        charset));
    assertEquals("value1", new String(resultMap.get("header1"), charset));
    assertArrayEquals("значение2".getBytes(charset), resultMap.get("заголовок2"));
    assertEquals("значение2".length(), resultMap.get("заголовок2").length);
    List<Increment> increments = s.getIncrements();
    assertEquals(0, increments.size());
  }
}
| 9,765 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink/hbase2/BatchAware.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
/**
 * This interface allows for implementing HBase serializers that are aware of
 * batching. {@link #onBatchStart()} is called at the beginning of each batch
 * by the sink.
 */
public interface BatchAware {
  /**
   * Invoked by the sink once at the start of every batch, before any events
   * of that batch are handed to the serializer.
   */
  void onBatchStart();
}
| 9,766 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink/hbase2/HBase2EventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurableComponent;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Row;
/**
 * Interface for an event serializer which serializes the headers and body
 * of an event to write them to HBase 2. This is configurable, so any config
 * params required should be taken through this. Only the column family is
 * passed in. The columns should exist in the table and column family
 * specified in the configuration for the HBase2Sink.
 */
public interface HBase2EventSerializer extends Configurable, ConfigurableComponent {
  /**
   * Initialize the event serializer.
   * @param event Event to be written to HBase
   * @param columnFamily Column family to write to
   */
  void initialize(Event event, byte[] columnFamily);

  /**
   * Get the actions that should be written out to hbase as a result of this
   * event. This list is written to HBase using the HBase batch API.
   * @return List of {@link org.apache.hadoop.hbase.client.Row} which
   * are written as such to HBase.
   *
   * 0.92 increments do not implement Row, so this is not generic.
   *
   */
  List<Row> getActions();

  /**
   * Get the increments to apply to HBase as a result of the event passed
   * to {@link #initialize(Event, byte[])}.
   * @return List of {@link org.apache.hadoop.hbase.client.Increment}s;
   *         may be empty if the event produces no counter updates.
   */
  List<Increment> getIncrements();

  /*
   * Clean up any state. This will be called when the sink is being stopped.
   */
  void close();
}
| 9,767 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink/hbase2/SimpleHBase2EventSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.base.Charsets;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.conf.ComponentConfiguration;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import java.util.LinkedList;
import java.util.List;
/**
* A simple serializer that returns puts from an event, by writing the event
* body into it. The headers are discarded. It also updates a row in HBase
* which acts as an event counter.
* <p>Takes optional parameters:<p>
* <tt>rowPrefix:</tt> The prefix to be used. Default: <i>default</i><p>
* <tt>incrementRow</tt> The row to increment. Default: <i>incRow</i><p>
* <tt>suffix:</tt> <i>uuid/random/timestamp.</i>Default: <i>uuid</i><p>
* <p>Mandatory parameters: <p>
* <tt>cf:</tt>Column family.<p>
* Components that have no defaults and will not be used if null:
* <tt>payloadColumn:</tt> Which column to put payload in. If it is null,
* event data will not be written.<p>
* <tt>incColumn:</tt> Which column to increment. Null means no column is
* incremented.
*/
public class SimpleHBase2EventSerializer implements HBase2EventSerializer {
  // Prefix prepended to every generated row key.
  private String rowPrefix;
  // Row that receives the optional counter increment.
  private byte[] incrementRow;
  // Column family handed over by the sink via initialize().
  private byte[] cf;
  // Qualifier the event body is written to; null disables payload writes.
  private byte[] plCol;
  // Qualifier that gets incremented; null disables increments.
  private byte[] incCol;
  // Strategy used to build the row-key suffix.
  private KeyType keyType;
  // Body of the event currently being serialized.
  private byte[] payload;

  public SimpleHBase2EventSerializer() {
  }

  /**
   * Reads rowPrefix/incrementRow/suffix/payloadColumn/incrementColumn from the
   * context. The suffix selects the row-key strategy; anything unrecognised
   * falls back to UUID keys.
   */
  @Override
  public void configure(Context context) {
    rowPrefix = context.getString("rowPrefix", "default");
    incrementRow =
        context.getString("incrementRow", "incRow").getBytes(Charsets.UTF_8);
    String suffix = context.getString("suffix", "uuid");
    String payloadColumn = context.getString("payloadColumn", "pCol");
    String incColumn = context.getString("incrementColumn", "iCol");

    boolean payloadEnabled = payloadColumn != null && !payloadColumn.isEmpty();
    if (payloadEnabled) {
      if ("timestamp".equals(suffix)) {
        keyType = KeyType.TS;
      } else if ("random".equals(suffix)) {
        keyType = KeyType.RANDOM;
      } else if ("nano".equals(suffix)) {
        keyType = KeyType.TSNANO;
      } else {
        // Default and fallback for any unknown suffix value.
        keyType = KeyType.UUID;
      }
      plCol = payloadColumn.getBytes(Charsets.UTF_8);
    }
    if (incColumn != null && !incColumn.isEmpty()) {
      incCol = incColumn.getBytes(Charsets.UTF_8);
    }
  }

  @Override
  public void configure(ComponentConfiguration conf) {
    // Component-level configuration is not used by this serializer.
  }

  @Override
  public void initialize(Event event, byte[] cf) {
    this.payload = event.getBody();
    this.cf = cf;
  }

  /**
   * Builds a single Put carrying the event body, keyed by the configured
   * prefix plus a suffix chosen by the key type. Returns an empty list when
   * no payload column is configured.
   */
  @Override
  public List<Row> getActions() throws FlumeException {
    List<Row> actions = new LinkedList<>();
    if (plCol == null) {
      return actions;
    }
    try {
      byte[] rowKey;
      switch (keyType) {
        case TS:
          rowKey = SimpleRowKeyGenerator.getTimestampKey(rowPrefix);
          break;
        case RANDOM:
          rowKey = SimpleRowKeyGenerator.getRandomKey(rowPrefix);
          break;
        case TSNANO:
          rowKey = SimpleRowKeyGenerator.getNanoTimestampKey(rowPrefix);
          break;
        default:
          rowKey = SimpleRowKeyGenerator.getUUIDKey(rowPrefix);
          break;
      }
      Put put = new Put(rowKey);
      put.addColumn(cf, plCol, payload);
      actions.add(put);
    } catch (Exception e) {
      throw new FlumeException("Could not get row key!", e);
    }
    return actions;
  }

  /**
   * Returns one Increment of 1 against the counter row when an increment
   * column is configured, otherwise an empty list.
   */
  @Override
  public List<Increment> getIncrements() {
    List<Increment> incs = new LinkedList<>();
    if (incCol != null) {
      Increment inc = new Increment(incrementRow);
      inc.addColumn(cf, incCol, 1);
      incs.add(inc);
    }
    return incs;
  }

  @Override
  public void close() {
    // No resources to release.
  }

  /** Row-key suffix strategies supported by this serializer. */
  public enum KeyType {
    UUID,
    RANDOM,
    TS,
    TSNANO
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.Transaction;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.auth.FlumeAuthenticationUtil;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
/**
* A simple sink which reads events from a channel and writes them to HBase 2.
* The HBase configuration is picked up from the first <tt>hbase-site.xml</tt>
* encountered in the classpath. This sink supports batch reading of
* events from the channel, and writing them to HBase, to minimize the number
* of flushes on the HBase tables. To use this sink, it has to be configured
* with certain mandatory parameters:<p>
* <tt>table: </tt> The name of the table in HBase to write to. <p>
* <tt>columnFamily: </tt> The column family in HBase to write to.<p>
* This sink will commit each transaction if the table's write buffer size is
* reached or if the number of events in the current transaction reaches the
* batch size, whichever comes first.<p>
* Other optional parameters are:<p>
* <tt>serializer:</tt> A class implementing {@link HBase2EventSerializer}.
* An instance of
* this class will be used to write out events to HBase.<p>
* <tt>serializer.*:</tt> Passed in the configure() method to serializer
* as an object of {@link org.apache.flume.Context}.<p>
* <tt>batchSize: </tt>This is the batch size used by the client. This is the
* maximum number of events the sink will commit per transaction. The default
* batch size is 100 events.
* <p>
* <p>
* <strong>Note: </strong> While this sink flushes all events in a transaction
* to HBase in one shot, HBase does not guarantee atomic commits on multiple
* rows. So if a subset of events in a batch are written to disk by HBase and
* HBase fails, the flume transaction is rolled back, causing flume to write
* all the events in the transaction all over again, which will cause
* duplicates. The serializer is expected to take care of the handling of
* duplicates etc. HBase also does not support batch increments, so if
* multiple increments are returned by the serializer, then HBase failure
* will cause them to be re-written, when HBase comes back up.
*/
public class HBase2Sink extends AbstractSink implements Configurable, BatchSizeSupported {
  private static final Logger logger = LoggerFactory.getLogger(HBase2Sink.class);

  // Destination table and column family, set in configure().
  private String tableName;
  private byte[] columnFamily;
  // HBase connection and buffered writer. Flushing is driven explicitly by
  // this sink so a change in HBase's own flush criteria cannot affect us.
  private Connection conn;
  private BufferedMutator table;
  // Maximum number of events taken from the channel per transaction.
  private long batchSize;
  private final Configuration config;
  private HBase2EventSerializer serializer;
  private String kerberosPrincipal;
  private String kerberosKeytab;
  // When false, puts/increments skip the WAL; data still in the memstore can
  // be lost if the region server fails.
  private boolean enableWal = true;
  // When true, increments are coalesced per row/family/qualifier before RPC.
  private boolean batchIncrements = false;
  private SinkCounter sinkCounter;
  private PrivilegedExecutor privilegedExecutor;

  // Internal hook used for unit testing only.
  private DebugIncrementsCallback debugIncrCallback = null;

  public HBase2Sink() {
    this(HBaseConfiguration.create());
  }

  public HBase2Sink(Configuration conf) {
    this.config = conf;
  }

  @VisibleForTesting
  @InterfaceAudience.Private
  HBase2Sink(Configuration conf, DebugIncrementsCallback cb) {
    this(conf);
    this.debugIncrCallback = cb;
  }

  /**
   * Authenticates (optionally via Kerberos), opens the HBase connection and
   * {@link BufferedMutator}, and verifies that the configured table and
   * column family exist before declaring the sink started.
   *
   * @throws FlumeException if login, connection, or table validation fails
   */
  @Override
  public void start() {
    Preconditions.checkArgument(table == null, "Please call stop " +
        "before calling start on an old instance.");
    try {
      privilegedExecutor =
          FlumeAuthenticationUtil.getAuthenticator(kerberosPrincipal, kerberosKeytab);
    } catch (Exception ex) {
      sinkCounter.incrementConnectionFailedCount();
      throw new FlumeException("Failed to login to HBase using "
          + "provided credentials.", ex);
    }
    try {
      conn = privilegedExecutor.execute((PrivilegedExceptionAction<Connection>) () -> {
        conn = ConnectionFactory.createConnection(config);
        return conn;
      });
      // Flush is controlled by us. This ensures that HBase changing
      // their criteria for flushing does not change how we flush.
      table = conn.getBufferedMutator(TableName.valueOf(tableName));
    } catch (Exception e) {
      sinkCounter.incrementConnectionFailedCount();
      logger.error("Could not load table, " + tableName +
          " from HBase", e);
      throw new FlumeException("Could not load table, " + tableName +
          " from HBase", e);
    }
    try {
      // Validate the column family up front so misconfiguration fails fast
      // here instead of on the first write.
      if (!privilegedExecutor.execute((PrivilegedExceptionAction<Boolean>) () -> {
        Table t = null;
        try {
          t = conn.getTable(TableName.valueOf(tableName));
          return t.getTableDescriptor().hasFamily(columnFamily);
        } finally {
          if (t != null) {
            t.close();
          }
        }
      })) {
        throw new IOException("Table " + tableName
            + " has no such column family " + Bytes.toString(columnFamily));
      }
    } catch (Exception e) {
      // getTableDescriptor also throws IOException, so catch the IOException
      // thrown above or by the getTableDescriptor() call.
      sinkCounter.incrementConnectionFailedCount();
      throw new FlumeException("Error getting column family from HBase."
          + "Please verify that the table " + tableName + " and Column Family, "
          + Bytes.toString(columnFamily) + " exists in HBase, and the"
          + " current user has permissions to access that table.", e);
    }
    super.start();
    sinkCounter.incrementConnectionCreatedCount();
    sinkCounter.start();
  }

  /**
   * Closes the BufferedMutator and the HBase connection. Null checks make
   * this safe to call on a sink that never fully started.
   */
  @Override
  public void stop() {
    try {
      if (table != null) {
        table.close();
      }
      table = null;
    } catch (IOException e) {
      throw new FlumeException("Error closing table.", e);
    }
    try {
      if (conn != null) {
        conn.close();
      }
      conn = null;
    } catch (IOException e) {
      throw new FlumeException("Error closing connection.", e);
    }
    sinkCounter.incrementConnectionClosedCount();
    sinkCounter.stop();
  }

  /**
   * Reads the sink configuration: mandatory table/columnFamily, optional
   * batch size, serializer class, Kerberos credentials, WAL and
   * increment-coalescing switches, and an optional ZooKeeper quorum/znode
   * override applied to the HBase client Configuration.
   *
   * @throws ConfigurationException if the detected HBase version is below 2
   */
  @SuppressWarnings("unchecked")
  @Override
  public void configure(Context context) {
    if (!this.hasVersionAtLeast2()) {
      throw new ConfigurationException(
          "HBase major version number must be at least 2 for hbase2sink");
    }
    tableName = context.getString(HBase2SinkConfigurationConstants.CONFIG_TABLE);
    String cf = context.getString(
        HBase2SinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
    batchSize = context.getLong(
        HBase2SinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
    Context serializerContext = new Context();
    //If not specified, will use HBase defaults.
    String eventSerializerType = context.getString(
        HBase2SinkConfigurationConstants.CONFIG_SERIALIZER);
    Preconditions.checkNotNull(tableName,
        "Table name cannot be empty, please specify in configuration file");
    Preconditions.checkNotNull(cf,
        "Column family cannot be empty, please specify in configuration file");
    //Check for event serializer, if null set event serializer type
    if (eventSerializerType == null || eventSerializerType.isEmpty()) {
      eventSerializerType =
          "org.apache.flume.sink.hbase2.SimpleHBase2EventSerializer";
      logger.info("No serializer defined, Will use default");
    }
    serializerContext.putAll(context.getSubProperties(
        HBase2SinkConfigurationConstants.CONFIG_SERIALIZER_PREFIX));
    columnFamily = cf.getBytes(Charsets.UTF_8);
    try {
      Class<? extends HBase2EventSerializer> clazz =
          (Class<? extends HBase2EventSerializer>)
          Class.forName(eventSerializerType);
      serializer = clazz.newInstance();
      serializer.configure(serializerContext);
    } catch (Exception e) {
      logger.error("Could not instantiate event serializer.", e);
      Throwables.propagate(e);
    }
    kerberosKeytab = context.getString(HBase2SinkConfigurationConstants.CONFIG_KEYTAB);
    kerberosPrincipal = context.getString(HBase2SinkConfigurationConstants.CONFIG_PRINCIPAL);
    enableWal = context.getBoolean(HBase2SinkConfigurationConstants
        .CONFIG_ENABLE_WAL, HBase2SinkConfigurationConstants.DEFAULT_ENABLE_WAL);
    logger.info("The write to WAL option is set to: " + String.valueOf(enableWal));
    if (!enableWal) {
      logger.warn("HBase Sink's enableWal configuration is set to false. All " +
          "writes to HBase will have WAL disabled, and any data in the " +
          "memstore of this region in the Region Server could be lost!");
    }
    batchIncrements = context.getBoolean(
        HBase2SinkConfigurationConstants.CONFIG_COALESCE_INCREMENTS,
        HBase2SinkConfigurationConstants.DEFAULT_COALESCE_INCREMENTS);
    if (batchIncrements) {
      logger.info("Increment coalescing is enabled. Increments will be " +
          "buffered.");
    }
    String zkQuorum = context.getString(HBase2SinkConfigurationConstants
        .ZK_QUORUM);
    Integer port = null;
    /*
     * HBase allows multiple nodes in the quorum, but all need to use the
     * same client port. So get the nodes in host:port format,
     * and ignore the ports for all nodes except the first one. If no port is
     * specified, use default.
     */
    if (zkQuorum != null && !zkQuorum.isEmpty()) {
      StringBuilder zkBuilder = new StringBuilder();
      logger.info("Using ZK Quorum: " + zkQuorum);
      String[] zkHosts = zkQuorum.split(",");
      int length = zkHosts.length;
      for (int i = 0; i < length; i++) {
        String[] zkHostAndPort = zkHosts[i].split(":");
        zkBuilder.append(zkHostAndPort[0].trim());
        if (i != length - 1) {
          zkBuilder.append(",");
        } else {
          zkQuorum = zkBuilder.toString();
        }
        // BUG FIX: String.split() never yields null elements, so the previous
        // "zkHostAndPort[1] == null" check could never fire; a quorum entry
        // without a ":port" crashed with ArrayIndexOutOfBoundsException
        // instead of the intended error. Check the array length instead.
        if (zkHostAndPort.length < 2) {
          throw new FlumeException("Expected client port for the ZK node!");
        }
        if (port == null) {
          port = Integer.parseInt(zkHostAndPort[1].trim());
        } else if (!port.equals(Integer.parseInt(zkHostAndPort[1].trim()))) {
          throw new FlumeException("All Zookeeper nodes in the quorum must " +
              "use the same client port.");
        }
      }
      if (port == null) {
        port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
      }
      this.config.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
      this.config.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, port);
    }
    String hbaseZnode = context.getString(
        HBase2SinkConfigurationConstants.ZK_ZNODE_PARENT);
    if (hbaseZnode != null && !hbaseZnode.isEmpty()) {
      this.config.set(HConstants.ZOOKEEPER_ZNODE_PARENT, hbaseZnode);
    }
    sinkCounter = new SinkCounter(this.getName());
  }

  public Configuration getConfig() {
    return config;
  }

  /**
   * Drains up to {@link #getBatchSize()} events from the channel, converts
   * them to puts/increments via the serializer, and writes them to HBase in
   * one flush per batch.
   *
   * @return BACKOFF when the channel was empty, READY otherwise
   * @throws EventDeliveryException if the batch could not be committed
   */
  @Override
  public Status process() throws EventDeliveryException {
    Status status = Status.READY;
    Channel channel = getChannel();
    Transaction txn = channel.getTransaction();
    List<Row> actions = new LinkedList<>();
    List<Increment> incs = new LinkedList<>();
    try {
      txn.begin();
      if (serializer instanceof BatchAware) {
        ((BatchAware) serializer).onBatchStart();
      }
      long i = 0;
      for (; i < batchSize; i++) {
        Event event = channel.take();
        if (event == null) {
          if (i == 0) {
            status = Status.BACKOFF;
            sinkCounter.incrementBatchEmptyCount();
          } else {
            sinkCounter.incrementBatchUnderflowCount();
          }
          break;
        } else {
          serializer.initialize(event, columnFamily);
          actions.addAll(serializer.getActions());
          incs.addAll(serializer.getIncrements());
        }
      }
      if (i == batchSize) {
        sinkCounter.incrementBatchCompleteCount();
      }
      sinkCounter.addToEventDrainAttemptCount(i);
      putEventsAndCommit(actions, incs, txn);
    } catch (Throwable e) {
      try {
        txn.rollback();
      } catch (Exception e2) {
        logger.error("Exception in rollback. Rollback might not have been " +
            "successful.", e2);
      }
      // BUG FIX: previously the same failure message was logged up to three
      // times (once unconditionally and once again in each rethrow branch);
      // log it exactly once before rethrowing.
      logger.error("Failed to commit transaction." +
          "Transaction rolled back.", e);
      sinkCounter.incrementEventWriteOrChannelFail(e);
      if (e instanceof Error || e instanceof RuntimeException) {
        Throwables.propagate(e);
      } else {
        throw new EventDeliveryException("Failed to commit transaction." +
            "Transaction rolled back.", e);
      }
    } finally {
      txn.close();
    }
    return status;
  }

  /**
   * Writes the collected puts and increments to HBase under the privileged
   * executor, flushes, then commits the Flume transaction. Mutations and
   * increments are sent in two separate privileged actions, mirroring the
   * order callers observe: row actions first, increments second.
   */
  private void putEventsAndCommit(final List<Row> actions,
                                  final List<Increment> incs, Transaction txn) throws Exception {
    privilegedExecutor.execute((PrivilegedExceptionAction<Void>) () -> {
      final List<Mutation> mutations = new ArrayList<>(actions.size());
      for (Row r : actions) {
        if (r instanceof Put) {
          ((Put) r).setDurability(enableWal ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
        }
        // Newer versions of HBase - Increment implements Row.
        if (r instanceof Increment) {
          ((Increment) r).setDurability(enableWal ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
        }
        if (r instanceof Mutation) {
          mutations.add((Mutation) r);
        } else {
          logger.warn("dropping row " + r + " since it is not an Increment or Put");
        }
      }
      table.mutate(mutations);
      table.flush();
      return null;
    });
    privilegedExecutor.execute((PrivilegedExceptionAction<Void>) () -> {
      List<Increment> processedIncrements;
      if (batchIncrements) {
        processedIncrements = coalesceIncrements(incs);
      } else {
        processedIncrements = incs;
      }
      // Only used for unit testing.
      if (debugIncrCallback != null) {
        debugIncrCallback.onAfterCoalesce(processedIncrements);
      }
      for (final Increment i : processedIncrements) {
        i.setDurability(enableWal ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
        table.mutate(i);
      }
      table.flush();
      return null;
    });
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(actions.size());
  }

  /** Returns the {family -> {qualifier -> count}} map of an Increment. */
  @SuppressWarnings("unchecked")
  private Map<byte[], NavigableMap<byte[], Long>> getFamilyMap(Increment inc) {
    Preconditions.checkNotNull(inc, "Increment required");
    return inc.getFamilyMapOfLongs();
  }

  /**
   * Perform "compression" on the given set of increments so that Flume sends
   * the minimum possible number of RPC operations to HBase per batch.
   *
   * @param incs Input: Increment objects to coalesce.
   * @return List of new Increment objects after coalescing the unique counts.
   */
  private List<Increment> coalesceIncrements(Iterable<Increment> incs) {
    Preconditions.checkNotNull(incs, "List of Increments must not be null");
    // Aggregate all of the increment row/family/column counts.
    // The nested map is keyed like this: {row, family, qualifier} => count.
    Map<byte[], Map<byte[], NavigableMap<byte[], Long>>> counters =
        Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (Increment inc : incs) {
      byte[] row = inc.getRow();
      Map<byte[], NavigableMap<byte[], Long>> families = getFamilyMap(inc);
      for (Map.Entry<byte[], NavigableMap<byte[], Long>> familyEntry : families.entrySet()) {
        byte[] family = familyEntry.getKey();
        NavigableMap<byte[], Long> qualifiers = familyEntry.getValue();
        for (Map.Entry<byte[], Long> qualifierEntry : qualifiers.entrySet()) {
          byte[] qualifier = qualifierEntry.getKey();
          Long count = qualifierEntry.getValue();
          incrementCounter(counters, row, family, qualifier, count);
        }
      }
    }
    // Reconstruct list of Increments per unique row/family/qualifier.
    List<Increment> coalesced = Lists.newLinkedList();
    for (Map.Entry<byte[], Map<byte[], NavigableMap<byte[], Long>>> rowEntry :
        counters.entrySet()) {
      byte[] row = rowEntry.getKey();
      Map<byte[], NavigableMap<byte[], Long>> families = rowEntry.getValue();
      Increment inc = new Increment(row);
      for (Map.Entry<byte[], NavigableMap<byte[], Long>> familyEntry : families.entrySet()) {
        byte[] family = familyEntry.getKey();
        NavigableMap<byte[], Long> qualifiers = familyEntry.getValue();
        for (Map.Entry<byte[], Long> qualifierEntry : qualifiers.entrySet()) {
          byte[] qualifier = qualifierEntry.getKey();
          long count = qualifierEntry.getValue();
          inc.addColumn(family, qualifier, count);
        }
      }
      coalesced.add(inc);
    }
    return coalesced;
  }

  /**
   * Helper function for {@link #coalesceIncrements} to increment a counter
   * value in the passed data structure.
   *
   * @param counters Nested data structure containing the counters.
   * @param row Row key to increment.
   * @param family Column family to increment.
   * @param qualifier Column qualifier to increment.
   * @param count Amount to increment by.
   */
  private void incrementCounter(
      Map<byte[], Map<byte[], NavigableMap<byte[], Long>>> counters,
      byte[] row, byte[] family, byte[] qualifier, Long count) {
    Map<byte[], NavigableMap<byte[], Long>> families =
        counters.computeIfAbsent(row, k -> Maps.newTreeMap(Bytes.BYTES_COMPARATOR));
    NavigableMap<byte[], Long> qualifiers =
        families.computeIfAbsent(family, k -> Maps.newTreeMap(Bytes.BYTES_COMPARATOR));
    qualifiers.merge(qualifier, count, (a, b) -> a + b);
  }

  // Name keeps the historical "HBbase" typo for compatibility with existing
  // callers/tests in this package.
  String getHBbaseVersionString() {
    return VersionInfo.getVersion();
  }

  /** Parses the major version from a dotted version string, e.g. "2.1.0" -> 2. */
  private int getMajorVersion(String version) throws NumberFormatException {
    return Integer.parseInt(version.split("\\.")[0]);
  }

  /** Returns true when the HBase client on the classpath is version 2 or newer. */
  private boolean hasVersionAtLeast2() {
    String version = getHBbaseVersionString();
    try {
      if (this.getMajorVersion(version) >= 2) {
        return true;
      }
    } catch (NumberFormatException ex) {
      logger.error(ex.getMessage());
    }
    logger.error("Invalid HBase version for hbase2sink:" + version);
    return false;
  }

  @VisibleForTesting
  @InterfaceAudience.Private
  HBase2EventSerializer getSerializer() {
    return serializer;
  }

  @Override
  public long getBatchSize() {
    return batchSize;
  }

  @VisibleForTesting
  @InterfaceAudience.Private
  interface DebugIncrementsCallback {
    void onAfterCoalesce(Iterable<Increment> increments);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import org.apache.hadoop.hbase.HConstants;
/**
* Constants used for configuration of HBaseSink2
*
*/
public class HBase2SinkConfigurationConstants {
  /**
   * The HBase table which the sink should write to.
   */
  public static final String CONFIG_TABLE = "table";
  /**
   * The column family which the sink should use.
   */
  public static final String CONFIG_COLUMN_FAMILY = "columnFamily";
  /**
   * Maximum number of events the sink should take from the channel per
   * transaction, if available.
   */
  public static final String CONFIG_BATCHSIZE = "batchSize";
  /**
   * The fully qualified class name of the serializer the sink should use.
   */
  public static final String CONFIG_SERIALIZER = "serializer";
  /**
   * Configuration to pass to the serializer.
   */
  public static final String CONFIG_SERIALIZER_PREFIX = CONFIG_SERIALIZER + ".";
  // Operation timeout key — not referenced in this file; presumably in
  // milliseconds (see DEFAULT_TIMEOUT). TODO confirm against the sink code.
  public static final String CONFIG_TIMEOUT = "timeout";
  /**
   * Whether writes go through the HBase write-ahead log. Disabling the WAL
   * risks losing memstore data on region-server failure.
   */
  public static final String CONFIG_ENABLE_WAL = "enableWal";
  /** WAL writes are enabled by default. */
  public static final boolean DEFAULT_ENABLE_WAL = true;
  /** Default timeout value: 60000 (60 seconds, assuming milliseconds). */
  public static final long DEFAULT_TIMEOUT = 60000;
  /** Path to the Kerberos keytab used to authenticate to HBase. */
  public static final String CONFIG_KEYTAB = "kerberosKeytab";
  /** Kerberos principal used to authenticate to HBase. */
  public static final String CONFIG_PRINCIPAL = "kerberosPrincipal";
  /** Comma-separated ZooKeeper quorum; every node must use the same port. */
  public static final String ZK_QUORUM = "zookeeperQuorum";
  /** Parent znode under which HBase keeps its state in ZooKeeper. */
  public static final String ZK_ZNODE_PARENT = "znodeParent";
  /** Default parent znode, mirroring the HBase client default. */
  public static final String DEFAULT_ZK_ZNODE_PARENT =
      HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
  /**
   * Whether increments are coalesced per row/family/qualifier before being
   * sent, minimizing the number of RPCs per batch.
   */
  public static final String CONFIG_COALESCE_INCREMENTS = "coalesceIncrements";
  /** Increment coalescing is disabled by default. */
  public static final Boolean DEFAULT_COALESCE_INCREMENTS = false;
  // Consecutive-failure threshold — not referenced in this file; presumably
  // used by a failure-handling wrapper. TODO confirm against callers.
  public static final int DEFAULT_MAX_CONSECUTIVE_FAILS = 10;
  public static final String CONFIG_MAX_CONSECUTIVE_FAILS = "maxConsecutiveFails";
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.conf.ComponentConfiguration;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import java.nio.charset.Charset;
import java.util.Calendar;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* An {@link HBase2EventSerializer} which parses columns based on a supplied
* regular expression and column name list.
* <p>
* Note that if the regular expression does not return the correct number of
* groups for a particular event, or it does not correctly match an event,
* the event is silently dropped.
* <p>
* Row keys for each event consist of a timestamp concatenated with an
* identifier which enforces uniqueness of keys across flume agents.
* <p>
* See static constant variables for configuration options.
*/
public class RegexHBase2EventSerializer implements HBase2EventSerializer {
// Config vars
/** Regular expression used to parse groups from event data. */
public static final String REGEX_CONFIG = "regex";
public static final String REGEX_DEFAULT = "(.*)";
/** Whether to ignore case when performing regex matches. */
public static final String IGNORE_CASE_CONFIG = "regexIgnoreCase";
public static final boolean IGNORE_CASE_DEFAULT = false;
/** Comma separated list of column names to place matching groups in. */
public static final String COL_NAME_CONFIG = "colNames";
public static final String COLUMN_NAME_DEFAULT = "payload";
/** Index of the row key in matched regex groups */
public static final String ROW_KEY_INDEX_CONFIG = "rowKeyIndex";
/** Placeholder in colNames for row key */
public static final String ROW_KEY_NAME = "ROW_KEY";
/** Whether to deposit event headers into corresponding column qualifiers */
public static final String DEPOSIT_HEADERS_CONFIG = "depositHeaders";
public static final boolean DEPOSIT_HEADERS_DEFAULT = false;
/** What charset to use when serializing into HBase's byte arrays */
public static final String CHARSET_CONFIG = "charset";
public static final String CHARSET_DEFAULT = "UTF-8";
/* This is a nonce used in HBase row-keys, such that the same row-key
* never gets written more than once from within this JVM. */
protected static final AtomicInteger nonce = new AtomicInteger(0);
protected static final String randomKey = RandomStringUtils.randomAlphanumeric(10);
protected byte[] cf;
private byte[] payload;
private final List<byte[]> colNames = Lists.newArrayList();
private Map<String, String> headers;
private boolean depositHeaders;
private Pattern inputPattern;
private Charset charset;
private int rowKeyIndex;
@Override
public void configure(Context context) {
String regex = context.getString(REGEX_CONFIG, REGEX_DEFAULT);
boolean regexIgnoreCase = context.getBoolean(IGNORE_CASE_CONFIG,
IGNORE_CASE_DEFAULT);
depositHeaders = context.getBoolean(DEPOSIT_HEADERS_CONFIG,
DEPOSIT_HEADERS_DEFAULT);
inputPattern = Pattern.compile(regex, Pattern.DOTALL
+ (regexIgnoreCase ? Pattern.CASE_INSENSITIVE : 0));
charset = Charset.forName(context.getString(CHARSET_CONFIG,
CHARSET_DEFAULT));
String colNameStr = context.getString(COL_NAME_CONFIG, COLUMN_NAME_DEFAULT);
String[] columnNames = colNameStr.split(",");
for (String s : columnNames) {
colNames.add(s.getBytes(charset));
}
//Rowkey is optional, default is -1
rowKeyIndex = context.getInteger(ROW_KEY_INDEX_CONFIG, -1);
//if row key is being used, make sure it is specified correct
if (rowKeyIndex >= 0) {
if (rowKeyIndex >= columnNames.length) {
throw new IllegalArgumentException(ROW_KEY_INDEX_CONFIG + " must be " +
"less than num columns " + columnNames.length);
}
if (!ROW_KEY_NAME.equalsIgnoreCase(columnNames[rowKeyIndex])) {
throw new IllegalArgumentException("Column at " + rowKeyIndex + " must be "
+ ROW_KEY_NAME + " and is " + columnNames[rowKeyIndex]);
}
}
}
@Override
public void configure(ComponentConfiguration conf) {
}
@Override
public void initialize(Event event, byte[] columnFamily) {
this.headers = event.getHeaders();
this.payload = event.getBody();
this.cf = columnFamily;
}
/**
* Returns a row-key with the following format:
* [time in millis]-[random key]-[nonce]
*/
protected byte[] getRowKey(Calendar cal) {
/* NOTE: This key generation strategy has the following properties:
*
* 1) Within a single JVM, the same row key will never be duplicated.
* 2) Amongst any two JVM's operating at different time periods (according
* to their respective clocks), the same row key will never be
* duplicated.
* 3) Amongst any two JVM's operating concurrently (according to their
* respective clocks), the odds of duplicating a row-key are non-zero
* but infinitesimal. This would require simultaneous collision in (a)
* the timestamp (b) the respective nonce and (c) the random string.
* The string is necessary since (a) and (b) could collide if a fleet
* of Flume agents are restarted in tandem.
*
* Row-key uniqueness is important because conflicting row-keys will cause
* data loss. */
String rowKey = String.format("%s-%s-%s", cal.getTimeInMillis(),
randomKey, nonce.getAndIncrement());
return rowKey.getBytes(charset);
}
protected byte[] getRowKey() {
return getRowKey(Calendar.getInstance());
}
@Override
public List<Row> getActions() throws FlumeException {
List<Row> actions = Lists.newArrayList();
byte[] rowKey;
Matcher m = inputPattern.matcher(new String(payload, charset));
if (!m.matches()) {
return Lists.newArrayList();
}
if (m.groupCount() != colNames.size()) {
return Lists.newArrayList();
}
try {
if (rowKeyIndex < 0) {
rowKey = getRowKey();
} else {
rowKey = m.group(rowKeyIndex + 1).getBytes(Charsets.UTF_8);
}
Put put = new Put(rowKey);
for (int i = 0; i < colNames.size(); i++) {
if (i != rowKeyIndex) {
put.addColumn(cf, colNames.get(i), m.group(i + 1).getBytes(Charsets.UTF_8));
}
}
if (depositHeaders) {
for (Map.Entry<String, String> entry : headers.entrySet()) {
put.addColumn(cf, entry.getKey().getBytes(charset), entry.getValue().getBytes(charset));
}
}
actions.add(put);
} catch (Exception e) {
throw new FlumeException("Could not get row key!", e);
}
return actions;
}
  /** This serializer emits no counter increments; always returns an empty list. */
  @Override
  public List<Increment> getIncrements() {
    return Lists.newArrayList();
  }
  /** No resources are held by this serializer, so close is a no-op. */
  @Override
  public void close() {
  }
} | 9,771 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-hbase2-sink/src/main/java/org/apache/flume/sink/hbase2/SimpleRowKeyGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hbase2;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import java.util.UUID;
/**
 * Utility class for users to generate their own keys. Any key can be used,
 * this is just a utility that provides a set of simple keys.
 *
 * <p>All keys are returned as UTF-8 encoded bytes. The {@code throws
 * UnsupportedEncodingException} clauses are retained for backward
 * compatibility with existing callers, but the exception can no longer
 * actually be thrown: the charset is referenced via
 * {@link java.nio.charset.StandardCharsets#UTF_8} instead of being looked up
 * by the (non-canonical) name {@code "UTF8"} on every call.
 */
public class SimpleRowKeyGenerator {

  /** Returns {@code prefix} followed by a random type-4 UUID. */
  public static byte[] getUUIDKey(String prefix) throws UnsupportedEncodingException {
    return (prefix + UUID.randomUUID()).getBytes(StandardCharsets.UTF_8);
  }

  /** Returns {@code prefix} followed by a uniformly random long. */
  public static byte[] getRandomKey(String prefix) throws UnsupportedEncodingException {
    return (prefix + new Random().nextLong()).getBytes(StandardCharsets.UTF_8);
  }

  /** Returns {@code prefix} followed by the current time in milliseconds. */
  public static byte[] getTimestampKey(String prefix) throws UnsupportedEncodingException {
    return (prefix + System.currentTimeMillis()).getBytes(StandardCharsets.UTF_8);
  }

  /** Returns {@code prefix} followed by the JVM's high-resolution nano-time counter. */
  public static byte[] getNanoTimestampKey(String prefix) throws UnsupportedEncodingException {
    return (prefix + System.nanoTime()).getBytes(StandardCharsets.UTF_8);
  }
}
| 9,772 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestSecureKuduSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.kudu.test.ClientTestUtil.scanTableToStrings;
import static org.apache.kudu.util.SecurityUtil.KUDU_TICKETCACHE_PROPERTY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableList;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.event.EventBuilder;
import org.apache.kudu.test.KuduTestHarness;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.test.cluster.MiniKuduCluster.MiniKuduClusterBuilder;
/**
 * Tests {@code KuduSink} against a Kerberos-enabled mini cluster with very
 * short ticket lifetimes, verifying that the sink keeps working across a
 * ticket-renewal boundary.
 */
public class TestSecureKuduSink {
  private static final Logger LOG = LoggerFactory.getLogger(TestSecureKuduSink.class);
  private static final int TICKET_LIFETIME_SECONDS = 20;
  private static final int RENEWABLE_LIFETIME_SECONDS = 35;

  // Short KDC lifetimes so the renewal path is exercised within one test run.
  private static final MiniKuduClusterBuilder clusterBuilder =
      KuduTestHarness.getBaseClusterBuilder()
          .kdcTicketLifetime(TICKET_LIFETIME_SECONDS + "s")
          .kdcRenewLifetime(RENEWABLE_LIFETIME_SECONDS + "s")
          .enableKerberos();

  @Rule
  public KuduTestHarness harness = new KuduTestHarness(clusterBuilder);

  @Before
  public void clearTicketCacheProperty() {
    // Let Flume authenticate itself rather than inheriting the harness ticket.
    System.clearProperty(KUDU_TICKETCACHE_PROPERTY);
  }

  /**
   * Writes half the events, waits past the renewable ticket lifetime so the
   * sink must reacquire credentials, then writes the remaining events and
   * verifies all rows arrived.
   */
  @Test
  public void testEventsWithShortTickets() throws Exception {
    Instant start = Instant.now();
    LOG.info("Creating new table...");
    ArrayList<ColumnSchema> columns = new ArrayList<>(1);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("payload", Type.BINARY)
        .key(true).build());
    CreateTableOptions createOptions =
        new CreateTableOptions().setRangePartitionColumns(ImmutableList.of("payload"))
            .setNumReplicas(1);
    String tableName = "test_long_lived_events";
    KuduTable table = harness.getClient().createTable(tableName, new Schema(columns),
        createOptions);
    LOG.info("Created new table.");
    KuduSink sink = KuduSinkTestUtil.createSecureSink(
        tableName, harness.getMasterAddressesAsString(), harness.getClusterRoot());
    sink.start();
    LOG.info("Testing events at the beginning.");
    int eventCount = 10;
    processEvents(sink, 0, eventCount / 2);
    LOG.info("Waiting for tickets to expire");
    // BUG FIX: Duration.between(startInclusive, endExclusive) -- the original
    // call reversed the arguments, producing a NEGATIVE elapsed duration and
    // therefore sleeping longer than intended. Clamp at zero in case setup
    // already took longer than the renewable lifetime.
    Duration elapsedSoFar = Duration.between(start, Instant.now());
    TimeUnit.MILLISECONDS.sleep(Math.max(0L,
        1000L * (RENEWABLE_LIFETIME_SECONDS + 1) - elapsedSoFar.toMillis()));
    // At this point, the ticket will have been outstanding for at least
    // (RENEWABLE_LIFETIME_SECONDS + 1) seconds-- so the sink will need to reacquire a ticket.
    LOG.info("Testing events after ticket renewal.");
    processEvents(sink, eventCount / 2, eventCount);
    List<String> rows = scanTableToStrings(table);
    assertEquals(eventCount + " row(s) expected", eventCount, rows.size());
    for (int i = 0; i < eventCount; i++) {
      assertTrue("incorrect payload", rows.get(i).contains("payload body " + i));
    }
    LOG.info("Testing {} events finished successfully.", eventCount);
  }

  /** Puts events with payloads "payload body &lt;i&gt;" for i in [from, to) through the sink. */
  private void processEvents(KuduSink sink, int from, int to) throws EventDeliveryException {
    List<Event> events = new ArrayList<>();
    for (int i = from; i < to; i++) {
      Event e = EventBuilder.withBody(String.format("payload body %s", i).getBytes(UTF_8));
      events.add(e);
    }
    KuduSinkTestUtil.processEvents(sink, events);
    LOG.info("Events flushed.");
  }
}
| 9,773 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestKuduSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.kudu.test.ClientTestUtil.scanTableToStrings;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import com.google.common.collect.ImmutableList;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.Sink;
import org.apache.flume.Sink.Status;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.kudu.test.KuduTestHarness;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
/**
 * End-to-end tests for {@code KuduSink} against an embedded mini Kudu
 * cluster provided by {@link KuduTestHarness}.
 */
public class TestKuduSink {
  private static final Logger LOG = LoggerFactory.getLogger(TestKuduSink.class);

  @Rule
  public KuduTestHarness harness = new KuduTestHarness();

  /** Creates a one-column table ("payload" BINARY key) with a single replica. */
  private KuduTable createNewTable(String tableName) throws Exception {
    LOG.info("Creating new table...");
    ArrayList<ColumnSchema> columns = new ArrayList<>(1);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("payload", Type.BINARY).key(true).build());
    CreateTableOptions createOptions =
        new CreateTableOptions().setRangePartitionColumns(ImmutableList.of("payload"))
            .setNumReplicas(1);
    KuduTable table = harness.getClient().createTable(tableName, new Schema(columns),
        createOptions);
    LOG.info("Created new table.");
    return table;
  }

  /** Configuring without table name and master addresses must fail fast. */
  @Test
  public void testMandatoryParameters() {
    LOG.info("Testing mandatory parameters...");
    KuduSink sink = new KuduSink(harness.getClient());
    HashMap<String, String> parameters = new HashMap<>();
    Context context = new Context(parameters);
    try {
      Configurables.configure(sink, context);
      Assert.fail("Should have failed due to missing properties");
    } catch (NullPointerException npe) {
      //good
    }
    parameters.put(KuduSinkConfigurationConstants.TABLE_NAME, "tableName");
    context = new Context(parameters);
    try {
      Configurables.configure(sink, context);
      Assert.fail("Should have failed due to missing properties");
    } catch (NullPointerException npe) {
      //good
    }
    LOG.info("Testing mandatory parameters finished successfully.");
  }

  /** Starting a sink against a nonexistent table must throw FlumeException. */
  @Test(expected = FlumeException.class)
  public void testMissingTable() {
    LOG.info("Testing missing table...");
    KuduSink sink = KuduSinkTestUtil.createSink(harness.getClient(), "missingTable",
        new Context());
    sink.start();
    LOG.info("Testing missing table finished successfully.");
  }

  @Test
  public void testEmptyChannelWithDefaults() throws Exception {
    testEventsWithDefaults(0);
  }

  @Test
  public void testOneEventWithDefaults() throws Exception {
    testEventsWithDefaults(1);
  }

  @Test
  public void testThreeEventsWithDefaults() throws Exception {
    testEventsWithDefaults(3);
  }

  @Test
  public void testDuplicateRowsWithDuplicatesIgnored() throws Exception {
    doTestDuplicateRows(true);
  }

  @Test
  public void testDuplicateRowsWithDuplicatesNotIgnored() throws Exception {
    doTestDuplicateRows(false);
  }

  /**
   * Pushes two events with identical keys through the sink and verifies the
   * outcome matches the {@code IGNORE_DUPLICATE_ROWS} setting: either one row
   * lands silently, or process() fails with EventDeliveryException.
   */
  private void doTestDuplicateRows(boolean ignoreDuplicateRows) throws Exception {
    KuduTable table = createNewTable("testDuplicateRows" + ignoreDuplicateRows);
    String tableName = table.getName();
    Context sinkContext = new Context();
    sinkContext.put(KuduSinkConfigurationConstants.IGNORE_DUPLICATE_ROWS,
        Boolean.toString(ignoreDuplicateRows));
    KuduSink sink = KuduSinkTestUtil.createSink(harness.getClient(), tableName, sinkContext);
    sink.start();
    Channel channel = sink.getChannel();
    Transaction tx = channel.getTransaction();
    tx.begin();
    for (int i = 0; i < 2; i++) {
      Event e = EventBuilder.withBody("key-0", UTF_8); // Duplicate keys.
      channel.put(e);
    }
    tx.commit();
    tx.close();
    try {
      Sink.Status status = sink.process();
      if (!ignoreDuplicateRows) {
        fail("Incorrectly ignored duplicate rows!");
      }
      // FIX: assertSame takes (message, expected, actual); the original call
      // reversed the last two and the message wrongly said "empty channel".
      assertSame("incorrect status for non-empty channel", Status.READY, status);
    } catch (EventDeliveryException e) {
      if (ignoreDuplicateRows) {
        throw new AssertionError("Failed to ignore duplicate rows!", e);
      } else {
        LOG.info("Correctly did not ignore duplicate rows", e);
        return;
      }
    }
    // We only get here if the process() succeeded.
    try {
      List<String> rows = scanTableToStrings(table);
      assertEquals("1 row expected", 1, rows.size());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    LOG.info("Testing duplicate events finished successfully.");
  }

  /** Writes {@code eventCount} distinct payload rows and verifies them all. */
  private void testEventsWithDefaults(int eventCount) throws Exception {
    LOG.info("Testing {} events...", eventCount);
    KuduTable table = createNewTable("test" + eventCount + "events");
    String tableName = table.getName();
    List<Event> events = new ArrayList<>();
    for (int i = 0; i < eventCount; i++) {
      Event e = EventBuilder.withBody(String.format("payload body %s", i).getBytes(UTF_8));
      events.add(e);
    }
    KuduSinkTestUtil.processEventsCreatingSink(harness.getClient(), new Context(), tableName,
        events);
    List<String> rows = scanTableToStrings(table);
    assertEquals(eventCount + " row(s) expected", eventCount, rows.size());
    for (int i = 0; i < eventCount; i++) {
      assertTrue("incorrect payload", rows.get(i).contains("payload body " + i));
    }
    LOG.info("Testing {} events finished successfully.", eventCount);
  }
}
| 9,774 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestKeyedKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.kudu.test.ClientTestUtil.scanTableToStrings;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.junit.Rule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.test.KuduTestHarness;
/**
 * Tests {@code SimpleKeyedKuduOperationsProducer} end-to-end through a
 * {@code KuduSink}: the event header supplies the row key, the event body
 * supplies the payload column, for both insert and upsert operations.
 */
public class TestKeyedKuduOperationsProducer {
  private static final Logger LOG = LoggerFactory.getLogger(TestKeyedKuduOperationsProducer.class);

  @Rule
  public KuduTestHarness harness = new KuduTestHarness();

  // Creates a (key STRING PK, payload BINARY) table named after the producer's
  // default column names, with a single replica.
  private KuduTable createNewTable(String tableName) throws Exception {
    LOG.info("Creating new table...");
    ArrayList<ColumnSchema> columns = new ArrayList<>(2);
    columns.add(
        new ColumnSchema.ColumnSchemaBuilder(
            SimpleKeyedKuduOperationsProducer.KEY_COLUMN_DEFAULT, Type.STRING)
            .key(true).build());
    columns.add(
        new ColumnSchema.ColumnSchemaBuilder(
            SimpleKeyedKuduOperationsProducer.PAYLOAD_COLUMN_DEFAULT, Type.BINARY)
            .key(false).build());
    CreateTableOptions createOptions =
        new CreateTableOptions()
            .setRangePartitionColumns(ImmutableList.of(
                SimpleKeyedKuduOperationsProducer.KEY_COLUMN_DEFAULT))
            .setNumReplicas(1);
    KuduTable table =
        harness.getClient().createTable(tableName, new Schema(columns), createOptions);
    LOG.info("Created new table.");
    return table;
  }

  @Test
  public void testEmptyChannelWithInsert() throws Exception {
    testEvents(0, "insert");
  }

  @Test
  public void testOneEventWithInsert() throws Exception {
    testEvents(1, "insert");
  }

  @Test
  public void testThreeEventsWithInsert() throws Exception {
    testEvents(3, "insert");
  }

  @Test
  public void testEmptyChannelWithUpsert() throws Exception {
    testEvents(0, "upsert");
  }

  @Test
  public void testOneEventWithUpsert() throws Exception {
    testEvents(1, "upsert");
  }

  @Test
  public void testThreeEventsWithUpsert() throws Exception {
    testEvents(3, "upsert");
  }

  // Inserts three keyed rows, then upserts a new payload under key 0 and
  // verifies only that row changed while the row count stayed constant.
  @Test
  public void testDuplicateRowsWithUpsert() throws Exception {
    LOG.info("Testing events with upsert...");
    KuduTable table = createNewTable("testDupUpsertEvents");
    String tableName = table.getName();
    Context ctx = new Context(ImmutableMap.of(
        KuduSinkConfigurationConstants.PRODUCER_PREFIX +
            SimpleKeyedKuduOperationsProducer.OPERATION_PROP, "upsert",
        KuduSinkConfigurationConstants.PRODUCER, SimpleKeyedKuduOperationsProducer.class.getName()
    ));
    KuduSink sink = KuduSinkTestUtil.createSink(harness.getClient(), tableName, ctx);
    sink.start();
    int numRows = 3;
    List<Event> events = new ArrayList<>();
    for (int i = 0; i < numRows; i++) {
      Event e = EventBuilder.withBody(String.format("payload body %s", i), UTF_8);
      e.setHeaders(ImmutableMap.of(SimpleKeyedKuduOperationsProducer.KEY_COLUMN_DEFAULT,
          String.format("key %s", i)));
      events.add(e);
    }
    KuduSinkTestUtil.processEvents(sink, events);
    List<String> rows = scanTableToStrings(table);
    assertEquals(numRows + " row(s) expected", numRows, rows.size());
    for (int i = 0; i < numRows; i++) {
      assertTrue("incorrect payload", rows.get(i).contains("payload body " + i));
    }
    Event dup = EventBuilder.withBody("payload body upserted".getBytes(UTF_8));
    // NOTE(review): literal "key" here (and in getEvents below) presumably
    // equals SimpleKeyedKuduOperationsProducer.KEY_COLUMN_DEFAULT, which the
    // loop above uses for the same purpose -- confirm and prefer the constant.
    dup.setHeaders(ImmutableMap.of("key", String.format("key %s", 0)));
    KuduSinkTestUtil.processEvents(sink, ImmutableList.of(dup));
    List<String> upRows = scanTableToStrings(table);
    assertEquals(numRows + " row(s) expected", numRows, upRows.size());
    assertTrue("incorrect payload", upRows.get(0).contains("payload body upserted"));
    for (int i = 1; i < numRows; i++) {
      assertTrue("incorrect payload", upRows.get(i).contains("payload body " + i));
    }
    LOG.info("Testing events with upsert finished successfully.");
  }

  // Common path: run eventCount keyed events through a fresh sink configured
  // for the given operation ("insert" or "upsert") and verify every payload.
  private void testEvents(int eventCount, String operation) throws Exception {
    LOG.info("Testing {} events...", eventCount);
    KuduTable table = createNewTable("test" + eventCount + "events" + operation);
    String tableName = table.getName();
    Context context = new Context(ImmutableMap.of(
        KuduSinkConfigurationConstants.PRODUCER_PREFIX +
            SimpleKeyedKuduOperationsProducer.OPERATION_PROP, operation,
        KuduSinkConfigurationConstants.PRODUCER, SimpleKeyedKuduOperationsProducer.class.getName()
    ));
    List<Event> events = getEvents(eventCount);
    KuduSinkTestUtil.processEventsCreatingSink(harness.getClient(), context, tableName, events);
    List<String> rows = scanTableToStrings(table);
    assertEquals(eventCount + " row(s) expected", eventCount, rows.size());
    for (int i = 0; i < eventCount; i++) {
      assertTrue("incorrect payload", rows.get(i).contains("payload body " + i));
    }
    LOG.info("Testing {} events finished successfully.", eventCount);
  }

  // Builds eventCount events with payload "payload body <i>" and header key "key <i>".
  private List<Event> getEvents(int eventCount) {
    List<Event> events = new ArrayList<>();
    for (int i = 0; i < eventCount; i++) {
      Event e = EventBuilder.withBody(String.format("payload body %s", i).getBytes(UTF_8));
      e.setHeaders(ImmutableMap.of("key", String.format("key %s", i)));
      events.add(e);
    }
    return events;
  }
}
| 9,775 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/KuduSinkTestUtil.java | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.flume.sink.kudu;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import java.util.List;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink.Status;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.client.KuduClient;
/**
 * Shared helpers for building {@code KuduSink} instances backed by an
 * in-memory channel and for pushing Flume events through them in tests.
 */
class KuduSinkTestUtil {
  private static final Logger LOG = LoggerFactory.getLogger(KuduSinkTestUtil.class);

  /** Creates a sink that reuses the given client, targeting {@code tableName}. */
  static KuduSink createSink(KuduClient client, String tableName, Context ctx) {
    return createSink(tableName, client, ctx, client.getMasterAddressesAsString());
  }

  /**
   * Builds and configures a sink with table/master defaults plus the
   * caller-supplied context (caller settings take precedence), wired to a
   * fresh MemoryChannel.
   */
  private static KuduSink createSink(
      String tableName, KuduClient client, Context ctx, String masterAddresses) {
    LOG.info("Creating Kudu sink for '{}' table...", tableName);
    Context context = new Context();
    context.put(KuduSinkConfigurationConstants.TABLE_NAME, tableName);
    context.put(KuduSinkConfigurationConstants.MASTER_ADDRESSES, masterAddresses);
    context.putAll(ctx.getParameters());
    KuduSink sink = new KuduSink(client);
    Configurables.configure(sink, context);
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());
    sink.setChannel(channel);
    LOG.info("Created Kudu sink for '{}' table.", tableName);
    return sink;
  }

  /**
   * Creates a Kerberos-authenticating sink. The client argument is null so
   * the sink presumably constructs its own authenticated client from the
   * keytab/principal settings -- confirm against KuduSink's null handling.
   */
  static KuduSink createSecureSink(String tableName, String masterAddresses, String clusterRoot) {
    Context context = new Context();
    context.put(KuduSinkConfigurationConstants.KERBEROS_KEYTAB, clusterRoot +
        "/krb5kdc/test-user.keytab");
    context.put(KuduSinkConfigurationConstants.KERBEROS_PRINCIPAL, "test-user@KRBTEST.COM");
    return createSink(tableName, null, context, masterAddresses);
  }

  /** Convenience: create a sink, start it, and run {@code events} through it. */
  static void processEventsCreatingSink(
      KuduClient syncClient, Context context, String tableName, List<Event> events)
      throws EventDeliveryException {
    KuduSink sink = createSink(syncClient, tableName, context);
    sink.start();
    processEvents(sink, events);
  }

  /**
   * Puts {@code events} into the sink's channel in a single transaction, runs
   * one process() cycle, and sanity-checks the returned status.
   */
  static void processEvents(KuduSink sink, List<Event> events) throws EventDeliveryException {
    Channel channel = sink.getChannel();
    Transaction tx = channel.getTransaction();
    tx.begin();
    for (Event e : events) {
      channel.put(e);
    }
    tx.commit();
    tx.close();
    Status status = sink.process();
    // FIX: JUnit's assertSame/assertNotSame take (message, expected, actual);
    // the original calls reversed the last two, which garbles failure messages.
    if (events.isEmpty()) {
      assertSame("incorrect status for empty channel", Status.BACKOFF, status);
    } else {
      assertNotSame("incorrect status for non-empty channel", Status.BACKOFF, status);
    }
  }
}
| 9,776 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestRegexpKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.kudu.test.ClientTestUtil.scanTableToStrings;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.ImmutableList;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.kudu.test.KuduTestHarness;
import org.junit.Rule;
import org.junit.Test;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
/**
 * Tests {@code RegexpKuduOperationsProducer}: each event payload contains one
 * or more "|...|"-delimited rows whose comma-separated fields are extracted
 * by {@code TEST_REGEXP} (named capture groups matching the table columns).
 */
public class TestRegexpKuduOperationsProducer {
  // One named group per table column, in schema order.
  private static final String TEST_REGEXP =
      "(?<key>\\d+),(?<byteFld>\\d+),(?<shortFld>\\d+),(?<intFld>\\d+)," +
      "(?<longFld>\\d+),(?<binaryFld>\\w+),(?<stringFld>\\w+),(?<boolFld>\\w+)," +
      "(?<floatFld>\\d+\\.\\d*),(?<doubleFld>\\d+.\\d*)";

  @Rule
  public KuduTestHarness harness = new KuduTestHarness();

  // Creates a 10-column table (one column per capture group) hash-partitioned
  // on the key, with a single replica.
  private KuduTable createNewTable(String tableName) throws Exception {
    ArrayList<ColumnSchema> columns = new ArrayList<>(10);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("byteFld", Type.INT8).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("shortFld", Type.INT16).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("intFld", Type.INT32).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("longFld", Type.INT64).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("binaryFld", Type.BINARY).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("stringFld", Type.STRING).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("boolFld", Type.BOOL).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("floatFld", Type.FLOAT).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("doubleFld", Type.DOUBLE).build());
    CreateTableOptions createOptions =
        new CreateTableOptions().addHashPartitions(ImmutableList.of("key"), 3).setNumReplicas(1);
    return harness.getClient().createTable(tableName, new Schema(columns), createOptions);
  }

  @Test
  public void testEmptyChannel() throws Exception {
    testEvents(0, 1, "insert");
  }

  @Test
  public void testOneEvent() throws Exception {
    testEvents(1, 1, "insert");
  }

  @Test
  public void testThreeEvents() throws Exception {
    testEvents(3, 1, "insert");
  }

  @Test
  public void testThreeEventsWithUpsert() throws Exception {
    testEvents(3, 1, "upsert");
  }

  @Test
  public void testOneEventTwoRowsEach() throws Exception {
    testEvents(1, 2, "insert");
  }

  @Test
  public void testTwoEventsTwoRowsEach() throws Exception {
    testEvents(2, 2, "insert");
  }

  @Test
  public void testTwoEventsTwoRowsEachWithUpsert() throws Exception {
    testEvents(2, 2, "upsert");
  }

  // Runs eventCount events (perEventRowCount rows each) through a sink
  // configured with TEST_REGEXP, then compares the scanned table against the
  // independently-computed expected row strings (sorted, since scan order
  // follows the sorted key space).
  private void testEvents(int eventCount, int perEventRowCount, String operation) throws Exception {
    String tableName = String.format("test%sevents%srowseach%s",
        eventCount, perEventRowCount, operation);
    Context context = new Context();
    context.put(KuduSinkConfigurationConstants.PRODUCER_PREFIX +
        RegexpKuduOperationsProducer.PATTERN_PROP, TEST_REGEXP);
    context.put(KuduSinkConfigurationConstants.PRODUCER_PREFIX +
        RegexpKuduOperationsProducer.OPERATION_PROP, operation);
    context.put(KuduSinkConfigurationConstants.PRODUCER,
        RegexpKuduOperationsProducer.class.getName());
    KuduTable table = createNewTable(tableName);
    List<Event> events = generateEvents(eventCount, perEventRowCount, operation);
    KuduSinkTestUtil.processEventsCreatingSink(harness.getClient(), context, tableName, events);
    List<String> rows = scanTableToStrings(table);
    assertEquals(eventCount * perEventRowCount + " row(s) expected",
        eventCount * perEventRowCount,
        rows.size());
    ArrayList<String> rightAnswers = new ArrayList<>(eventCount * perEventRowCount);
    for (int i = 0; i < eventCount; i++) {
      for (int j = 0; j < perEventRowCount; j++) {
        // In the upsert case the i==0 rows were overwritten with value 1.
        int value = operation.equals("upsert") && i == 0 ? 1 : i;
        String baseAnswer = "INT32 key=1%2$d%3$d1, INT8 byteFld=%1$d, INT16 shortFld=%1$d, " +
            "INT32 intFld=%1$d, INT64 longFld=%1$d, BINARY binaryFld=\"binary\", " +
            "STRING stringFld=string, BOOL boolFld=false, FLOAT floatFld=%1$d.%1$d, " +
            "DOUBLE doubleFld=%1$d.%1$d";
        String rightAnswer = String.format(baseAnswer, value, i, j);
        rightAnswers.add(rightAnswer);
      }
    }
    Collections.sort(rightAnswers);
    for (int k = 0; k < eventCount * perEventRowCount; k++) {
      assertEquals("incorrect row", rightAnswers.get(k), rows.get(k));
    }
  }

  // Builds the event payloads: per event, perEventRowCount "|...|" rows keyed
  // 1<i><j>1; for upsert, one extra event re-writes the i==0 rows with value 1;
  // finally two malformed payloads that the producer must reject/ignore.
  // NOTE(review): baseRow emits 11 comma-separated values while TEST_REGEXP
  // captures only 10 -- the trailing decimal appears to be deliberate slack
  // for non-anchored matching; confirm against the producer's matching mode.
  private List<Event> generateEvents(int eventCount, int perEventRowCount, String operation) {
    List<Event> events = new ArrayList<>();
    for (int i = 0; i < eventCount; i++) {
      StringBuilder payload = new StringBuilder();
      for (int j = 0; j < perEventRowCount; j++) {
        String baseRow = "|1%1$d%2$d1,%1$d,%1$d,%1$d,%1$d,binary," +
            "string,false,%1$d.%1$d,%1$d.%1$d,%1$d.%1$d|";
        String row = String.format(baseRow, i, j);
        payload.append(row);
      }
      Event e = EventBuilder.withBody(payload.toString().getBytes(UTF_8));
      events.add(e);
    }
    if (eventCount > 0) {
      // In the upsert case, add one upsert row per insert event (i.e. per i)
      // All such rows go in one event.
      if (operation.equals("upsert")) {
        StringBuilder upserts = new StringBuilder();
        for (int j = 0; j < perEventRowCount; j++) {
          String row = String.format("|1%2$d%3$d1,%1$d,%1$d,%1$d,%1$d,binary," +
              "string,false,%1$d.%1$d,%1$d.%1$d,%1$d.%1$d|", 1, 0, j);
          upserts.append(row);
        }
        Event e = EventBuilder.withBody(upserts.toString().getBytes(UTF_8));
        events.add(e);
      }
      // Also check some bad/corner cases.
      String mismatchInInt = "|1,2,taco,4,5,x,y,true,1.0.2.0,999|";
      String emptyString = "";
      String[] testCases = {mismatchInInt, emptyString};
      for (String testCase : testCases) {
        Event e = EventBuilder.withBody(testCase.getBytes(UTF_8));
        events.add(e);
      }
    }
    return events;
  }
}
| 9,777 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestAvroKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.kudu.test.ClientTestUtil.scanTableToStrings;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.net.URL;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.junit.Rule;
import org.junit.Test;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.test.KuduTestHarness;
public class TestAvroKuduOperationsProducer {
  // Absolute file:// URI of the test Avro schema, used when the schema is
  // configured globally or supplied per-event via the schema-URL header.
  private static String schemaUriString;
  // Full schema JSON text, used when the schema is supplied per-event via the
  // schema-literal header.
  private static String schemaLiteral;
  static {
    try {
      String schemaPath = "/testAvroKuduOperationsProducer.avsc";
      URL schemaUrl = TestAvroKuduOperationsProducer.class.getResource(schemaPath);
      File schemaFile = Paths.get(schemaUrl.toURI()).toFile();
      schemaUriString = schemaFile.getAbsoluteFile().toURI().toString();
      // Files.toString(File, Charset) is deprecated in Guava; asCharSource()
      // is the supported replacement and reads the same bytes with the same
      // charset.
      schemaLiteral = Files.asCharSource(schemaFile, UTF_8).read();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /** Where the Avro schema is made available to the producer under test. */
  enum SchemaLocation {
    GLOBAL, URL, LITERAL
  }

  @Rule
  public KuduTestHarness harness = new KuduTestHarness();

  @Test
  public void testEmptyChannel() throws Exception {
    testEvents(0, SchemaLocation.GLOBAL);
  }

  @Test
  public void testOneEvent() throws Exception {
    testEvents(1, SchemaLocation.GLOBAL);
  }

  @Test
  public void testThreeEvents() throws Exception {
    testEvents(3, SchemaLocation.GLOBAL);
  }

  @Test
  public void testThreeEventsSchemaURLInEvent() throws Exception {
    testEvents(3, SchemaLocation.URL);
  }

  @Test
  public void testThreeEventsSchemaLiteralInEvent() throws Exception {
    testEvents(3, SchemaLocation.LITERAL);
  }

  /**
   * Runs {@code eventCount} Avro-encoded events through an
   * {@link AvroKuduOperationsProducer}-backed sink and verifies the rows that
   * land in a freshly created Kudu table.
   *
   * @param eventCount number of events to generate and process
   * @param schemaLocation how the Avro schema is delivered to the producer
   */
  private void testEvents(int eventCount, SchemaLocation schemaLocation)
      throws Exception {
    KuduTable table = createNewTable(
        String.format("test%sevents%s", eventCount, schemaLocation));
    String tableName = table.getName();
    // For GLOBAL, the schema URI is set once in the sink context; otherwise
    // each event carries its own schema header and the context stays bare.
    Context context = schemaLocation != SchemaLocation.GLOBAL ? new Context()
        : new Context(ImmutableMap.of(KuduSinkConfigurationConstants.PRODUCER_PREFIX +
            AvroKuduOperationsProducer.SCHEMA_PROP, schemaUriString));
    context.put(KuduSinkConfigurationConstants.PRODUCER,
        AvroKuduOperationsProducer.class.getName());

    List<Event> events = generateEvents(eventCount, schemaLocation);
    KuduSinkTestUtil.processEventsCreatingSink(harness.getClient(), context, tableName, events);

    List<String> answers = makeAnswers(eventCount);
    List<String> rows = scanTableToStrings(table);
    assertEquals("wrong number of rows inserted", answers.size(), rows.size());
    assertArrayEquals("wrong rows inserted", answers.toArray(), rows.toArray());
  }

  /** Creates a single-replica, range-partitioned table matching the Avro record schema. */
  private KuduTable createNewTable(String tableName) throws Exception {
    List<ColumnSchema> columns = new ArrayList<>(5);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("longField", Type.INT64).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("doubleField", Type.DOUBLE).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("nullableField", Type.STRING)
        .nullable(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("stringField", Type.STRING).build());
    CreateTableOptions createOptions =
        new CreateTableOptions().setRangePartitionColumns(ImmutableList.of("key"))
            .setNumReplicas(1);
    return harness.getClient().createTable(tableName, new Schema(columns), createOptions);
  }

  /**
   * Builds {@code eventCount} Flume events whose bodies are binary-encoded
   * Avro records, optionally attaching a schema URL or schema literal header.
   */
  private List<Event> generateEvents(int eventCount,
      SchemaLocation schemaLocation) throws Exception {
    List<Event> events = new ArrayList<>();
    for (int i = 0; i < eventCount; i++) {
      AvroKuduOperationsProducerTestRecord record = new AvroKuduOperationsProducerTestRecord();
      record.setKey(10 * i);
      record.setLongField(2L * i);
      record.setDoubleField(2.71828 * i);
      record.setNullableField(i % 2 == 0 ? null : "taco");
      record.setStringField(String.format("hello %d", i));
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
      DatumWriter<AvroKuduOperationsProducerTestRecord> writer =
          new SpecificDatumWriter<>(AvroKuduOperationsProducerTestRecord.class);
      writer.write(record, encoder);
      encoder.flush();
      Event e = EventBuilder.withBody(out.toByteArray());
      if (schemaLocation == SchemaLocation.URL) {
        e.setHeaders(ImmutableMap.of(AvroKuduOperationsProducer.SCHEMA_URL_HEADER,
            schemaUriString));
      } else if (schemaLocation == SchemaLocation.LITERAL) {
        e.setHeaders(ImmutableMap.of(AvroKuduOperationsProducer.SCHEMA_LITERAL_HEADER,
            schemaLiteral));
      }
      events.add(e);
    }
    return events;
  }

  /**
   * Produces the expected row-scan strings for {@code eventCount} generated
   * records, sorted to match the ordering of {@code scanTableToStrings}.
   */
  private List<String> makeAnswers(int eventCount) {
    List<String> answers = Lists.newArrayList();
    for (int i = 0; i < eventCount; i++) {
      answers.add(String.format(
          "INT32 key=%s, INT64 longField=%s, DOUBLE doubleField=%s, " +
              "STRING nullableField=%s, STRING stringField=hello %s",
          10 * i,
          2 * i,
          2.71828 * i,
          i % 2 == 0 ? "NULL" : "taco",
          i));
    }
    Collections.sort(answers);
    return answers;
  }
}
| 9,778 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/test/java/org/apache/flume/sink/kudu/TestRegexpKuduOperationsProducerParseError.java | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.flume.sink.kudu;
import static org.apache.flume.sink.kudu.RegexpKuduOperationsProducer.*;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.Closeable;
import java.nio.charset.Charset;
import java.util.ArrayList;
import com.google.common.collect.ImmutableList;
import org.apache.flume.Context;
import org.apache.flume.FlumeException;
import org.apache.flume.event.EventBuilder;
import org.apache.kudu.test.KuduTestHarness;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.RuleChain;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.test.CapturingLogAppender;
/**
 * Exercises the parse-error handling policies (REJECT / WARN / IGNORE, plus
 * their deprecated boolean equivalents) of {@code RegexpKuduOperationsProducer}
 * for three failure modes: a row that does not match the pattern, a row whose
 * pattern is missing a column's capture group, and a row whose captured value
 * cannot be coerced to the column type.
 */
public class TestRegexpKuduOperationsProducerParseError {
  // Pattern with capture groups for all three table columns.
  private static final String TEST_REGEXP = "(?<key>\\d+),(?<byteFld>\\d+),(?<stringFld>\\w+)";
  // Pattern deliberately missing the 'stringFld' capture group.
  private static final String TEST_REGEXP_MISSING_COLUMN = "(?<key>\\d+),(?<byteFld>\\d+)";
  private static final String TEST_OPERATION = "insert";

  // Event bodies triggering each failure mode.
  private static final String ROW_UNMATCHING = "invalid row";
  // 1000 does not fit in the INT8 'byteFld' column.
  private static final String ROW_BAD_COLUMN_VALUE = "1,1000,string";
  private static final String ROW_MISSING_COLUMN = "1,1";

  // Expected error-message fragments, asserted against thrown exceptions or
  // captured log output depending on the configured policy.
  private static final String ERROR_MSG_UNMATCHED_ROW =
      "Failed to match the pattern '" + TEST_REGEXP + "' in '" + ROW_UNMATCHING + "'";
  private static final String ERROR_MSG_MISSING_COLUMN =
      "Column 'stringFld' has no matching group in '" + ROW_MISSING_COLUMN + "'";
  private static final String ERROR_MSG_BAD_COLUMN_VALUE =
      "Raw value '" + ROW_BAD_COLUMN_VALUE +
          "' couldn't be parsed to type Type: int8 for column 'byteFld'";

  private static final String POLICY_REJECT = "REJECT";
  private static final String POLICY_WARN = "WARN";
  private static final String POLICY_IGNORE = "IGNORE";

  public KuduTestHarness harness = new KuduTestHarness();
  public ExpectedException thrown = ExpectedException.none();

  // ExpectedException misbehaves when combined with other rules; we use a
  // RuleChain to beat it into submission.
  //
  // See https://stackoverflow.com/q/28846088 for more information.
  @Rule
  public RuleChain chain = RuleChain.outerRule(harness).around(thrown);

  @Test
  public void testMissingColumnThrowsExceptionDefaultConfig() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    testThrowsException(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  @Test
  public void testMissingColumnThrowsExceptionDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    additionalContext.put(SKIP_MISSING_COLUMN_PROP, String.valueOf(false));
    testThrowsException(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  @Test
  public void testMissingColumnThrowsException() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    additionalContext.put(MISSING_COLUMN_POLICY_PROP, POLICY_REJECT);
    testThrowsException(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  @Test
  public void testMissingColumnLogsWarningDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    additionalContext.put(SKIP_MISSING_COLUMN_PROP, String.valueOf(true));
    testLogging(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  @Test
  public void testMissingColumnLogsWarning() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    additionalContext.put(MISSING_COLUMN_POLICY_PROP, POLICY_WARN);
    testLogging(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  @Test
  public void testMissingColumnIgnored() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(PATTERN_PROP, TEST_REGEXP_MISSING_COLUMN);
    additionalContext.put(MISSING_COLUMN_POLICY_PROP, POLICY_IGNORE);
    testIgnored(additionalContext, ERROR_MSG_MISSING_COLUMN, ROW_MISSING_COLUMN);
  }

  // Setting both the deprecated boolean and the new policy property for the
  // same failure mode is a configuration conflict and must be rejected.
  @Test(expected = IllegalArgumentException.class)
  public void testMissingColumnConfigValidation() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(SKIP_MISSING_COLUMN_PROP, String.valueOf(false));
    additionalContext.put(MISSING_COLUMN_POLICY_PROP, POLICY_IGNORE);
    getProducer(additionalContext);
  }

  @Test
  public void testBadColumnValueThrowsExceptionDefaultConfig() throws Exception {
    Context additionalContext = new Context();
    testThrowsException(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test
  public void testBadColumnValueThrowsExceptionDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(SKIP_BAD_COLUMN_VALUE_PROP, String.valueOf(false));
    testThrowsException(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test
  public void testBadColumnValueThrowsException() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(BAD_COLUMN_VALUE_POLICY_PROP, POLICY_REJECT);
    testThrowsException(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test
  public void testBadColumnValueLogsWarningDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(SKIP_BAD_COLUMN_VALUE_PROP, String.valueOf(true));
    testLogging(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test
  public void testBadColumnValueLogsWarning() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(BAD_COLUMN_VALUE_POLICY_PROP, POLICY_WARN);
    testLogging(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test
  public void testBadColumnValueIgnored() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(BAD_COLUMN_VALUE_POLICY_PROP, POLICY_IGNORE);
    testIgnored(additionalContext, ERROR_MSG_BAD_COLUMN_VALUE, ROW_BAD_COLUMN_VALUE);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testBadColumnValueConfigValidation() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(SKIP_BAD_COLUMN_VALUE_PROP, String.valueOf(false));
    additionalContext.put(BAD_COLUMN_VALUE_POLICY_PROP, POLICY_IGNORE);
    getProducer(additionalContext);
  }

  // Unmatched rows default to WARN (unlike the other two modes, which
  // default to REJECT).
  @Test
  public void testUnmatchedRowLogsWarningWithDefaultConfig() throws Exception {
    Context additionalContext = new Context();
    testLogging(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test
  public void testUnmatchedRowThrowsException() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(UNMATCHED_ROW_POLICY_PROP, POLICY_REJECT);
    testThrowsException(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test
  public void testUnmatchedRowLogsWarningDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(WARN_UNMATCHED_ROWS_PROP, String.valueOf(true));
    testLogging(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test
  public void testUnmatchedRowLogsWarning() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(UNMATCHED_ROW_POLICY_PROP, POLICY_WARN);
    testLogging(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test
  public void testUnmatchedRowIgnoredDeprecated() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(WARN_UNMATCHED_ROWS_PROP, String.valueOf(false));
    testIgnored(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test
  public void testUnmatchedRowIgnored() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(UNMATCHED_ROW_POLICY_PROP, POLICY_IGNORE);
    testIgnored(additionalContext, ERROR_MSG_UNMATCHED_ROW, ROW_UNMATCHING);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testUnmatchedRowConfigValidation() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(WARN_UNMATCHED_ROWS_PROP, String.valueOf(false));
    additionalContext.put(UNMATCHED_ROW_POLICY_PROP, POLICY_IGNORE);
    getProducer(additionalContext);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testUnKnownPolicyConfigValidation() throws Exception {
    Context additionalContext = new Context();
    additionalContext.put(UNMATCHED_ROW_POLICY_PROP, "FORCED");
    getProducer(additionalContext);
  }

  /** Asserts that processing {@code eventBody} logs a message containing {@code expectedError}. */
  private void testLogging(
      Context additionalContext, String expectedError, String eventBody) throws Exception {
    String appendedText = processEvent(additionalContext, eventBody);
    assertTrue(appendedText.contains(expectedError));
  }

  /** Asserts that processing {@code eventBody} logs nothing containing {@code expectedError}. */
  private void testIgnored(
      Context additionalContext, String expectedError, String eventBody) throws Exception {
    String appendedText = processEvent(additionalContext, eventBody);
    assertFalse(appendedText.contains(expectedError));
  }

  /** Asserts that processing {@code eventBody} throws a FlumeException carrying {@code expectedError}. */
  private void testThrowsException(
      Context additionalContext, String expectedError, String eventBody) throws Exception {
    thrown.expect(FlumeException.class);
    thrown.expectMessage(expectedError);
    processEvent(additionalContext, eventBody);
  }

  /**
   * Runs a single event body through a producer configured with
   * {@code additionalContext} while capturing log output.
   *
   * @return the text appended to the log during processing
   */
  private String processEvent(Context additionalContext, String eventBody) throws Exception {
    CapturingLogAppender appender = new CapturingLogAppender();
    RegexpKuduOperationsProducer producer = getProducer(additionalContext);
    try (Closeable c = appender.attach()) {
      producer.getOperations(EventBuilder.withBody(eventBody.getBytes(Charset.forName("UTF-8"))));
    }
    return appender.getAppendedText();
  }

  /**
   * Builds a producer bound to a fresh test table, configured with the base
   * pattern/operation plus any overrides in {@code additionalContext}.
   */
  private RegexpKuduOperationsProducer getProducer(Context additionalContext) throws Exception {
    RegexpKuduOperationsProducer producer = new RegexpKuduOperationsProducer();
    producer.initialize(createNewTable("test"));
    Context context = new Context();
    context.put(PATTERN_PROP, TEST_REGEXP);
    context.put(OPERATION_PROP, TEST_OPERATION);
    // Overrides are applied last so each test can replace the defaults.
    context.putAll(additionalContext.getParameters());
    producer.configure(context);
    return producer;
  }

  /** Creates a single-replica, hash-partitioned table with key/byteFld/stringFld columns. */
  private KuduTable createNewTable(String tableName) throws Exception {
    ArrayList<ColumnSchema> columns = new ArrayList<>(10);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("byteFld", Type.INT8).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("stringFld", Type.STRING).build());
    CreateTableOptions createOptions = new CreateTableOptions()
        .addHashPartitions(ImmutableList.of("key"), 3).setNumReplicas(1);
    KuduTable table =
        harness.getClient().createTable(tableName, new Schema(columns), createOptions);
    return table;
  }
}
| 9,779 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/SimpleKeyedKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.kudu.client.Insert;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
import org.apache.kudu.client.PartialRow;
import org.apache.kudu.client.Upsert;
/**
* A simple serializer that generates one {@link Insert} or {@link Upsert}
* per {@link Event} by writing the event body into a BINARY column. The pair
* (key column name, key column value) should be a header in the {@link Event};
* the column name is configurable but the column type must be STRING. Multiple
* key columns are not supported.
*
* <p><strong>Simple Keyed Kudu Operations Producer configuration parameters</strong>
*
* <table cellpadding=3 cellspacing=0 border=1
* summary="Simple Keyed Kudu Operations Producer configuration parameters">
* <tr>
* <th>Property Name</th>
* <th>Default</th>
* <th>Required?</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>producer.payloadColumn</td>
* <td>payload</td>
* <td>No</td>
* <td>The name of the BINARY column to write the Flume event body to.</td>
* </tr>
* <tr>
* <td>producer.keyColumn</td>
* <td>key</td>
* <td>No</td>
* <td>The name of the STRING key column of the target Kudu table.</td>
* </tr>
* <tr>
* <td>producer.operation</td>
* <td>upsert</td>
* <td>No</td>
* <td>The operation used to write events to Kudu. Supported operations
* are 'insert' and 'upsert'</td>
* </tr>
* </table>
*/
public class SimpleKeyedKuduOperationsProducer implements KuduOperationsProducer {
  /** Configuration key for the name of the BINARY column that receives the event body. */
  public static final String PAYLOAD_COLUMN_PROP = "payloadColumn";
  public static final String PAYLOAD_COLUMN_DEFAULT = "payload";
  /** Configuration key for the name of the STRING key column (also the event header name). */
  public static final String KEY_COLUMN_PROP = "keyColumn";
  public static final String KEY_COLUMN_DEFAULT = "key";
  /** Configuration key selecting the write operation: "insert" or "upsert". */
  public static final String OPERATION_PROP = "operation";
  public static final String OPERATION_DEFAULT = "upsert";

  private KuduTable table;
  private String payloadColumn;
  private String keyColumn;
  private String operation = "";

  public SimpleKeyedKuduOperationsProducer(){
  }

  @Override
  public void configure(Context context) {
    payloadColumn = context.getString(PAYLOAD_COLUMN_PROP, PAYLOAD_COLUMN_DEFAULT);
    keyColumn = context.getString(KEY_COLUMN_PROP, KEY_COLUMN_DEFAULT);
    operation = context.getString(OPERATION_PROP, OPERATION_DEFAULT);
  }

  @Override
  public void initialize(KuduTable table) {
    this.table = table;
  }

  /**
   * Builds one Insert or Upsert for the event: the key column value is read
   * from the event header named by {@code keyColumn}, and the raw event body
   * is written to the BINARY {@code payloadColumn}.
   *
   * @param event the Flume event to convert
   * @return a singleton list containing the built operation
   * @throws FlumeException if the key header is absent, the configured
   *     operation is neither "insert" nor "upsert", or the operation cannot
   *     be constructed
   */
  @Override
  public List<Operation> getOperations(Event event) throws FlumeException {
    String key = event.getHeaders().get(keyColumn);
    if (key == null) {
      throw new FlumeException(
          String.format("No value provided for key column %s", keyColumn));
    }
    try {
      Operation op;
      switch (operation.toLowerCase(Locale.ENGLISH)) {
        case "upsert":
          op = table.newUpsert();
          break;
        case "insert":
          op = table.newInsert();
          break;
        default:
          throw new FlumeException(
              String.format("Unexpected operation %s", operation));
      }
      PartialRow row = op.getRow();
      row.addString(keyColumn, key);
      row.addBinary(payloadColumn, event.getBody());
      return Collections.singletonList(op);
    } catch (FlumeException e) {
      // Bug fix: the "Unexpected operation" FlumeException above was previously
      // swallowed by the generic handler below and re-wrapped with the
      // misleading "Failed to create Kudu Operation object" message.
      // Rethrow FlumeExceptions unchanged so the real cause reaches the caller.
      throw e;
    } catch (Exception e) {
      throw new FlumeException("Failed to create Kudu Operation object", e);
    }
  }

  @Override
  public void close() {
    // No resources to release; the KuduTable is owned by the sink.
  }
}
| 9,780 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/SimpleKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.util.Collections;
import java.util.List;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.kudu.client.Insert;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
import org.apache.kudu.client.PartialRow;
/**
* A simple serializer that generates one {@link Insert} per {@link Event}
* by writing the event body into a BINARY column. The headers are discarded.
*
* <p><strong>Simple Kudu Event Producer configuration parameters</strong>
*
* <table cellpadding=3 cellspacing=0 border=1
* summary="Simple Kudu Event Producer configuration parameters">
* <tr>
* <th>Property Name</th>
* <th>Default</th>
* <th>Required?</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>producer.payloadColumn</td>
* <td>payload</td>
* <td>No</td>
* <td>The name of the BINARY column to write the Flume the event body to.</td>
* </tr>
* </table>
*/
public class SimpleKuduOperationsProducer implements KuduOperationsProducer {
public static final String PAYLOAD_COLUMN_PROP = "payloadColumn";
public static final String PAYLOAD_COLUMN_DEFAULT = "payload";
private KuduTable table;
private String payloadColumn;
public SimpleKuduOperationsProducer() {
}
@Override
public void configure(Context context) {
payloadColumn = context.getString(PAYLOAD_COLUMN_PROP, PAYLOAD_COLUMN_DEFAULT);
}
@Override
public void initialize(KuduTable table) {
this.table = table;
}
@Override
public List<Operation> getOperations(Event event) throws FlumeException {
try {
Insert insert = table.newInsert();
PartialRow row = insert.getRow();
row.addBinary(payloadColumn, event.getBody());
return Collections.singletonList((Operation) insert);
} catch (Exception e) {
throw new FlumeException("Failed to create Kudu Insert object", e);
}
}
@Override
public void close() {
}
}
| 9,781 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/KuduSinkConfigurationConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
public class KuduSinkConfigurationConstants {
  /**
   * Comma-separated list of "host:port" Kudu master addresses.
   * The port is optional and defaults to the Kudu Java client's default master
   * port.
   */
  public static final String MASTER_ADDRESSES = "masterAddresses";
  /**
   * The name of the table in Kudu to write to.
   */
  public static final String TABLE_NAME = "tableName";
  /**
   * The fully qualified class name of the KuduOperationsProducer class that the
   * sink should use.
   */
  public static final String PRODUCER = "producer";
  /**
   * Prefix for configuration parameters that are passed to the
   * KuduOperationsProducer.
   */
  public static final String PRODUCER_PREFIX = PRODUCER + ".";
  /**
   * Maximum number of events that the sink should take from the channel per
   * transaction.
   */
  public static final String BATCH_SIZE = "batchSize";
  /**
   * Timeout period for Kudu operations, in milliseconds.
   */
  public static final String TIMEOUT_MILLIS = "timeoutMillis";
  /**
   * Whether to ignore duplicate primary key errors caused by inserts.
   */
  public static final String IGNORE_DUPLICATE_ROWS = "ignoreDuplicateRows";
  /**
   * Path to the keytab file used for authentication
   */
  public static final String KERBEROS_KEYTAB = "kerberosKeytab";
  /**
   * Kerberos principal used for authentication
   */
  public static final String KERBEROS_PRINCIPAL = "kerberosPrincipal";
  /**
   * The effective user if different from the kerberos principal
   */
  public static final String PROXY_USER = "proxyUser";

  // Constants holder: suppress the implicit public constructor so the class
  // cannot be instantiated or subclassed.
  private KuduSinkConfigurationConstants() {
  }
}
}
| 9,782 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/KuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.conf.Configurable;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
/**
* Interface for an operations producer that produces Kudu Operations from
* Flume events.
*/
public interface KuduOperationsProducer extends Configurable, AutoCloseable {
  /**
   * Initializes the operations producer. Called between configure and
   * getOperations.
   *
   * @param table the KuduTable used to create Kudu Operation objects
   */
  void initialize(KuduTable table);
  /**
   * Returns the operations that should be written to Kudu as a result of this event.
   * A single event may map to zero, one, or several operations.
   *
   * @param event Event to convert to one or more Operations
   * @return List of Operations that should be written to Kudu
   */
  List<Operation> getOperations(Event event);
  /**
   * Cleans up any state. Called when the sink is stopped.
   * Overridden from AutoCloseable without a checked exception, so callers
   * need not handle one.
   */
  @Override
  void close();
}
}
| 9,783 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/RegexpKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.Insert;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
import org.apache.kudu.client.PartialRow;
import org.apache.kudu.client.Upsert;
/**
* A regular expression operations producer that generates one or more Kudu
* {@link Insert} or {@link Upsert} operations per Flume {@link Event} by
* parsing the event {@code body} using a regular expression. Values are
* coerced to the types of the named columns in the Kudu table.
*
* <p>Example: If the Kudu table has the schema:
*
* <pre>
* key INT32
* name STRING</pre>
*
* <p>and {@code producer.pattern = (?<key>\\d+),(?<name>\\w+)} then
* {@code RegexpKuduOperationsProducer} will parse the string:
*
* <pre>|12345,Mike||54321,Todd|</pre>
*
* into the rows: {@code (key=12345, name=Mike)} and {@code (key=54321, name=Todd)}.
*
* <p>Note: This class relies on JDK7 named capturing groups, which are
* documented in {@link Pattern}. The name of each capturing group must
* correspond to a column name in the destination Kudu table.
*
* <p><strong><code>RegexpKuduOperationsProducer</code> Flume Configuration Parameters</strong></p>
*
* <table cellpadding=3 cellspacing=0 border=1 summary="Flume Configuration Parameters">
* <tr>
* <th>Property Name</th>
* <th>Default</th>
* <th>Required?</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>producer.pattern</td>
* <td></td>
* <td>Yes</td>
* <td>The regular expression used to parse the event body.</td>
* </tr>
* <tr>
* <td>producer.charset</td>
* <td>utf-8</td>
* <td>No</td>
* <td>The character set of the event body.</td>
* </tr>
* <tr>
* <td>producer.operation</td>
* <td>upsert</td>
* <td>No</td>
* <td>Operation type used to write the event to Kudu. Must be either
* {@code insert} or {@code upsert}.</td>
* </tr>
* <tr>
* <td>producer.skipMissingColumn</td>
* <td>false</td>
* <td>No</td>
* <td>
* <b>@deprecated</b><br/> use {@code producer.missingColumnPolicy}
* What to do if a column in the Kudu table has no corresponding capture group.
* If set to {@code true}, a warning message is logged and the operation is still attempted.
* If set to {@code false}, an exception is thrown and the sink will not process the
* {@code Event}, causing a Flume {@code Channel} rollback.
* </tr>
* <tr>
* <td>producer.skipBadColumnValue</td>
* <td>false</td>
* <td>No</td>
* <td>
* <b>@deprecated</b><br/> use {@code producer.badColumnValuePolicy}
* What to do if a value in the pattern match cannot be coerced to the required type.
* If set to {@code true}, a warning message is logged and the operation is still attempted.
* If set to {@code false}, an exception is thrown and the sink will not process the
* {@code Event}, causing a Flume {@code Channel} rollback.
* </tr>
* <tr>
* <td>producer.warnUnmatchedRows</td>
* <td>true</td>
* <td>No</td>
* <td>
* <b>@deprecated</b><br/> use {@code producer.unmatchedRowPolicy}
* Whether to log a warning about payloads that do not match the pattern. If set to
* {@code false}, event bodies with no matches will be silently dropped.</td>
* </tr>
* <tr>
* <td>producer.missingColumnPolicy</td>
* <td>REJECT</td>
* <td>No</td>
* <td>What to do if a column in the Kudu table has no corresponding capture group.<br/>
* If set to {@code REJECT}, an exception is thrown and the sink will not process the
* {@code Event}, causing a Flume {@code Channel} rollback.<br/>
* If set to {@code WARN}, a warning message is logged and the operation is still produced.<br/>
* If set to {@code IGNORE}, the operation is still produced without any log message.
* </tr>
* <tr>
* <td>producer.badColumnValuePolicy</td>
* <td>REJECT</td>
* <td>No</td>
* <td>What to do if a value in the pattern match cannot be coerced to the required type.<br/>
* If set to {@code REJECT}, an exception is thrown and the sink will not process the
* {@code Event}, causing a Flume {@code Channel} rollback.<br/>
* If set to {@code WARN}, a warning message is logged and the operation is still produced,
* but does not include the given column.<br/>
* If set to {@code IGNORE}, the operation is still produced, but does not include the given
* column and does not log any message.
* </tr>
* <tr>
* <td>producer.unmatchedRowPolicy</td>
* <td>WARN</td>
* <td>No</td>
* <td>What to do if a payload does not match the pattern.<br/>
* If set to {@code REJECT}, an exception is thrown and the sink will not process the
* {@code Event}, causing a Flume {@code Channel} rollback.<br/>
* If set to {@code WARN}, a warning message is logged and the row is skipped,
* not producing an operation.<br/>
* If set to {@code IGNORE}, the row is skipped without any log message.
* </tr>
* </table>
*
* @see Pattern
*/
public class RegexpKuduOperationsProducer implements KuduOperationsProducer {
  private static final Logger logger = LoggerFactory.getLogger(RegexpKuduOperationsProducer.class);

  private static final String INSERT = "insert";
  private static final String UPSERT = "upsert";
  private static final List<String> validOperations = Lists.newArrayList(UPSERT, INSERT);

  public static final String PATTERN_PROP = "pattern";
  public static final String ENCODING_PROP = "encoding";
  public static final String DEFAULT_ENCODING = "utf-8";
  public static final String OPERATION_PROP = "operation";
  public static final String DEFAULT_OPERATION = UPSERT;

  // Deprecated boolean switches, retained so that pre-existing configurations keep
  // working. Each maps onto one of the *_POLICY_PROP properties below; see
  // getParseErrorPolicyCheckingDeprecatedProperty() for the exact mapping.
  @Deprecated
  public static final String SKIP_MISSING_COLUMN_PROP = "skipMissingColumn";
  @Deprecated
  public static final boolean DEFAULT_SKIP_MISSING_COLUMN = false;
  @Deprecated
  public static final String SKIP_BAD_COLUMN_VALUE_PROP = "skipBadColumnValue";
  @Deprecated
  public static final boolean DEFAULT_SKIP_BAD_COLUMN_VALUE = false;
  // NOTE(review): the constant is named WARN_UNMATCHED_ROWS_PROP but the property key
  // is "skipUnmatchedRows", while the class Javadoc advertises "warnUnmatchedRows".
  // Left unchanged here because renaming the key would break existing configurations;
  // confirm the intended key against released documentation.
  @Deprecated
  public static final String WARN_UNMATCHED_ROWS_PROP = "skipUnmatchedRows";
  @Deprecated
  public static final boolean DEFAULT_WARN_UNMATCHED_ROWS = true;

  public static final String MISSING_COLUMN_POLICY_PROP = "missingColumnPolicy";
  public static final ParseErrorPolicy DEFAULT_MISSING_COLUMN_POLICY = ParseErrorPolicy.REJECT;
  public static final String BAD_COLUMN_VALUE_POLICY_PROP = "badColumnValuePolicy";
  public static final ParseErrorPolicy DEFAULT_BAD_COLUMN_VALUE_POLICY = ParseErrorPolicy.REJECT;
  public static final String UNMATCHED_ROW_POLICY_PROP = "unmatchedRowPolicy";
  public static final ParseErrorPolicy DEFAULT_UNMATCHED_ROW_POLICY = ParseErrorPolicy.WARN;

  private KuduTable table;
  private Pattern pattern;
  private Charset charset;
  private String operation;
  private ParseErrorPolicy missingColumnPolicy;
  private ParseErrorPolicy badColumnValuePolicy;
  private ParseErrorPolicy unmatchedRowPolicy;

  public RegexpKuduOperationsProducer() {
  }

  /**
   * Reads and validates the producer configuration: the capture-group regex, the
   * charset used to decode event bodies, the Kudu operation type, and the three
   * parse-error policies (honoring their deprecated boolean forms).
   *
   * @param context the producer sub-context supplied by the sink
   * @throws IllegalArgumentException if the pattern is missing or invalid, the
   *     operation is unrecognized, or a policy value is unknown
   * @throws FlumeException if the configured charset is invalid or unsupported
   */
  @Override
  public void configure(Context context) {
    String regexp = context.getString(PATTERN_PROP);
    Preconditions.checkArgument(regexp != null,
        "Required parameter %s is not specified",
        PATTERN_PROP);
    try {
      pattern = Pattern.compile(regexp);
    } catch (PatternSyntaxException e) {
      throw new IllegalArgumentException(
          String.format("The pattern '%s' is invalid", regexp), e);
    }
    String charsetName = context.getString(ENCODING_PROP, DEFAULT_ENCODING);
    try {
      charset = Charset.forName(charsetName);
    } catch (IllegalArgumentException e) {
      throw new FlumeException(
          String.format("Invalid or unsupported charset %s", charsetName), e);
    }
    // Normalize to lower case so "Upsert"/"INSERT" etc. are accepted.
    operation = context.getString(OPERATION_PROP, DEFAULT_OPERATION).toLowerCase(Locale.ENGLISH);
    Preconditions.checkArgument(
        validOperations.contains(operation),
        "Unrecognized operation '%s'",
        operation);
    missingColumnPolicy = getParseErrorPolicyCheckingDeprecatedProperty(
        context, SKIP_MISSING_COLUMN_PROP, MISSING_COLUMN_POLICY_PROP,
        ParseErrorPolicy.WARN, ParseErrorPolicy.REJECT, DEFAULT_MISSING_COLUMN_POLICY
    );
    badColumnValuePolicy = getParseErrorPolicyCheckingDeprecatedProperty(
        context, SKIP_BAD_COLUMN_VALUE_PROP, BAD_COLUMN_VALUE_POLICY_PROP,
        ParseErrorPolicy.WARN, ParseErrorPolicy.REJECT, DEFAULT_BAD_COLUMN_VALUE_POLICY
    );
    unmatchedRowPolicy = getParseErrorPolicyCheckingDeprecatedProperty(
        context, WARN_UNMATCHED_ROWS_PROP, UNMATCHED_ROW_POLICY_PROP,
        ParseErrorPolicy.WARN, ParseErrorPolicy.IGNORE, DEFAULT_UNMATCHED_ROW_POLICY
    );
  }

  @Override
  public void initialize(KuduTable table) {
    this.table = table;
  }

  /**
   * Produces one Kudu operation per match of the configured pattern against the
   * event body. Each named capture group whose name equals a table column name
   * provides that column's value; per-column and per-row parse failures are
   * handled according to the configured {@link ParseErrorPolicy} values.
   *
   * @param event the Flume event to convert
   * @return the operations produced from the event (possibly empty)
   * @throws FlumeException on an unexpected failure, or when a REJECT policy fires
   */
  @Override
  public List<Operation> getOperations(Event event) throws FlumeException {
    String raw = new String(event.getBody(), charset);
    Matcher m = pattern.matcher(raw);
    boolean match = false;
    Schema schema = table.getSchema();
    List<Operation> ops = Lists.newArrayList();
    while (m.find()) {
      match = true;
      Operation op;
      switch (operation) {
        case UPSERT:
          op = table.newUpsert();
          break;
        case INSERT:
          op = table.newInsert();
          break;
        default:
          // configure() validated the operation, so this branch is unreachable
          // unless the field was mutated outside the normal lifecycle.
          throw new FlumeException(
              String.format("Unrecognized operation type '%s' in getOperations(): " +
                  "this should never happen!", operation));
      }
      PartialRow row = op.getRow();
      for (ColumnSchema col : schema.getColumns()) {
        try {
          // Matcher.group(name) throws IllegalArgumentException when the pattern
          // has no group with that name.
          coerceAndSet(m.group(col.getName()), col.getName(), col.getType(), row);
        } catch (NumberFormatException e) {
          // NumberFormatException extends IllegalArgumentException, so it must be
          // caught first to distinguish bad values from missing capture groups.
          String msg = String.format(
              "Raw value '%s' couldn't be parsed to type %s for column '%s'",
              raw, col.getType(), col.getName());
          logOrThrow(badColumnValuePolicy, msg, e);
        } catch (IllegalArgumentException e) {
          String msg = String.format(
              "Column '%s' has no matching group in '%s'",
              col.getName(), raw);
          logOrThrow(missingColumnPolicy, msg, e);
        } catch (Exception e) {
          throw new FlumeException("Failed to create Kudu operation", e);
        }
      }
      ops.add(op);
    }
    if (!match) {
      String msg = String.format("Failed to match the pattern '%s' in '%s'", pattern, raw);
      logOrThrow(unmatchedRowPolicy, msg, null);
    }
    return ops;
  }

  /**
   * Coerces the string {@code rawVal} to the Kudu type {@code type} and sets the
   * resulting value for column {@code colName} in {@code row}.
   *
   * @param rawVal the raw string column value
   * @param colName the name of the column
   * @param type the Kudu type to convert {@code rawVal} to
   * @param row the row to set the value in
   * @throws NumberFormatException if {@code rawVal} cannot be cast as {@code type}
   */
  private void coerceAndSet(String rawVal, String colName, Type type, PartialRow row)
      throws NumberFormatException {
    switch (type) {
      case BOOL:
        row.addBoolean(colName, Boolean.parseBoolean(rawVal));
        break;
      case INT8:
        row.addByte(colName, Byte.parseByte(rawVal));
        break;
      case INT16:
        row.addShort(colName, Short.parseShort(rawVal));
        break;
      case INT32:
        row.addInt(colName, Integer.parseInt(rawVal));
        break;
      case INT64: // Fall through
      case UNIXTIME_MICROS:
        row.addLong(colName, Long.parseLong(rawVal));
        break;
      case FLOAT:
        row.addFloat(colName, Float.parseFloat(rawVal));
        break;
      case DOUBLE:
        row.addDouble(colName, Double.parseDouble(rawVal));
        break;
      case BINARY:
        row.addBinary(colName, rawVal.getBytes(charset));
        break;
      case STRING:
        row.addString(colName, rawVal);
        break;
      default:
        // Unknown type: skip the column rather than fail the whole operation.
        logger.warn("got unknown type {} for column '{}'-- ignoring this column",
            type, colName);
    }
  }

  /**
   * Applies a parse-error policy: REJECT throws, WARN logs (the cause may be
   * null), and IGNORE does nothing.
   */
  private void logOrThrow(ParseErrorPolicy policy, String msg, Exception e)
      throws FlumeException {
    switch (policy) {
      case REJECT:
        throw new FlumeException(msg, e);
      case WARN:
        logger.warn(msg, e);
        break;
      case IGNORE:
        // Fall through
      default:
    }
  }

  @Override
  public void close() {
  }

  /**
   * Resolves a {@link ParseErrorPolicy} from the configuration, preferring the
   * deprecated boolean property when present (mapping true/false to the given
   * policies) and otherwise parsing the new string-valued property.
   *
   * @throws IllegalArgumentException if both properties are set, or the new
   *     property's value is not a known policy name
   */
  private ParseErrorPolicy getParseErrorPolicyCheckingDeprecatedProperty(
      Context context, String deprecatedPropertyName, String newPropertyName,
      ParseErrorPolicy trueValue, ParseErrorPolicy falseValue, ParseErrorPolicy defaultValue) {
    ParseErrorPolicy policy;
    if (context.containsKey(deprecatedPropertyName)) {
      logger.info("Configuration property {} is deprecated. Use {} instead.",
          deprecatedPropertyName, newPropertyName);
      // Guava's Preconditions formats with %s placeholders, not SLF4J-style {};
      // the previous "{}" template rendered the braces literally in the message.
      Preconditions.checkArgument(!context.containsKey(newPropertyName),
          "Both %s and %s specified. Use only one of them, preferably %s.",
          deprecatedPropertyName, newPropertyName, newPropertyName);
      policy = context.getBoolean(deprecatedPropertyName) ? trueValue : falseValue;
    } else {
      String policyString = context.getString(newPropertyName, defaultValue.name());
      try {
        policy = ParseErrorPolicy.valueOf(policyString.toUpperCase(Locale.ENGLISH));
      } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(
            "Unknown policy '" + policyString + "'. Use one of the following: " +
                Arrays.toString(ParseErrorPolicy.values()), e);
      }
    }
    return policy;
  }

  /** How to react when an event cannot be fully parsed into a row. */
  private enum ParseErrorPolicy {
    WARN,
    IGNORE,
    REJECT
  }
}
| 9,784 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/AvroKuduOperationsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
import org.apache.kudu.client.PartialRow;
/**
* An Avro serializer that generates one operation per event by deserializing the event
* body as an Avro record and mapping its fields to columns in a Kudu table.
*
* <p><strong>Avro Kudu Operations Producer configuration parameters</strong>
* <table cellpadding=3 cellspacing=0 border=1
* summary="Avro Kudu Operations Producer configuration parameters">
* <tr><th>Property Name</th>
* <th>Default</th>
* <th>Required?</th>
* <th>Description</th></tr>
* <tr>
* <td>producer.operation</td>
* <td>upsert</td>
* <td>No</td>
* <td>The operation used to write events to Kudu.
* Supported operations are 'insert' and 'upsert'</td>
* </tr>
* <tr>
* <td>producer.schemaPath</td>
* <td></td>
* <td>No</td>
* <td>The location of the Avro schema file used to deserialize the Avro-encoded event bodies.
* It's used whenever an event does not include its own schema. If not specified, the
* schema must be specified on a per-event basis, either by url or as a literal.
* Schemas must be record type.</td>
* </tr>
* </table>
*/
public class AvroKuduOperationsProducer implements KuduOperationsProducer {
  public static final String OPERATION_PROP = "operation";
  public static final String SCHEMA_PROP = "schemaPath";
  public static final String DEFAULT_OPERATION = "upsert";
  // Event headers that may carry a per-event schema, either by URL or as a literal.
  public static final String SCHEMA_URL_HEADER = "flume.avro.schema.url";
  public static final String SCHEMA_LITERAL_HEADER = "flume.avro.schema.literal";

  private String operation = "";
  // Reused across getOperations() calls so the reader can recycle the record.
  // NOTE(review): because 'reuse' and 'decoder' are shared mutable state, a single
  // producer instance does not look safe for concurrent getOperations() calls —
  // confirm the sink only invokes it from one thread.
  private GenericRecord reuse;
  private KuduTable table;
  // Schema URL from configuration, used when an event carries no schema header.
  private String defaultSchemaUrl;

  /**
   * The binary decoder to reuse for event parsing.
   */
  private BinaryDecoder decoder = null;

  /**
   * A cache of schemas retrieved by URL to avoid re-parsing the schema.
   * The loader reads 'conf', which is declared after these caches; that is safe
   * because 'conf' is only dereferenced when load() runs, after class init.
   */
  private static final LoadingCache<String, Schema> schemasFromURL =
      CacheBuilder.newBuilder()
          .build(new CacheLoader<String, Schema>() {
            @Override
            public Schema load(String url) throws IOException {
              Schema.Parser parser = new Schema.Parser();
              InputStream is = null;
              try {
                // A FileSystem is obtained for every URL, but it is only used
                // for hdfs:/ paths; other URLs are opened directly.
                FileSystem fs = FileSystem.get(URI.create(url), conf);
                if (url.toLowerCase(Locale.ENGLISH).startsWith("hdfs:/")) {
                  is = fs.open(new Path(url));
                } else {
                  is = new URL(url).openStream();
                }
                return parser.parse(is);
              } finally {
                if (is != null) {
                  is.close();
                }
              }
            }
          });

  /**
   * A cache of literal schemas to avoid re-parsing the schema.
   */
  private static final LoadingCache<String, Schema> schemasFromLiteral =
      CacheBuilder.newBuilder()
          .build(new CacheLoader<String, Schema>() {
            @Override
            public Schema load(String literal) {
              Preconditions.checkNotNull(literal,
                  "Schema literal cannot be null without a Schema URL");
              return new Schema.Parser().parse(literal);
            }
          });

  /**
   * A cache of DatumReaders per schema.
   */
  private static final LoadingCache<Schema, DatumReader<GenericRecord>> readers =
      CacheBuilder.newBuilder()
          .build(new CacheLoader<Schema, DatumReader<GenericRecord>>() {
            @Override
            public DatumReader<GenericRecord> load(Schema schema) {
              return new GenericDatumReader<>(schema);
            }
          });

  // Hadoop configuration used by the URL schema loader above.
  private static final Configuration conf = new Configuration();

  public AvroKuduOperationsProducer() {
  }

  /**
   * Reads the operation type and the optional default schema location.
   * Note: the operation string is only validated later, in getOperations().
   */
  @Override
  public void configure(Context context) {
    this.operation = context.getString(OPERATION_PROP, DEFAULT_OPERATION);
    String schemaPath = context.getString(SCHEMA_PROP);
    if (schemaPath != null) {
      defaultSchemaUrl = schemaPath;
    }
  }

  @Override
  public void initialize(KuduTable table) {
    this.table = table;
  }

  /**
   * Deserializes the event body as a single Avro record (using the schema
   * resolved by {@link #getSchema}) and returns exactly one upsert or insert
   * operation populated from that record.
   *
   * @throws FlumeException if the event cannot be deserialized, no schema is
   *     available, or the configured operation is unrecognized
   */
  @Override
  public List<Operation> getOperations(Event event) throws FlumeException {
    Schema schema = getSchema(event);
    DatumReader<GenericRecord> reader = readers.getUnchecked(schema);
    // Reuse the previous decoder's buffer where possible.
    decoder = DecoderFactory.get().binaryDecoder(event.getBody(), decoder);
    try {
      reuse = reader.read(reuse, decoder);
    } catch (IOException e) {
      throw new FlumeException("Cannot deserialize event", e);
    }
    Operation op;
    switch (operation.toLowerCase(Locale.ENGLISH)) {
      case "upsert":
        op = table.newUpsert();
        break;
      case "insert":
        op = table.newInsert();
        break;
      default:
        throw new FlumeException(String.format("Unexpected operation %s", operation));
    }
    setupOp(op, reuse);
    return Collections.singletonList(op);
  }

  /**
   * Copies record fields into the operation's row, one per table column,
   * coercing each value to the column's Kudu type.
   *
   * @throws FlumeException if a column's type is unrecognized or a value cannot
   *     be cast to the required type
   */
  private void setupOp(Operation op, GenericRecord record) {
    PartialRow row = op.getRow();
    for (ColumnSchema col : table.getSchema().getColumns()) {
      String name = col.getName();
      Object value = record.get(name);
      if (value == null) {
        // Set null if nullable, otherwise leave unset for possible Kudu default.
        if (col.isNullable()) {
          row.setNull(name);
        }
      } else {
        // Avro doesn't support 8- or 16-bit integer types, but we'll allow them to be passed as
        // a larger type.
        try {
          switch (col.getType()) {
            case BOOL:
              row.addBoolean(name, (boolean) value);
              break;
            case INT8:
              row.addByte(name, (byte) value);
              break;
            case INT16:
              row.addShort(name, (short) value);
              break;
            case INT32:
              row.addInt(name, (int) value);
              break;
            case INT64: // Fall through
            case UNIXTIME_MICROS:
              row.addLong(name, (long) value);
              break;
            case FLOAT:
              row.addFloat(name, (float) value);
              break;
            case DOUBLE:
              row.addDouble(name, (double) value);
              break;
            case STRING:
              row.addString(name, value.toString());
              break;
            case BINARY:
              row.addBinary(name, (byte[]) value);
              break;
            default:
              throw new FlumeException(String.format(
                  "Unrecognized type %s for column %s", col.getType().toString(), name));
          }
        } catch (ClassCastException e) {
          throw new FlumeException(
              String.format("Failed to coerce value for column '%s' to type %s",
                  col.getName(),
                  col.getType()), e);
        }
      }
    }
  }

  /**
   * Resolves the Avro schema for an event, in priority order: per-event URL
   * header, per-event literal header, then the configured default schema URL.
   *
   * @throws FlumeException if no schema source is available, or the schema
   *     cannot be fetched or parsed
   */
  private Schema getSchema(Event event) throws FlumeException {
    Map<String, String> headers = event.getHeaders();
    String schemaUrl = headers.get(SCHEMA_URL_HEADER);
    String schemaLiteral = headers.get(SCHEMA_LITERAL_HEADER);
    try {
      if (schemaUrl != null) {
        return schemasFromURL.get(schemaUrl);
      } else if (schemaLiteral != null) {
        return schemasFromLiteral.get(schemaLiteral);
      } else if (defaultSchemaUrl != null) {
        return schemasFromURL.get(defaultSchemaUrl);
      } else {
        throw new FlumeException(
            String.format("No schema for event. " +
                "Specify configuration property '%s' or event header '%s'",
                SCHEMA_PROP,
                SCHEMA_URL_HEADER));
      }
    } catch (ExecutionException e) {
      // Checked failure inside a cache loader (e.g. I/O while fetching the URL).
      throw new FlumeException("Cannot get schema", e);
    } catch (RuntimeException e) {
      throw new FlumeException("Cannot parse schema", e);
    }
  }

  @Override
  public void close() {
  }
}
| 9,785 |
0 | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-ng-kudu-sink/src/main/java/org/apache/flume/sink/kudu/KuduSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.kudu;
import java.lang.reflect.InvocationTargetException;
import java.security.PrivilegedAction;
import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.Transaction;
import org.apache.flume.auth.FlumeAuthenticationUtil;
import org.apache.flume.auth.PrivilegedExecutor;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.kudu.client.AsyncKuduClient;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduSession;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.Operation;
import org.apache.kudu.client.OperationResponse;
import org.apache.kudu.client.SessionConfiguration;
/**
* A Flume sink that reads events from a channel and writes them to a Kudu table.
*
* <p><strong>Flume Kudu Sink configuration parameters</strong>
*
* <table cellpadding=3 cellspacing=0 border=1 summary="Flume Kudu Sink configuration parameters">
* <tr><th>Property Name</th><th>Default</th><th>Required?</th><th>Description</th></tr>
* <tr><td>channel</td><td></td><td>Yes</td><td>The name of the Flume channel to read.</td></tr>
* <tr><td>type</td><td></td><td>Yes</td>
* <td>Component name. Must be {@code org.apache.kudu.flume.sink.KuduSink}</td></tr>
* <tr><td>masterAddresses</td><td></td><td>Yes</td>
* <td>Comma-separated list of "host:port" Kudu master addresses.
* The port is optional.</td></tr>
* <tr><td>tableName</td><td></td><td>Yes</td>
* <td>The name of the Kudu table to write to.</td></tr>
* <tr><td>batchSize</td><td>1000</td><td>No</td>
* <td>The maximum number of events the sink takes from the channel per transaction.</td></tr>
* <tr><td>ignoreDuplicateRows</td><td>true</td>
* <td>No</td><td>Whether to ignore duplicate primary key errors caused by inserts.</td></tr>
* <tr><td>timeoutMillis</td><td>10000</td><td>No</td>
* <td>Timeout period for Kudu write operations, in milliseconds.</td></tr>
* <tr><td>producer</td><td>{@link SimpleKuduOperationsProducer}</td><td>No</td>
* <td>The fully-qualified class name of the {@link KuduOperationsProducer}
* the sink should use.</td></tr>
* <tr><td>producer.*</td><td></td><td>(Varies by operations producer)</td>
* <td>Configuration properties to pass to the operations producer implementation.</td></tr>
* </table>
*
* <p><strong>Installation</strong>
*
* <p>After building the sink, in order to use it with Flume, place the file named
* <tt>kudu-flume-sink-<em>VERSION</em>-jar-with-dependencies.jar</tt> in the
* Flume <tt>plugins.d</tt> directory under <tt>kudu-flume-sink/lib/</tt>.
*
* <p>For detailed instructions on using Flume's plugins.d mechanism, please see the plugins.d
* section of the <a href="https://flume.apache.org/FlumeUserGuide.html#the-plugins-d-directory">
* Flume User Guide</a>.
*/
public class KuduSink extends AbstractSink implements Configurable {
private static final Logger logger = LoggerFactory.getLogger(KuduSink.class);
private static final int DEFAULT_BATCH_SIZE = 1000;
private static final Long DEFAULT_TIMEOUT_MILLIS =
AsyncKuduClient.DEFAULT_OPERATION_TIMEOUT_MS;
private static final String DEFAULT_KUDU_OPERATION_PRODUCER =
"org.apache.flume.sink.kudu.SimpleKuduOperationsProducer";
private static final boolean DEFAULT_IGNORE_DUPLICATE_ROWS = true;
private String masterAddresses;
private String tableName;
private int batchSize;
private long timeoutMillis;
private boolean ignoreDuplicateRows;
private KuduTable table;
private KuduSession session;
private KuduClient client;
private KuduOperationsProducer operationsProducer;
private SinkCounter sinkCounter;
private PrivilegedExecutor privilegedExecutor;
public KuduSink() {
this(null);
}
public KuduSink(KuduClient kuduClient) {
this.client = kuduClient;
}
@Override
public synchronized void start() {
Preconditions.checkState(table == null && session == null,
"Please call stop before calling start on an old instance.");
// Client is not null only inside tests.
if (client == null) {
// Creating client with FlumeAuthenticator.
client = privilegedExecutor.execute(
new PrivilegedAction<KuduClient>() {
@Override
public KuduClient run() {
return new KuduClient.KuduClientBuilder(masterAddresses).build();
}
}
);
}
session = client.newSession();
session.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH);
session.setTimeoutMillis(timeoutMillis);
session.setIgnoreAllDuplicateRows(ignoreDuplicateRows);
session.setMutationBufferSpace(batchSize);
try {
table = client.openTable(tableName);
} catch (Exception ex) {
sinkCounter.incrementConnectionFailedCount();
String msg = String.format("Could not open Kudu table '%s'", tableName);
logger.error(msg, ex);
throw new FlumeException(msg, ex);
}
operationsProducer.initialize(table);
super.start();
sinkCounter.incrementConnectionCreatedCount();
sinkCounter.start();
}
@Override
public synchronized void stop() {
Exception ex = null;
try {
operationsProducer.close();
} catch (Exception e) {
ex = e;
logger.error("Error closing operations producer", e);
}
try {
if (client != null) {
client.shutdown();
}
} catch (Exception e) {
ex = e;
logger.error("Error closing client", e);
}
client = null;
table = null;
session = null;
sinkCounter.incrementConnectionClosedCount();
sinkCounter.stop();
if (ex != null) {
throw new FlumeException("Error stopping sink", ex);
}
}
@SuppressWarnings("unchecked")
@Override
public synchronized void configure(Context context) {
masterAddresses = context.getString(KuduSinkConfigurationConstants.MASTER_ADDRESSES);
Preconditions.checkNotNull(masterAddresses,
"Missing master addresses. Please specify property '%s'.",
KuduSinkConfigurationConstants.MASTER_ADDRESSES);
tableName = context.getString(KuduSinkConfigurationConstants.TABLE_NAME);
Preconditions.checkNotNull(tableName,
"Missing table name. Please specify property '%s'",
KuduSinkConfigurationConstants.TABLE_NAME);
batchSize = context.getInteger(KuduSinkConfigurationConstants.BATCH_SIZE, DEFAULT_BATCH_SIZE);
timeoutMillis = context.getLong(KuduSinkConfigurationConstants.TIMEOUT_MILLIS,
DEFAULT_TIMEOUT_MILLIS);
ignoreDuplicateRows = context.getBoolean(KuduSinkConfigurationConstants.IGNORE_DUPLICATE_ROWS,
DEFAULT_IGNORE_DUPLICATE_ROWS);
String operationProducerType = context.getString(KuduSinkConfigurationConstants.PRODUCER);
String kerberosPrincipal =
context.getString(KuduSinkConfigurationConstants.KERBEROS_PRINCIPAL);
String kerberosKeytab = context.getString(KuduSinkConfigurationConstants.KERBEROS_KEYTAB);
String proxyUser = context.getString(KuduSinkConfigurationConstants.PROXY_USER);
privilegedExecutor = FlumeAuthenticationUtil.getAuthenticator(
kerberosPrincipal, kerberosKeytab).proxyAs(proxyUser);
// Check for operations producer, if null set default operations producer type.
if (operationProducerType == null || operationProducerType.isEmpty()) {
operationProducerType = DEFAULT_KUDU_OPERATION_PRODUCER;
logger.warn("No Kudu operations producer provided, using default");
}
Context producerContext = new Context();
producerContext.putAll(context.getSubProperties(
KuduSinkConfigurationConstants.PRODUCER_PREFIX));
try {
Class<? extends KuduOperationsProducer> clazz =
(Class<? extends KuduOperationsProducer>)
Class.forName(operationProducerType);
operationsProducer = clazz.getDeclaredConstructor().newInstance();
operationsProducer.configure(producerContext);
} catch (ClassNotFoundException | NoSuchMethodException |
InstantiationException | IllegalAccessException | InvocationTargetException e) {
logger.error("Could not instantiate Kudu operations producer" , e);
throw new RuntimeException(e);
}
sinkCounter = new SinkCounter(this.getName());
}
public synchronized KuduClient getClient() {
return client;
}
@Override
public synchronized Status process() throws EventDeliveryException {
if (session.hasPendingOperations()) {
// If for whatever reason we have pending operations, refuse to process
// more and tell the caller to try again a bit later. We don't want to
// pile on the KuduSession.
return Status.BACKOFF;
}
Channel channel = getChannel();
Transaction txn = channel.getTransaction();
txn.begin();
try {
long txnEventCount = 0;
for (; txnEventCount < batchSize; txnEventCount++) {
Event event = channel.take();
if (event == null) {
break;
}
List<Operation> operations = operationsProducer.getOperations(event);
for (Operation o : operations) {
session.apply(o);
}
}
logger.debug("Flushing {} events", txnEventCount);
List<OperationResponse> responses = session.flush();
if (responses != null) {
for (OperationResponse response : responses) {
// Throw an EventDeliveryException if at least one of the responses was
// a row error. Row errors can occur for example when an event is inserted
// into Kudu successfully but the Flume transaction is rolled back for some reason,
// and a subsequent replay of the same Flume transaction leads to a
// duplicate key error since the row already exists in Kudu.
// Note: Duplicate keys will not be reported as errors if ignoreDuplicateRows
// is enabled in the config.
if (response.hasRowError()) {
throw new EventDeliveryException("Failed to flush one or more changes. " +
"Transaction rolled back: " + response.getRowError().toString());
}
}
}
if (txnEventCount == 0) {
sinkCounter.incrementBatchEmptyCount();
} else if (txnEventCount == batchSize) {
sinkCounter.incrementBatchCompleteCount();
} else {
sinkCounter.incrementBatchUnderflowCount();
}
txn.commit();
if (txnEventCount == 0) {
return Status.BACKOFF;
}
sinkCounter.addToEventDrainSuccessCount(txnEventCount);
return Status.READY;
} catch (Throwable e) {
txn.rollback();
String msg = "Failed to commit transaction. Transaction rolled back.";
logger.error(msg, e);
if (e instanceof Error || e instanceof RuntimeException) {
throw new RuntimeException(e);
} else {
logger.error(msg, e);
throw new EventDeliveryException(msg, e);
}
} finally {
txn.close();
}
}
}
| 9,786 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink/hive/TestHiveWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hive;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import junit.framework.Assert;
import org.apache.flume.Context;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
public class TestHiveWriter {
static final String dbName = "testing";
static final String tblName = "alerts";
public static final String PART1_NAME = "continent";
public static final String PART2_NAME = "country";
public static final String[] partNames = { PART1_NAME, PART2_NAME };
private static final String COL1 = "id";
private static final String COL2 = "msg";
final String[] colNames = {COL1,COL2};
private String[] colTypes = { "int", "string" };
private static final String PART1_VALUE = "Asia";
private static final String PART2_VALUE = "India";
private final ArrayList<String> partVals;
private final String metaStoreURI;
private HiveDelimitedTextSerializer serializer;
private final HiveConf conf;
private ExecutorService callTimeoutPool;
int timeout = 10000; // msec
@Rule
public TemporaryFolder dbFolder = new TemporaryFolder();
private final Driver driver;
public TestHiveWriter() throws Exception {
  // Partition values the writer targets: continent=Asia, country=India.
  partVals = new ArrayList<String>(2);
  partVals.add(PART1_VALUE);
  partVals.add(PART2_VALUE);
  // null URI — presumably falls back to an embedded/local metastore; confirm
  // against HiveEndPoint's handling of a null metastore URI.
  metaStoreURI = null;
  // Single-threaded executor used by HiveWriter to enforce call timeouts.
  int callTimeoutPoolSize = 1;
  callTimeoutPool = Executors.newFixedThreadPool(callTimeoutPoolSize,
      new ThreadFactoryBuilder().setNameFormat("hiveWriterTest").build());
  // 1) Start metastore. Test conf values must be applied to the HiveConf
  //    before any Hive session is started below.
  conf = new HiveConf(this.getClass());
  TestUtil.setConfValues(conf);
  if (metaStoreURI != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI);
  }
  // 2) Setup Hive client used by the tests to create tables and query records.
  SessionState.start(new CliSessionState(conf));
  driver = new Driver(conf);
}
@Before
public void setUp() throws Exception {
  // 1) Prepare Hive: reset the transaction metadata tables between tests.
  TxnDbUtil.cleanDb(conf);
  TxnDbUtil.prepDb(conf);
  // 2) Setup tables: drop any leftover database, then recreate it under a
  //    fresh temporary folder.
  TestUtil.dropDB(conf, dbName);
  String dbLocation = dbFolder.newFolder(dbName).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
  TestUtil.createDbAndTable(driver, dbName, tblName, partVals, colNames, colTypes, partNames,
      dbLocation);
  // 3) Setup serializer. The field-name list "id,,msg," appears to map input
  //    fields 1 and 3 to the id/msg columns, empty entries skipping a field —
  //    confirm against HiveDelimitedTextSerializer's contract.
  Context ctx = new Context();
  ctx.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  serializer = new HiveDelimitedTextSerializer();
  serializer.configure(ctx);
}
@Test
public void testInstantiate() throws Exception {
  // Creating and immediately closing a writer against a valid endpoint
  // should succeed without errors.
  SinkCounter counter = new SinkCounter(this.getClass().getName());
  HiveEndPoint hiveEndPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
  HiveWriter hiveWriter = new HiveWriter(hiveEndPoint, 10, true, timeout, callTimeoutPool,
      "flumetest", serializer, counter);
  hiveWriter.close();
}
@Test
public void testWriteBasic() throws Exception {
  // Write a few events, flush, close, and verify they all landed in the table.
  SinkCounter counter = new SinkCounter(this.getClass().getName());
  HiveEndPoint hiveEndPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
  HiveWriter hiveWriter = new HiveWriter(hiveEndPoint, 10, true, timeout, callTimeoutPool,
      "flumetest", serializer, counter);
  writeEvents(hiveWriter, 3);
  hiveWriter.flush(false);
  hiveWriter.close();
  checkRecordCountInTable(3);
}
@Test
public void testWriteMultiFlush() throws Exception {
  // Write one record at a time and verify that each flush makes exactly one
  // more record visible, and that unflushed writes are not visible.
  HiveEndPoint hiveEndPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
  SinkCounter counter = new SinkCounter(this.getClass().getName());
  HiveWriter hiveWriter = new HiveWriter(hiveEndPoint, 10, true, timeout, callTimeoutPool,
      "flumetest", serializer, counter);
  checkRecordCountInTable(0);

  SimpleEvent event = new SimpleEvent();

  String firstRecord = "1,xyz,Hello world,abc";
  event.setBody(firstRecord.getBytes());
  hiveWriter.write(event);
  // Not yet flushed, so nothing should be visible.
  checkRecordCountInTable(0);
  hiveWriter.flush(true);
  checkRecordCountInTable(1);

  String secondRecord = "2,xyz,Hello world,abc";
  event.setBody(secondRecord.getBytes());
  hiveWriter.write(event);
  checkRecordCountInTable(1);
  hiveWriter.flush(true);
  checkRecordCountInTable(2);

  String thirdRecord = "3,xyz,Hello world,abc";
  event.setBody(thirdRecord.getBytes());
  hiveWriter.write(event);
  hiveWriter.flush(true);
  checkRecordCountInTable(3);

  hiveWriter.close();
  checkRecordCountInTable(3);
}
  @Test
  public void testTxnBatchConsumption() throws Exception {
    // get a small txn batch and consume it, then roll to new batch; verify
    // the number of remaining txns to ensure Txns are not accidentally skipped
    HiveEndPoint endPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
    SinkCounter sinkCounter = new SinkCounter(this.getClass().getName());
    int txnPerBatch = 3;
    HiveWriter writer = new HiveWriter(endPoint, txnPerBatch, true, timeout, callTimeoutPool,
        "flumetest", serializer, sinkCounter);
    // constructor already opened (consumed) the first txn of the batch
    Assert.assertEquals(writer.getRemainingTxns(),2);
    writer.flush(true);
    Assert.assertEquals(writer.getRemainingTxns(), 1);
    writer.flush(true);
    Assert.assertEquals(writer.getRemainingTxns(), 0);
    writer.flush(true);
    // flip over to next batch
    Assert.assertEquals(writer.getRemainingTxns(), 2);
    writer.flush(true);
    Assert.assertEquals(writer.getRemainingTxns(), 1);
    writer.close();
  }
private void checkRecordCountInTable(int expectedCount)
throws IOException {
int count = TestUtil.listRecordsInTable(driver, dbName, tblName).size();
Assert.assertEquals(expectedCount, count);
}
/**
* Sets up input fields to have same order as table columns,
* Also sets the separator on serde to be same as i/p field separator
* @throws Exception
*/
@Test
public void testInOrderWrite() throws Exception {
HiveEndPoint endPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
SinkCounter sinkCounter = new SinkCounter(this.getClass().getName());
int timeout = 5000; // msec
HiveDelimitedTextSerializer serializer2 = new HiveDelimitedTextSerializer();
Context ctx = new Context();
ctx.put("serializer.fieldnames", COL1 + "," + COL2);
ctx.put("serializer.serdeSeparator", ",");
serializer2.configure(ctx);
HiveWriter writer = new HiveWriter(endPoint, 10, true, timeout, callTimeoutPool,
"flumetest", serializer2, sinkCounter);
SimpleEvent event = new SimpleEvent();
event.setBody("1,Hello world 1".getBytes());
writer.write(event);
event.setBody("2,Hello world 2".getBytes());
writer.write(event);
event.setBody("3,Hello world 3".getBytes());
writer.write(event);
writer.flush(false);
writer.close();
}
@Test
public void testSerdeSeparatorCharParsing() throws Exception {
HiveEndPoint endPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
SinkCounter sinkCounter = new SinkCounter(this.getClass().getName());
int timeout = 10000; // msec
// 1) single character serdeSeparator
HiveDelimitedTextSerializer serializer1 = new HiveDelimitedTextSerializer();
Context ctx = new Context();
ctx.put("serializer.fieldnames", COL1 + "," + COL2);
ctx.put("serializer.serdeSeparator", ",");
serializer1.configure(ctx);
// show not throw
// 2) special character as serdeSeparator
HiveDelimitedTextSerializer serializer2 = new HiveDelimitedTextSerializer();
ctx = new Context();
ctx.put("serializer.fieldnames", COL1 + "," + COL2);
ctx.put("serializer.serdeSeparator", "'\t'");
serializer2.configure(ctx);
// show not throw
// 2) bad spec as serdeSeparator
HiveDelimitedTextSerializer serializer3 = new HiveDelimitedTextSerializer();
ctx = new Context();
ctx.put("serializer.fieldnames", COL1 + "," + COL2);
ctx.put("serializer.serdeSeparator", "ab");
try {
serializer3.configure(ctx);
Assert.assertTrue("Bad serdeSeparator character was accepted", false);
} catch (Exception e) {
// expect an exception
}
}
  /**
   * Two writers on different partitions may interleave: the second is opened
   * and commits while the first still has uncommitted writes.
   */
  @Test
  public void testSecondWriterBeforeFirstCommits() throws Exception {
    // here we open a new writer while the first is still writing (not committed)
    HiveEndPoint endPoint1 = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
    ArrayList<String> partVals2 = new ArrayList<String>(2);
    partVals2.add(PART1_VALUE);
    partVals2.add("Nepal");
    HiveEndPoint endPoint2 = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals2);
    SinkCounter sinkCounter1 = new SinkCounter(this.getClass().getName());
    SinkCounter sinkCounter2 = new SinkCounter(this.getClass().getName());
    HiveWriter writer1 = new HiveWriter(endPoint1, 10, true, timeout, callTimeoutPool, "flumetest",
        serializer, sinkCounter1);
    writeEvents(writer1, 3);
    HiveWriter writer2 = new HiveWriter(endPoint2, 10, true, timeout, callTimeoutPool, "flumetest",
        serializer, sinkCounter2);
    writeEvents(writer2, 3);
    writer2.flush(false); // commit
    writer1.flush(false); // commit
    writer1.close();
    writer2.close();
  }
  /**
   * Complements testSecondWriterBeforeFirstCommits: the second writer is
   * opened only after the first has committed one transaction.
   */
  @Test
  public void testSecondWriterAfterFirstCommits() throws Exception {
    // here we open a new writer after the first writer has committed one txn
    HiveEndPoint endPoint1 = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals);
    ArrayList<String> partVals2 = new ArrayList<String>(2);
    partVals2.add(PART1_VALUE);
    partVals2.add("Nepal");
    HiveEndPoint endPoint2 = new HiveEndPoint(metaStoreURI, dbName, tblName, partVals2);
    SinkCounter sinkCounter1 = new SinkCounter(this.getClass().getName());
    SinkCounter sinkCounter2 = new SinkCounter(this.getClass().getName());
    HiveWriter writer1 = new HiveWriter(endPoint1, 10, true, timeout, callTimeoutPool, "flumetest",
        serializer, sinkCounter1);
    writeEvents(writer1, 3);
    writer1.flush(false); // commit
    HiveWriter writer2 = new HiveWriter(endPoint2, 10, true, timeout, callTimeoutPool, "flumetest",
        serializer, sinkCounter2);
    writeEvents(writer2, 3);
    writer2.flush(false); // commit
    writer1.close();
    writer2.close();
  }
private void writeEvents(HiveWriter writer, int count)
throws InterruptedException, HiveWriter.WriteException {
SimpleEvent event = new SimpleEvent();
for (int i = 1; i <= count; i++) {
event.setBody((i + ",xyz,Hello world,abc").getBytes());
writer.write(event);
}
}
}
| 9,787 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink/hive/TestHiveSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hive;
import com.google.common.collect.Lists;
import junit.framework.Assert;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.channel.BasicTransactionSemantics;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.util.UUID;
/**
 * End-to-end tests for {@link HiveSink}: events are staged in a Flume channel,
 * drained by the sink into an embedded (Derby-backed) Hive metastore, and then
 * verified by querying row counts and inspecting sink counters.
 *
 * Fix: {@link #sleep(int)} previously swallowed InterruptedException; it now
 * restores the thread's interrupt status.
 */
public class TestHiveSink {
  // 1) partitioned table
  static final String dbName = "testing";
  static final String tblName = "alerts";
  public static final String PART1_NAME = "continent";
  public static final String PART2_NAME = "country";
  public static final String[] partNames = { PART1_NAME, PART2_NAME };
  private static final String COL1 = "id";
  private static final String COL2 = "msg";
  final String[] colNames = {COL1,COL2};
  private String[] colTypes = { "int", "string" };
  private static final String PART1_VALUE = "Asia";
  private static final String PART2_VALUE = "India";
  private final ArrayList<String> partitionVals;
  // 2) un-partitioned table
  static final String dbName2 = "testing2";
  static final String tblName2 = "alerts2";
  final String[] colNames2 = {COL1,COL2};
  private String[] colTypes2 = { "int", "string" };
  HiveSink sink = new HiveSink();
  private final HiveConf conf;
  private final Driver driver;
  // NOTE(review): this is the literal string "null", not a null reference;
  // the sink config appears to treat it as "embedded metastore" — confirm
  // against HiveSink before changing.
  final String metaStoreURI;
  @Rule
  public TemporaryFolder dbFolder = new TemporaryFolder();
  private static final Logger LOG = LoggerFactory.getLogger(HiveSink.class);

  /** Configures the embedded metastore, prepares the txn tables, and opens a Driver. */
  public TestHiveSink() throws Exception {
    partitionVals = new ArrayList<String>(2);
    partitionVals.add(PART1_VALUE);
    partitionVals.add(PART2_VALUE);
    metaStoreURI = "null";
    conf = new HiveConf(this.getClass());
    TestUtil.setConfValues(conf);
    // 1) prepare hive: reset the ACID transaction tables
    TxnDbUtil.cleanDb(conf);
    TxnDbUtil.prepDb(conf);
    // 2) Setup Hive client; SessionState must be started before the Driver
    SessionState.start(new CliSessionState(conf));
    driver = new Driver(conf);
  }

  @Before
  public void setUp() throws Exception {
    TestUtil.dropDB(conf, dbName);
    // fresh sink per test with a unique name so counters don't collide
    sink = new HiveSink();
    sink.setName("HiveSink-" + UUID.randomUUID().toString());
    String dbLocation = dbFolder.newFolder(dbName).getCanonicalPath() + ".db";
    dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
    TestUtil.createDbAndTable(driver, dbName, tblName, partitionVals, colNames,
        colTypes, partNames, dbLocation);
  }

  @After
  public void tearDown() throws MetaException, HiveException {
    TestUtil.dropDB(conf, dbName);
  }

  /**
   * Pushes totalRecords events into the channel in one transaction, then
   * drains them with the sink batch by batch, checking row counts before,
   * after, and again after sink.stop().
   *
   * @param pChannel channel to use; null means a fresh MemoryChannel
   */
  public void testSingleWriter(boolean partitioned, String dbName, String tblName,
                               Channel pChannel) throws Exception {
    int totalRecords = 4;
    int batchSize = 2;
    int batchCount = totalRecords / batchSize;
    Context context = new Context();
    context.put("hive.metastore", metaStoreURI);
    context.put("hive.database",dbName);
    context.put("hive.table",tblName);
    if (partitioned) {
      context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
    }
    context.put("autoCreatePartitions","false");
    context.put("batchSize","" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");
    Channel channel = startSink(sink, context, pChannel);
    List<String> bodies = Lists.newArrayList();
    // push the events in two batches
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= totalRecords; j++) {
      Event event = new SimpleEvent();
      String body = j + ",blah,This is a log message,other stuff";
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    // execute sink to process the events
    txn.commit();
    txn.close();
    checkRecordCountInTable(0, dbName, tblName);
    for (int i = 0; i < batchCount ; i++) {
      sink.process();
    }
    checkRecordCountInTable(totalRecords, dbName, tblName);
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName, tblName);
  }

  @Test
  public void testSingleWriterSimplePartitionedTable() throws Exception {
    testSingleWriter(true, dbName, tblName, null);
  }

  @Test
  public void testSingleWriterSimpleUnPartitionedTable()
      throws Exception {
    TestUtil.dropDB(conf, dbName2);
    String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
    dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
    TestUtil.createDbAndTable(driver, dbName2, tblName2, null, colNames2, colTypes2,
        null, dbLocation);
    try {
      testSingleWriter(false, dbName2, tblName2, null);
    } finally {
      TestUtil.dropDB(conf, dbName2);
    }
  }

  /**
   * Partition values may come from event headers (%{header}) and from the
   * event timestamp (%y-%m-%d-%k); two batches land in two hourly partitions,
   * which is also verified through the sink counters.
   */
  @Test
  public void testSingleWriterUseHeaders()
      throws Exception {
    String[] colNames = {COL1, COL2};
    String PART1_NAME = "country";
    String PART2_NAME = "hour";
    String[] partNames = {PART1_NAME, PART2_NAME};
    List<String> partitionVals = null;
    String PART1_VALUE = "%{" + PART1_NAME + "}";
    String PART2_VALUE = "%y-%m-%d-%k";
    partitionVals = new ArrayList<String>(2);
    partitionVals.add(PART1_VALUE);
    partitionVals.add(PART2_VALUE);
    String tblName = "hourlydata";
    TestUtil.dropDB(conf, dbName2);
    String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
    dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
    TestUtil.createDbAndTable(driver, dbName2, tblName, partitionVals, colNames,
        colTypes, partNames, dbLocation);
    int totalRecords = 4;
    int batchSize = 2;
    int batchCount = totalRecords / batchSize;
    Context context = new Context();
    context.put("hive.metastore",metaStoreURI);
    context.put("hive.database",dbName2);
    context.put("hive.table",tblName);
    context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
    context.put("autoCreatePartitions","true");
    context.put("useLocalTimeStamp", "false");
    context.put("batchSize","" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");
    Channel channel = startSink(sink, context);
    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push events in two batches - two per batch. each batch is diff hour
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= totalRecords; j++) {
      Event event = new SimpleEvent();
      String body = j + ",blah,This is a log message,other stuff";
      event.setBody(body.getBytes());
      eventDate.clear();
      eventDate.set(2014, 03, 03, j % batchCount, 1); // yy mm dd hh mm
      event.getHeaders().put( "timestamp",
          String.valueOf(eventDate.getTimeInMillis()) );
      event.getHeaders().put( PART1_NAME, "Asia" );
      bodies.add(body);
      channel.put(event);
    }
    // execute sink to process the events
    txn.commit();
    txn.close();
    checkRecordCountInTable(0, dbName2, tblName);
    for (int i = 0; i < batchCount ; i++) {
      sink.process();
    }
    checkRecordCountInTable(totalRecords, dbName2, tblName);
    sink.stop();
    // verify counters: one connection and one committed batch per partition
    SinkCounter counter = sink.getCounter();
    Assert.assertEquals(2, counter.getConnectionCreatedCount());
    Assert.assertEquals(2, counter.getConnectionClosedCount());
    Assert.assertEquals(2, counter.getBatchCompleteCount());
    Assert.assertEquals(0, counter.getBatchEmptyCount());
    Assert.assertEquals(0, counter.getConnectionFailedCount() );
    Assert.assertEquals(4, counter.getEventDrainAttemptCount());
    Assert.assertEquals(4, counter.getEventDrainSuccessCount() );
  }

  /** With a 3s heartBeatInterval, sleeping past it between batches must not break writes. */
  @Test
  public void testHeartBeat()
      throws EventDeliveryException, IOException {
    int batchSize = 2;
    int batchCount = 3;
    int totalRecords = batchCount * batchSize;
    Context context = new Context();
    context.put("hive.metastore", metaStoreURI);
    context.put("hive.database", dbName);
    context.put("hive.table", tblName);
    context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
    context.put("autoCreatePartitions","true");
    context.put("batchSize","" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("hive.txnsPerBatchAsk", "20");
    context.put("heartBeatInterval", "3"); // heartbeat in seconds
    Channel channel = startSink(sink, context);
    List<String> bodies = Lists.newArrayList();
    // push the events in two batches
    for (int i = 0; i < batchCount; i++) {
      Transaction txn = channel.getTransaction();
      txn.begin();
      for (int j = 1; j <= batchSize; j++) {
        Event event = new SimpleEvent();
        String body = i * j + ",blah,This is a log message,other stuff";
        event.setBody(body.getBytes());
        bodies.add(body);
        channel.put(event);
      }
      // execute sink to process the events
      txn.commit();
      txn.close();
      sink.process();
      sleep(3000); // allow heartbeat to happen
    }
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName, tblName);
  }

  @Test
  public void testJsonSerializer() throws Exception {
    int batchSize = 2;
    int batchCount = 2;
    int totalRecords = batchCount * batchSize;
    Context context = new Context();
    context.put("hive.metastore",metaStoreURI);
    context.put("hive.database",dbName);
    context.put("hive.table",tblName);
    context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
    context.put("autoCreatePartitions","true");
    context.put("batchSize","" + batchSize);
    context.put("serializer", HiveJsonSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");
    Channel channel = startSink(sink, context);
    List<String> bodies = Lists.newArrayList();
    // push the events in two batches
    for (int i = 0; i < batchCount; i++) {
      Transaction txn = channel.getTransaction();
      txn.begin();
      for (int j = 1; j <= batchSize; j++) {
        Event event = new SimpleEvent();
        String body = "{\"id\" : 1, \"msg\" : \"using json serializer\"}";
        event.setBody(body.getBytes());
        bodies.add(body);
        channel.put(event);
      }
      // execute sink to process the events
      txn.commit();
      txn.close();
      sink.process();
    }
    checkRecordCountInTable(totalRecords, dbName, tblName);
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName, tblName);
  }

  /** A channel that always fails on take() must increment the channelReadFail counter. */
  @Test
  public void testErrorCounter() throws Exception {
    Channel channel = Mockito.mock(Channel.class);
    Mockito.when(channel.take()).thenThrow(new ChannelException("dummy"));
    Transaction transaction = Mockito.mock(BasicTransactionSemantics.class);
    Mockito.when(channel.getTransaction()).thenReturn(transaction);
    try {
      testSingleWriter(true, dbName, tblName, channel);
    } catch (EventDeliveryException e) {
      //Expected exception
    }
    SinkCounter sinkCounter = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
    Assert.assertEquals(1, sinkCounter.getChannelReadFail());
  }

  private void sleep(int n) {
    try {
      Thread.sleep(n);
    } catch (InterruptedException e) {
      // Restore the interrupt status rather than swallowing it, so the
      // surrounding test (and JUnit) can observe the interruption.
      Thread.currentThread().interrupt();
    }
  }

  private static Channel startSink(HiveSink sink, Context context) {
    return startSink(sink, context, null);
  }

  /** Configures sink + channel (MemoryChannel unless pChannel given), wires and starts them. */
  private static Channel startSink(HiveSink sink, Context context, Channel pChannel) {
    Configurables.configure(sink, context);
    Channel channel = pChannel == null ? new MemoryChannel() : pChannel;
    Configurables.configure(channel, context);
    sink.setChannel(channel);
    sink.start();
    return channel;
  }

  /** Asserts that db.tbl currently holds exactly expectedCount rows. */
  private void checkRecordCountInTable(int expectedCount, String db, String tbl)
      throws IOException {
    int count = TestUtil.listRecordsInTable(driver, db, tbl).size();
    Assert.assertEquals(expectedCount, count);
  }
}
| 9,788 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/test/java/org/apache/flume/sink/hive/TestUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hive;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hive.hcatalog.streaming.QueryFailedException;
import org.apache.thrift.TException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
/**
 * Shared helpers for the Hive sink tests: configures an embedded (Derby)
 * metastore, creates/drops test databases and transactional tables, and reads
 * rows back through a Hive {@link Driver}.
 *
 * Fixes: {@link #dropDB} leaked the metastore client on the success path
 * (close was only in the catch block); {@link #runDDL} ran every statement
 * twice via an unconditional {@code continue} and always returned false.
 */
public class TestUtil {
  private static final String txnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";

  /**
   * Set up the configuration so it will use the DbTxnManager, concurrency will be set to true,
   * and the JDBC configs will be set for putting the transaction and lock info in the embedded
   * metastore.
   * @param conf HiveConf to add these values to.
   */
  public static void setConfValues(HiveConf conf) {
    conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, txnMgr);
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
    conf.set("fs.raw.impl", RawFileSystem.class.getName());
    try {
      conf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION, false);
      conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
          "jdbc:derby:;databaseName=metastore_db;create=true");
      conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER,
          "org.apache.derby.jdbc.EmbeddedDriver");
      conf.setBoolVar(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL, true);
      conf.setIntVar(HiveConf.ConfVars.METASTORE_SERVER_PORT, 0);
      conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, System.getProperty("java.io.tmpdir"));
    } catch (Throwable t) {
      // Best effort: some ConfVars may not exist on every supported Hive
      // version; continue with whatever could be applied.
      t.printStackTrace();
    }
  }

  /** Creates a database and a bucketed, transactional ORC table (optionally partitioned). */
  public static void createDbAndTable(Driver driver, String databaseName,
                                      String tableName, List<String> partVals,
                                      String[] colNames, String[] colTypes,
                                      String[] partNames, String dbLocation)
      throws Exception {
    String dbUri = "raw://" + dbLocation;
    String tableLoc = dbUri + Path.SEPARATOR + tableName;
    runDDL(driver, "create database IF NOT EXISTS " + databaseName + " location '" + dbUri + "'");
    runDDL(driver, "use " + databaseName);
    String crtTbl = "create table " + tableName +
        " ( " + getTableColumnsStr(colNames, colTypes) + " )" +
        getPartitionStmtStr(partNames) +
        " clustered by ( " + colNames[0] + " )" +
        " into 10 buckets " +
        " stored as orc " +
        " location '" + tableLoc + "'" +
        " TBLPROPERTIES ('transactional'='true')";
    runDDL(driver, crtTbl);
    System.out.println("crtTbl = " + crtTbl);
    if (partNames != null && partNames.length != 0) {
      String addPart = "alter table " + tableName + " add partition ( " +
          getTablePartsStr2(partNames, partVals) + " )";
      runDDL(driver, addPart);
    }
  }

  // returns "" for an unpartitioned table
  private static String getPartitionStmtStr(String[] partNames) {
    if (partNames == null || partNames.length == 0) {
      return "";
    }
    return " partitioned by (" + getTablePartsStr(partNames) + " )";
  }

  // delete db and all tables in it
  public static void dropDB(HiveConf conf, String databaseName)
      throws HiveException, MetaException {
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      for (String table : client.listTableNamesByFilter(databaseName, "", (short) -1)) {
        client.dropTable(databaseName, table, true, true);
      }
      client.dropDatabase(databaseName);
    } catch (TException e) {
      // best effort: the database may not exist yet; ignore
    } finally {
      // always release the metastore connection (previously leaked on success)
      client.close();
    }
  }

  // converts colNames/colTypes into "name1 type1,name2 type2"
  private static String getTableColumnsStr(String[] colNames, String[] colTypes) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < colNames.length; ++i) {
      sb.append(colNames[i]).append(' ').append(colTypes[i]);
      if (i < colNames.length - 1) {
        sb.append(",");
      }
    }
    return sb.toString();
  }

  // converts partNames into "partName1 string, partName2 string"
  private static String getTablePartsStr(String[] partNames) {
    if (partNames == null || partNames.length == 0) {
      return "";
    }
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < partNames.length; ++i) {
      sb.append(partNames[i]).append(" string");
      if (i < partNames.length - 1) {
        sb.append(",");
      }
    }
    return sb.toString();
  }

  // converts partNames,partVals into "partName1=val1, partName2=val2"
  // (assumes partVals.size() <= partNames.length)
  private static String getTablePartsStr2(String[] partNames, List<String> partVals) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < partVals.size(); ++i) {
      sb.append(partNames[i]).append(" = '").append(partVals.get(i)).append("'");
      if (i < partVals.size() - 1) {
        sb.append(",");
      }
    }
    return sb.toString();
  }

  /** Returns all rows of db.tbl as formatted strings. */
  public static ArrayList<String> listRecordsInTable(Driver driver, String dbName, String tblName)
      throws IOException {
    driver.run("select * from " + dbName + "." + tblName);
    ArrayList<String> res = new ArrayList<String>();
    driver.getResults(res);
    return res;
  }

  /** Returns the rows of one (continent, country) partition. */
  public static ArrayList<String> listRecordsInPartition(Driver driver, String dbName,
                                                         String tblName, String continent,
                                                         String country)
      throws IOException {
    driver.run("select * from " + dbName + "." + tblName + " where continent='"
        + continent + "' and country='" + country + "'");
    ArrayList<String> res = new ArrayList<String>();
    driver.getResults(res);
    return res;
  }

  /**
   * Local file system registered under the "raw" scheme so the test warehouse
   * lives on local disk, with permissions approximated from java.io.File.
   */
  public static class RawFileSystem extends RawLocalFileSystem {
    private static final URI NAME;
    static {
      try {
        NAME = new URI("raw:///");
      } catch (URISyntaxException se) {
        throw new IllegalArgumentException("bad uri", se);
      }
    }

    @Override
    public URI getUri() {
      return NAME;
    }

    /** Runs cmd with the file's canonical path appended as the last argument. */
    static String execCommand(File f, String... cmd) throws IOException {
      String[] args = new String[cmd.length + 1];
      System.arraycopy(cmd, 0, args, 0, cmd.length);
      args[cmd.length] = f.getCanonicalPath();
      return Shell.execCommand(args);
    }

    @Override
    public FileStatus getFileStatus(Path path) throws IOException {
      File file = pathToFile(path);
      if (!file.exists()) {
        throw new FileNotFoundException("Can't find " + path);
      }
      // get close enough: rebuild a coarse permission mask from the three
      // java.io.File capability checks
      short mod = 0;
      if (file.canRead()) {
        mod |= 0444;
      }
      if (file.canWrite()) {
        mod |= 0200;
      }
      if (file.canExecute()) {
        mod |= 0111;
      }
      ShimLoader.getHadoopShims();
      return new FileStatus(file.length(), file.isDirectory(), 1, 1024,
          file.lastModified(), file.lastModified(),
          FsPermission.createImmutable(mod), "owen", "users", path);
    }
  }

  /**
   * Executes one DDL statement and reports success. The old version looped
   * retryCount+1 times with an unconditional {@code continue} — running each
   * statement twice — and always returned {@code false}. Driver surfaces
   * failures through its response object, not an exception, so a single run
   * suffices here.
   */
  private static boolean runDDL(Driver driver, String sql) throws QueryFailedException {
    driver.run(sql);
    return true;
  }
}
| 9,789 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/HiveEventSerializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
import org.apache.flume.Event;
import org.apache.flume.conf.Configurable;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.RecordWriter;
import org.apache.hive.hcatalog.streaming.StreamingException;
import org.apache.hive.hcatalog.streaming.TransactionBatch;
import java.io.IOException;
import java.util.Collection;
/**
 * Strategy for converting Flume {@link Event}s into records for the Hive
 * Streaming API. Implementations are configured through the Flume
 * {@code Context} (see {@link Configurable}).
 */
public interface HiveEventSerializer extends Configurable {
  /** Writes a single event into the given (open) transaction batch. */
  public void write(TransactionBatch batch, Event e)
      throws StreamingException, IOException, InterruptedException;

  /** Writes a collection of raw event bodies into the given transaction batch. */
  public void write(TransactionBatch txnBatch, Collection<byte[]> events)
      throws StreamingException, IOException, InterruptedException;

  /** Creates the streaming {@link RecordWriter} appropriate for this format and endpoint. */
  RecordWriter createRecordWriter(HiveEndPoint endPoint)
      throws StreamingException, IOException, ClassNotFoundException;
}
| 9,790 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/HiveWriter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
import org.apache.flume.Event;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.RecordWriter;
import org.apache.hive.hcatalog.streaming.SerializationError;
import org.apache.hive.hcatalog.streaming.StreamingConnection;
import org.apache.hive.hcatalog.streaming.StreamingException;
import org.apache.hive.hcatalog.streaming.StreamingIOFailure;
import org.apache.hive.hcatalog.streaming.TransactionBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Internal API intended for HiveSink use.
 * Owns one Hive streaming connection to a single endpoint: its RecordWriter,
 * the currently open TransactionBatch, an event buffer, and per-writer stats.
 */
class HiveWriter {
  private static final Logger LOG = LoggerFactory.getLogger(HiveWriter.class);
  private final HiveEndPoint endPoint;            // destination table/partition
  private HiveEventSerializer serializer;         // event -> Hive record conversion
  private final StreamingConnection connection;
  private final int txnsPerBatch;                 // txns requested per batch
  private final RecordWriter recordWriter;
  private TransactionBatch txnBatch;              // currently open txn batch
  private final ExecutorService callTimeoutPool;  // executes timed Hive calls
  private final long callTimeout;                 // per-call time budget (ms)
  private long lastUsed; // time of last flush on this writer
  private SinkCounter sinkCounter;
  private int batchCounter;   // counts flushes since last resetCounters()
  private long eventCounter;  // events written since last resetCounters()
  private long processSize;   // bytes written since last resetCounters()
  protected boolean closed; // flag indicating HiveWriter was closed
  private boolean autoCreatePartitions;
  // NOTE(review): "hearbeat" typo is preserved — the name is referenced elsewhere.
  private boolean hearbeatNeeded = false;
  // Events are buffered and handed to the serializer in chunks of this size.
  private final int writeBatchSz = 1000;
  private ArrayList<Event> batch = new ArrayList<Event>(writeBatchSz);
/**
 * Opens a streaming connection to the given Hive end point, acquires the first
 * transaction batch and begins its first transaction, leaving this writer ready
 * for {@code write(Event)} calls.
 *
 * @param endPoint             Hive table/partition to stream into
 * @param txnsPerBatch         number of transactions to request per batch
 * @param autoCreatePartitions whether the connection may create missing partitions
 * @param callTimeout          max millis for each blocking Hive call (0 = no limit)
 * @param callTimeoutPool      executor on which blocking Hive calls are run
 * @param hiveUser             user handed to {@code newConnection} (currently unused
 *                             there — TODO confirm proxy-user support)
 * @param serializer           converts Flume events into Hive records
 * @param sinkCounter          counters updated on connection events
 * @throws ConnectException     if the connection or first txn batch cannot be set up
 * @throws InterruptedException if interrupted during a blocking call
 */
HiveWriter(HiveEndPoint endPoint, int txnsPerBatch,
           boolean autoCreatePartitions, long callTimeout,
           ExecutorService callTimeoutPool, String hiveUser,
           HiveEventSerializer serializer, SinkCounter sinkCounter)
    throws ConnectException, InterruptedException {
  try {
    this.autoCreatePartitions = autoCreatePartitions;
    this.sinkCounter = sinkCounter;
    this.callTimeout = callTimeout;
    this.callTimeoutPool = callTimeoutPool;
    this.endPoint = endPoint;
    this.connection = newConnection(hiveUser); // may block up to callTimeout
    this.txnsPerBatch = txnsPerBatch;
    this.serializer = serializer;
    this.recordWriter = serializer.createRecordWriter(endPoint);
    this.txnBatch = nextTxnBatch(recordWriter); // may block up to callTimeout
    this.txnBatch.beginNextTransaction();
    this.closed = false;
    this.lastUsed = System.currentTimeMillis();
  } catch (InterruptedException e) {
    throw e; // propagate interruption unchanged
  } catch (RuntimeException e) {
    throw e; // programming errors pass through unwrapped
  } catch (Exception e) {
    // anything else means the connection/batch could not be established
    throw new ConnectException(endPoint, e);
  }
}
@Override
public String toString() {
return endPoint.toString();
}
/**
* Clear the class counters
*/
private void resetCounters() {
eventCounter = 0;
processSize = 0;
batchCounter = 0;
}
void setHearbeatNeeded() {
hearbeatNeeded = true;
}
public int getRemainingTxns() {
return txnBatch.remainingTransactions();
}
/**
 * Buffers the event for writing to Hive and updates throughput statistics.
 * Events accumulate locally and are handed to the serializer in chunks of
 * {@code writeBatchSz}; a partial chunk is pushed out by {@code flush(boolean)}.
 *
 * @param event event whose body will be written to the Hive table
 * @throws WriteException       if the underlying streaming write fails or times out
 * @throws InterruptedException if interrupted while writing a full chunk
 * @throws IllegalStateException if this writer has already been closed
 */
public synchronized void write(final Event event)
    throws WriteException, InterruptedException {
  if (closed) {
    throw new IllegalStateException("Writer closed. Cannot write to : " + endPoint);
  }
  batch.add(event);
  if (batch.size() == writeBatchSz) {
    // buffer full: push the whole chunk through the serializer now
    writeEventBatchToSerializer();
  }
  // Update statistics (the event may still be buffered at this point)
  processSize += event.getBody().length;
  eventCounter++;
}
/**
 * Writes every buffered event into the current transaction via the serializer,
 * then clears the buffer. The whole batch runs on the callTimeoutPool thread,
 * bounded by callTimeout. Events that fail to parse ({@code SerializationError})
 * are logged and skipped so a single bad record does not fail the batch.
 *
 * @throws WriteException       on streaming failure or timeout; buffer is kept
 *                              so the caller may retry or abort
 * @throws InterruptedException if interrupted while waiting for the write
 */
private void writeEventBatchToSerializer()
    throws InterruptedException, WriteException {
  try {
    timedCall(new CallRunner1<Void>() {
      @Override
      public Void call() throws InterruptedException, StreamingException {
        try {
          for (Event event : batch) {
            try {
              serializer.write(txnBatch, event);
            } catch (SerializationError err) {
              // malformed record: log it and continue with the rest of the batch
              LOG.info("Parse failed : {} : {}", err.getMessage(), new String(event.getBody()));
            }
          }
          return null;
        } catch (IOException e) {
          // surface I/O problems through the streaming exception hierarchy
          throw new StreamingIOFailure(e.getMessage(), e);
        }
      }
    });
    batch.clear();
  } catch (StreamingException e) {
    throw new WriteException(endPoint, txnBatch.getCurrentTxnId(), e);
  } catch (TimeoutException e) {
    throw new WriteException(endPoint, txnBatch.getCurrentTxnId(), e);
  }
}
/**
 * Drains any buffered events into the current transaction and commits it.
 * When {@code rollToNext} is true, advances to the next transaction in the
 * batch, first fetching a brand-new transaction batch if the current one is
 * exhausted. When {@code rollToNext} is false and the batch is exhausted,
 * {@code txnBatch} is left null — a subsequent write/flush would then fail
 * until a new batch is acquired.
 * Also services a pending heartbeat request and refreshes {@code lastUsed}
 * (used by the sink's idle/eldest-writer eviction).
 */
public void flush(boolean rollToNext)
    throws CommitException, TxnBatchException, TxnFailure, InterruptedException,
    WriteException {
  if (!batch.isEmpty()) {
    // push out the partial chunk before committing
    writeEventBatchToSerializer();
    batch.clear();
  }
  //0 Heart beat on TxnBatch, if the periodic timer flagged it
  if (hearbeatNeeded) {
    hearbeatNeeded = false;
    heartBeat();
  }
  lastUsed = System.currentTimeMillis();
  try {
    //1 commit txn & close batch if needed
    commitTxn();
    if (txnBatch.remainingTransactions() == 0) {
      closeTxnBatch();
      txnBatch = null;
      if (rollToNext) {
        txnBatch = nextTxnBatch(recordWriter);
      }
    }
    //2 roll to next Txn
    if (rollToNext) {
      LOG.debug("Switching to next Txn for {}", endPoint);
      txnBatch.beginNextTransaction(); // does not block
    }
  } catch (StreamingException e) {
    throw new TxnFailure(txnBatch, e);
  }
}
/**
* Aborts the current Txn
* @throws InterruptedException
*/
public void abort() throws InterruptedException {
batch.clear();
abortTxn();
}
/**
 * Sends a heartbeat on the current transaction batch so the metastore does not
 * expire it. The heartbeat is executed on the callTimeoutPool thread and this
 * method waits for it (bounded by callTimeout) — it is not fire-and-forget.
 * Any failure other than interruption is logged and suppressed, since a missed
 * heartbeat is not fatal.
 *
 * @throws InterruptedException if interrupted while waiting for the heartbeat
 */
public void heartBeat() throws InterruptedException {
  // run the heartbeat on one pool thread, bounded by callTimeout
  try {
    timedCall(new CallRunner1<Void>() {
      @Override
      public Void call() throws StreamingException {
        LOG.info("Sending heartbeat on batch " + txnBatch);
        txnBatch.heartbeat();
        return null;
      }
    });
  } catch (InterruptedException e) {
    throw e;
  } catch (Exception e) {
    LOG.warn("Unable to send heartbeat on Txn Batch " + txnBatch, e);
    // Suppressing exceptions as we don't care for errors on heartbeats
  }
}
/**
* Close the Transaction Batch and connection
* @throws IOException
* @throws InterruptedException
*/
public void close() throws InterruptedException {
batch.clear();
abortRemainingTxns();
closeTxnBatch();
closeConnection();
closed = true;
}
private void abortRemainingTxns() throws InterruptedException {
try {
if (!isClosed(txnBatch.getCurrentTransactionState())) {
abortCurrTxnHelper();
}
// recursively abort remaining txns
if (txnBatch.remainingTransactions() > 0) {
timedCall(
new CallRunner1<Void>() {
@Override
public Void call() throws StreamingException, InterruptedException {
txnBatch.beginNextTransaction();
return null;
}
});
abortRemainingTxns();
}
} catch (StreamingException e) {
LOG.warn("Error when aborting remaining transactions in batch " + txnBatch, e);
return;
} catch (TimeoutException e) {
LOG.warn("Timed out when aborting remaining transactions in batch " + txnBatch, e);
return;
}
}
private void abortCurrTxnHelper() throws TimeoutException, InterruptedException {
try {
timedCall(
new CallRunner1<Void>() {
@Override
public Void call() throws StreamingException, InterruptedException {
txnBatch.abort();
LOG.info("Aborted txn " + txnBatch.getCurrentTxnId());
return null;
}
}
);
} catch (StreamingException e) {
LOG.warn("Unable to abort transaction " + txnBatch.getCurrentTxnId(), e);
// continue to attempt to abort other txns in the batch
}
}
/**
 * Returns true if the given transaction state is terminal (COMMITTED or
 * ABORTED), i.e. the transaction needs no abort during cleanup.
 * Collapsed the original two-if chain into one boolean expression.
 */
private boolean isClosed(TransactionBatch.TxnState txnState) {
  return txnState == TransactionBatch.TxnState.COMMITTED
      || txnState == TransactionBatch.TxnState.ABORTED;
}
public void closeConnection() throws InterruptedException {
LOG.info("Closing connection to EndPoint : {}", endPoint);
try {
timedCall(new CallRunner1<Void>() {
@Override
public Void call() {
connection.close(); // could block
return null;
}
});
sinkCounter.incrementConnectionClosedCount();
} catch (Exception e) {
LOG.warn("Error closing connection to EndPoint : " + endPoint, e);
// Suppressing exceptions as we don't care for errors on connection close
}
}
private void commitTxn() throws CommitException, InterruptedException {
if (LOG.isInfoEnabled()) {
LOG.info("Committing Txn " + txnBatch.getCurrentTxnId() + " on EndPoint: " + endPoint);
}
try {
timedCall(new CallRunner1<Void>() {
@Override
public Void call() throws StreamingException, InterruptedException {
txnBatch.commit(); // could block
return null;
}
});
} catch (Exception e) {
throw new CommitException(endPoint, txnBatch.getCurrentTxnId(), e);
}
}
private void abortTxn() throws InterruptedException {
LOG.info("Aborting Txn id {} on End Point {}", txnBatch.getCurrentTxnId(), endPoint);
try {
timedCall(new CallRunner1<Void>() {
@Override
public Void call() throws StreamingException, InterruptedException {
txnBatch.abort(); // could block
return null;
}
});
} catch (InterruptedException e) {
throw e;
} catch (TimeoutException e) {
LOG.warn("Timeout while aborting Txn " + txnBatch.getCurrentTxnId() +
" on EndPoint: " + endPoint, e);
} catch (Exception e) {
LOG.warn("Error aborting Txn " + txnBatch.getCurrentTxnId() + " on EndPoint: " + endPoint, e);
// Suppressing exceptions as we don't care for errors on abort
}
}
private StreamingConnection newConnection(final String proxyUser)
throws InterruptedException, ConnectException {
try {
return timedCall(new CallRunner1<StreamingConnection>() {
@Override
public StreamingConnection call() throws InterruptedException, StreamingException {
return endPoint.newConnection(autoCreatePartitions); // could block
}
});
} catch (Exception e) {
throw new ConnectException(endPoint, e);
}
}
private TransactionBatch nextTxnBatch(final RecordWriter recordWriter)
throws InterruptedException, TxnBatchException {
LOG.debug("Fetching new Txn Batch for {}", endPoint);
TransactionBatch batch = null;
try {
batch = timedCall(new CallRunner1<TransactionBatch>() {
@Override
public TransactionBatch call() throws InterruptedException, StreamingException {
return connection.fetchTransactionBatch(txnsPerBatch, recordWriter); // could block
}
});
LOG.info("Acquired Transaction batch {}", batch);
} catch (Exception e) {
throw new TxnBatchException(endPoint, e);
}
return batch;
}
private void closeTxnBatch() throws InterruptedException {
try {
LOG.info("Closing Txn Batch {}.", txnBatch);
timedCall(new CallRunner1<Void>() {
@Override
public Void call() throws InterruptedException, StreamingException {
txnBatch.close(); // could block
return null;
}
});
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
LOG.warn("Error closing Txn Batch " + txnBatch, e);
// Suppressing exceptions as we don't care for errors on batch close
}
}
/**
 * Executes {@code callRunner} on the callTimeoutPool and waits for the result,
 * for at most {@code callTimeout} ms (indefinitely when callTimeout is 0 or less).
 * On timeout the task is cancelled with interruption and the TimeoutException is
 * rethrown. Task failures are unwrapped from ExecutionException and rethrown with
 * the most specific type possible; every failure also bumps the
 * connection-failed counter.
 *
 * @param <T> result type produced by the call
 * @throws TimeoutException     when callTimeout elapses before completion
 * @throws InterruptedException if this thread is interrupted while waiting, or
 *                              the task itself was interrupted
 * @throws StreamingException   for streaming/I-O level failures (wraps IOException)
 */
private <T> T timedCall(final CallRunner1<T> callRunner)
    throws TimeoutException, InterruptedException, StreamingException {
  Future<T> future = callTimeoutPool.submit(new Callable<T>() {
    @Override
    public T call() throws StreamingException, InterruptedException, Failure {
      return callRunner.call();
    }
  });
  try {
    if (callTimeout > 0) {
      return future.get(callTimeout, TimeUnit.MILLISECONDS);
    } else {
      return future.get(); // no timeout configured: wait indefinitely
    }
  } catch (TimeoutException eT) {
    future.cancel(true); // interrupt the hung call
    sinkCounter.incrementConnectionFailedCount();
    throw eT;
  } catch (ExecutionException e1) {
    sinkCounter.incrementConnectionFailedCount();
    Throwable cause = e1.getCause();
    // rethrow the underlying failure with as specific a type as possible
    if (cause instanceof IOException) {
      throw new StreamingException("I/O Failure", (IOException) cause);
    } else if (cause instanceof StreamingException) {
      throw (StreamingException) cause;
    } else if (cause instanceof TimeoutException) {
      throw new StreamingException("Operation Timed Out.", (TimeoutException) cause);
    } else if (cause instanceof RuntimeException) {
      throw (RuntimeException) cause;
    } else if (cause instanceof InterruptedException) {
      throw (InterruptedException) cause;
    }
    // unknown cause: wrap it so callers still see a StreamingException
    throw new StreamingException(e1.getMessage(), e1);
  }
}
long getLastUsed() {
return lastUsed;
}
/**
* Simple interface whose <tt>call</tt> method is called by
* {#callWithTimeout} in a new thread inside a
* {@linkplain java.security.PrivilegedExceptionAction#run()} call.
* @param <T>
*/
private interface CallRunner<T> {
T call() throws Exception;
}
private interface CallRunner1<T> {
T call() throws StreamingException, InterruptedException, Failure;
}
public static class Failure extends Exception {
public Failure(String msg, Throwable cause) {
super(msg, cause);
}
}
public static class WriteException extends Failure {
public WriteException(HiveEndPoint endPoint, Long currentTxnId, Throwable cause) {
super("Failed writing to : " + endPoint + ". TxnID : " + currentTxnId, cause);
}
}
public static class CommitException extends Failure {
public CommitException(HiveEndPoint endPoint, Long txnID, Throwable cause) {
super("Commit of Txn " + txnID + " failed on EndPoint: " + endPoint, cause);
}
}
public static class ConnectException extends Failure {
public ConnectException(HiveEndPoint ep, Throwable cause) {
super("Failed connecting to EndPoint " + ep, cause);
}
}
public static class TxnBatchException extends Failure {
public TxnBatchException(HiveEndPoint ep, Throwable cause) {
super("Failed acquiring Transaction Batch from EndPoint: " + ep, cause);
}
}
private class TxnFailure extends Failure {
public TxnFailure(TransactionBatch txnBatch, Throwable cause) {
super("Failed switching to next Txn in TxnBatch " + txnBatch, cause);
}
}
}
| 9,791 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/HiveJsonSerializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.RecordWriter;
import org.apache.hive.hcatalog.streaming.StreamingException;
import org.apache.hive.hcatalog.streaming.StrictJsonWriter;
import org.apache.hive.hcatalog.streaming.TransactionBatch;
import java.io.IOException;
import java.util.Collection;
/**
 * Serializer that forwards the incoming event body to Hive unmodified,
 * treating each body as one strict-JSON record ({@link StrictJsonWriter}).
 * Selected via the sink's {@code serializer = JSON} setting ({@link #ALIAS}).
 */
public class HiveJsonSerializer implements HiveEventSerializer {
  public static final String ALIAS = "JSON";

  /** Writes a single event's body as one JSON record into the current transaction. */
  @Override
  public void write(TransactionBatch txnBatch, Event e)
      throws StreamingException, IOException, InterruptedException {
    txnBatch.write(e.getBody());
  }

  /** Writes a collection of pre-serialized JSON records in one call. */
  @Override
  public void write(TransactionBatch txnBatch, Collection<byte[]> events)
      throws StreamingException, IOException, InterruptedException {
    txnBatch.write(events);
  }

  /** Creates the strict-JSON record writer bound to the given end point. */
  @Override
  public RecordWriter createRecordWriter(HiveEndPoint endPoint)
      throws StreamingException, IOException, ClassNotFoundException {
    return new StrictJsonWriter(endPoint);
  }

  /** The JSON serializer takes no configuration (removed the redundant bare return). */
  @Override
  public void configure(Context context) {
    // no-op: nothing to configure for JSON passthrough
  }
}
| 9,792 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/HiveSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurable;
import org.apache.flume.formatter.output.BucketPath;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TimeZone;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
public class HiveSink extends AbstractSink implements Configurable, BatchSizeSupported {
private static final Logger LOG = LoggerFactory.getLogger(HiveSink.class);
private static final int DEFAULT_MAXOPENCONNECTIONS = 500;
private static final int DEFAULT_TXNSPERBATCH = 100;
private static final int DEFAULT_BATCHSIZE = 15000;
private static final int DEFAULT_CALLTIMEOUT = 10000;
private static final int DEFAULT_IDLETIMEOUT = 0;
private static final int DEFAULT_HEARTBEATINTERVAL = 240; // seconds
private Map<HiveEndPoint, HiveWriter> allWriters;
private SinkCounter sinkCounter;
private volatile int idleTimeout;
private String metaStoreUri;
private String proxyUser;
private String database;
private String table;
private List<String> partitionVals;
private Integer txnsPerBatchAsk;
private Integer batchSize;
private Integer maxOpenConnections;
private boolean autoCreatePartitions;
private String serializerType;
private HiveEventSerializer serializer;
/**
* Default timeout for blocking I/O calls in HiveWriter
*/
private Integer callTimeout;
private Integer heartBeatInterval;
private ExecutorService callTimeoutPool;
private boolean useLocalTime;
private TimeZone timeZone;
private boolean needRounding;
private int roundUnit;
private Integer roundValue;
private Timer heartBeatTimer = new Timer();
private AtomicBoolean timeToSendHeartBeat = new AtomicBoolean(false);
@VisibleForTesting
Map<HiveEndPoint, HiveWriter> getAllWriters() {
return allWriters;
}
// read configuration and setup thresholds
@Override
public void configure(Context context) {
metaStoreUri = context.getString(Config.HIVE_METASTORE);
if (metaStoreUri == null) {
throw new IllegalArgumentException(Config.HIVE_METASTORE + " config setting is not " +
"specified for sink " + getName());
}
if (metaStoreUri.equalsIgnoreCase("null")) { // for testing support
metaStoreUri = null;
}
proxyUser = null; // context.getString("hive.proxyUser"); not supported by hive api yet
database = context.getString(Config.HIVE_DATABASE);
if (database == null) {
throw new IllegalArgumentException(Config.HIVE_DATABASE + " config setting is not " +
"specified for sink " + getName());
}
table = context.getString(Config.HIVE_TABLE);
if (table == null) {
throw new IllegalArgumentException(Config.HIVE_TABLE + " config setting is not " +
"specified for sink " + getName());
}
String partitions = context.getString(Config.HIVE_PARTITION);
if (partitions != null) {
partitionVals = Arrays.asList(partitions.split(","));
}
txnsPerBatchAsk = context.getInteger(Config.HIVE_TXNS_PER_BATCH_ASK, DEFAULT_TXNSPERBATCH);
if (txnsPerBatchAsk < 0) {
LOG.warn(getName() + ". hive.txnsPerBatchAsk must be positive number. Defaulting to "
+ DEFAULT_TXNSPERBATCH);
txnsPerBatchAsk = DEFAULT_TXNSPERBATCH;
}
batchSize = context.getInteger(Config.BATCH_SIZE, DEFAULT_BATCHSIZE);
if (batchSize < 0) {
LOG.warn(getName() + ". batchSize must be positive number. Defaulting to "
+ DEFAULT_BATCHSIZE);
batchSize = DEFAULT_BATCHSIZE;
}
idleTimeout = context.getInteger(Config.IDLE_TIMEOUT, DEFAULT_IDLETIMEOUT);
if (idleTimeout < 0) {
LOG.warn(getName() + ". idleTimeout must be positive number. Defaulting to "
+ DEFAULT_IDLETIMEOUT);
idleTimeout = DEFAULT_IDLETIMEOUT;
}
callTimeout = context.getInteger(Config.CALL_TIMEOUT, DEFAULT_CALLTIMEOUT);
if (callTimeout < 0) {
LOG.warn(getName() + ". callTimeout must be positive number. Defaulting to "
+ DEFAULT_CALLTIMEOUT);
callTimeout = DEFAULT_CALLTIMEOUT;
}
heartBeatInterval = context.getInteger(Config.HEART_BEAT_INTERVAL, DEFAULT_HEARTBEATINTERVAL);
if (heartBeatInterval < 0) {
LOG.warn(getName() + ". heartBeatInterval must be positive number. Defaulting to "
+ DEFAULT_HEARTBEATINTERVAL);
heartBeatInterval = DEFAULT_HEARTBEATINTERVAL;
}
maxOpenConnections = context.getInteger(Config.MAX_OPEN_CONNECTIONS,
DEFAULT_MAXOPENCONNECTIONS);
autoCreatePartitions = context.getBoolean("autoCreatePartitions", true);
// Timestamp processing
useLocalTime = context.getBoolean(Config.USE_LOCAL_TIME_STAMP, false);
String tzName = context.getString(Config.TIME_ZONE);
timeZone = (tzName == null) ? null : TimeZone.getTimeZone(tzName);
needRounding = context.getBoolean(Config.ROUND, false);
String unit = context.getString(Config.ROUND_UNIT, Config.MINUTE);
if (unit.equalsIgnoreCase(Config.HOUR)) {
this.roundUnit = Calendar.HOUR_OF_DAY;
} else if (unit.equalsIgnoreCase(Config.MINUTE)) {
this.roundUnit = Calendar.MINUTE;
} else if (unit.equalsIgnoreCase(Config.SECOND)) {
this.roundUnit = Calendar.SECOND;
} else {
LOG.warn(getName() + ". Rounding unit is not valid, please set one of " +
"minute, hour or second. Rounding will be disabled");
needRounding = false;
}
this.roundValue = context.getInteger(Config.ROUND_VALUE, 1);
if (roundUnit == Calendar.SECOND || roundUnit == Calendar.MINUTE) {
Preconditions.checkArgument(roundValue > 0 && roundValue <= 60,
"Round value must be > 0 and <= 60");
} else if (roundUnit == Calendar.HOUR_OF_DAY) {
Preconditions.checkArgument(roundValue > 0 && roundValue <= 24,
"Round value must be > 0 and <= 24");
}
// Serializer
serializerType = context.getString(Config.SERIALIZER, "");
if (serializerType.isEmpty()) {
throw new IllegalArgumentException("serializer config setting is not " +
"specified for sink " + getName());
}
serializer = createSerializer(serializerType);
serializer.configure(context);
Preconditions.checkArgument(batchSize > 0, "batchSize must be greater than 0");
if (sinkCounter == null) {
sinkCounter = new SinkCounter(getName());
}
}
@VisibleForTesting
protected SinkCounter getCounter() {
return sinkCounter;
}
private HiveEventSerializer createSerializer(String serializerName) {
if (serializerName.compareToIgnoreCase(HiveDelimitedTextSerializer.ALIAS) == 0 ||
serializerName.compareTo(HiveDelimitedTextSerializer.class.getName()) == 0) {
return new HiveDelimitedTextSerializer();
} else if (serializerName.compareToIgnoreCase(HiveJsonSerializer.ALIAS) == 0 ||
serializerName.compareTo(HiveJsonSerializer.class.getName()) == 0) {
return new HiveJsonSerializer();
}
try {
return (HiveEventSerializer) Class.forName(serializerName).newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Unable to instantiate serializer: " + serializerName
+ " on sink: " + getName(), e);
}
}
/**
 * Pulls up to {@code batchSize} events from the channel inside one channel
 * transaction, routes them to per-endpoint HiveWriters and flushes them.
 * Take at most batchSize events per Transaction. <br/>
 * This method is not thread safe.
 *
 * @return BACKOFF when no events were drained (or the thread was interrupted),
 *         READY otherwise
 * @throws EventDeliveryException on write/channel failure; the channel
 *         transaction is rolled back in {@code finally} so events redeliver
 */
public Status process() throws EventDeliveryException {
  // writers used in this Txn
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  boolean success = false;
  try {
    // 1 Enable Heart Beats (the timer thread periodically sets this flag)
    if (timeToSendHeartBeat.compareAndSet(true, false)) {
      enableHeartBeatOnAllWriters();
    }
    // 2 Drain Batch
    int txnEventCount = drainOneBatch(channel);
    transaction.commit();
    success = true;
    // 3 Update Counters
    if (txnEventCount < 1) {
      return Status.BACKOFF;
    } else {
      return Status.READY;
    }
  } catch (InterruptedException err) {
    // interruption is treated as transient: back off, rollback in finally
    LOG.warn(getName() + ": Thread was interrupted.", err);
    return Status.BACKOFF;
  } catch (Exception e) {
    sinkCounter.incrementEventWriteOrChannelFail(e);
    throw new EventDeliveryException(e);
  } finally {
    if (!success) {
      transaction.rollback();
    }
    transaction.close();
  }
}
// Drains one batch of events from Channel into Hive
private int drainOneBatch(Channel channel)
throws HiveWriter.Failure, InterruptedException {
int txnEventCount = 0;
try {
Map<HiveEndPoint,HiveWriter> activeWriters = Maps.newHashMap();
for (; txnEventCount < batchSize; ++txnEventCount) {
// 0) Read event from Channel
Event event = channel.take();
if (event == null) {
break;
}
//1) Create end point by substituting place holders
HiveEndPoint endPoint = makeEndPoint(metaStoreUri, database, table,
partitionVals, event.getHeaders(), timeZone,
needRounding, roundUnit, roundValue, useLocalTime);
//2) Create or reuse Writer
HiveWriter writer = getOrCreateWriter(activeWriters, endPoint);
//3) Write
LOG.debug("{} : Writing event to {}", getName(), endPoint);
writer.write(event);
} // for
//4) Update counters
if (txnEventCount == 0) {
sinkCounter.incrementBatchEmptyCount();
} else if (txnEventCount == batchSize) {
sinkCounter.incrementBatchCompleteCount();
} else {
sinkCounter.incrementBatchUnderflowCount();
}
sinkCounter.addToEventDrainAttemptCount(txnEventCount);
// 5) Flush all Writers
for (HiveWriter writer : activeWriters.values()) {
writer.flush(true);
}
sinkCounter.addToEventDrainSuccessCount(txnEventCount);
return txnEventCount;
} catch (HiveWriter.Failure e) {
// in case of error we close all TxnBatches to start clean next time
LOG.warn(getName() + " : " + e.getMessage(), e);
abortAllWriters();
closeAllWriters();
throw e;
}
}
private void enableHeartBeatOnAllWriters() {
for (HiveWriter writer : allWriters.values()) {
writer.setHearbeatNeeded();
}
}
private HiveWriter getOrCreateWriter(Map<HiveEndPoint, HiveWriter> activeWriters,
HiveEndPoint endPoint)
throws HiveWriter.ConnectException, InterruptedException {
try {
HiveWriter writer = allWriters.get( endPoint );
if (writer == null) {
LOG.info(getName() + ": Creating Writer to Hive end point : " + endPoint);
writer = new HiveWriter(endPoint, txnsPerBatchAsk, autoCreatePartitions,
callTimeout, callTimeoutPool, proxyUser, serializer, sinkCounter);
sinkCounter.incrementConnectionCreatedCount();
if (allWriters.size() > maxOpenConnections) {
int retired = closeIdleWriters();
if (retired == 0) {
closeEldestWriter();
}
}
allWriters.put(endPoint, writer);
activeWriters.put(endPoint, writer);
} else {
if (activeWriters.get(endPoint) == null) {
activeWriters.put(endPoint,writer);
}
}
return writer;
} catch (HiveWriter.ConnectException e) {
sinkCounter.incrementConnectionFailedCount();
throw e;
}
}
/**
 * Builds the HiveEndPoint for one event by expanding header/time placeholders
 * in each configured partition value. A null partition list yields an
 * end point with null partition values (unpartitioned table).
 */
private HiveEndPoint makeEndPoint(String metaStoreUri, String database, String table,
                                  List<String> partVals, Map<String, String> headers,
                                  TimeZone timeZone, boolean needRounding,
                                  int roundUnit, Integer roundValue,
                                  boolean useLocalTime) {
  ArrayList<String> resolved = null;
  if (partVals != null) {
    resolved = Lists.newArrayList();
    for (String rawVal : partVals) {
      String expanded = BucketPath.escapeString(rawVal, headers, timeZone,
          needRounding, roundUnit, roundValue, useLocalTime);
      resolved.add(expanded);
    }
  }
  return new HiveEndPoint(metaStoreUri, database, table, resolved);
}
/**
 * Locates the writer that has been unused the longest, closes it and removes
 * it from the cache. No-op when there are no writers (previously this would
 * NPE on {@code allWriters.remove(null)}).
 *
 * @throws InterruptedException if interrupted while closing the writer
 */
private void closeEldestWriter() throws InterruptedException {
  long oldestTimeStamp = System.currentTimeMillis();
  HiveEndPoint eldest = null;
  for (Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
    if (entry.getValue().getLastUsed() < oldestTimeStamp) {
      eldest = entry.getKey();
      oldestTimeStamp = entry.getValue().getLastUsed();
    }
  }
  if (eldest == null) {
    return; // nothing to retire
  }
  try {
    // BUGFIX: was incrementConnectionCreatedCount(); we are closing a
    // connection here, matching closeIdleWriters()'s accounting.
    sinkCounter.incrementConnectionClosedCount();
    LOG.info(getName() + ": Closing least used Writer to Hive EndPoint : " + eldest);
    allWriters.remove(eldest).close();
  } catch (InterruptedException e) {
    LOG.warn(getName() + ": Interrupted when attempting to close writer for end point: "
        + eldest, e);
    throw e;
  }
}
/**
* Locate all writers past idle timeout and retire them
* @return number of writers retired
*/
private int closeIdleWriters() throws InterruptedException {
int count = 0;
long now = System.currentTimeMillis();
ArrayList<HiveEndPoint> retirees = Lists.newArrayList();
//1) Find retirement candidates
for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) {
if (now - entry.getValue().getLastUsed() > idleTimeout) {
++count;
retirees.add(entry.getKey());
}
}
//2) Retire them
for (HiveEndPoint ep : retirees) {
sinkCounter.incrementConnectionClosedCount();
LOG.info(getName() + ": Closing idle Writer to Hive end point : {}", ep);
allWriters.remove(ep).close();
}
return count;
}
/**
* Closes all writers and remove them from cache
* @return number of writers retired
*/
private void closeAllWriters() throws InterruptedException {
//1) Retire writers
for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) {
entry.getValue().close();
}
//2) Clear cache
allWriters.clear();
}
/**
* Abort current Txn on all writers
* @return number of writers retired
*/
private void abortAllWriters() throws InterruptedException {
for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) {
entry.getValue().abort();
}
}
/**
 * Stops the sink: closes every writer (unbounded — close() calls are not
 * constrained by callTimeout), shuts down the call-timeout pool and stops
 * the counters.
 * Improvements: replaced the {@code == false} anti-idiom with {@code !},
 * and restore the interrupt status when shutdown waiting is interrupted
 * instead of silently swallowing it.
 */
@Override
public void stop() {
  // do not constrain close() calls with a timeout
  for (Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
    try {
      HiveWriter w = entry.getValue();
      w.close();
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }
  // shut down all thread pools
  callTimeoutPool.shutdown();
  try {
    while (!callTimeoutPool.isTerminated()) {
      callTimeoutPool.awaitTermination(
          Math.max(DEFAULT_CALLTIMEOUT, callTimeout), TimeUnit.MILLISECONDS);
    }
  } catch (InterruptedException ex) {
    LOG.warn(getName() + ":Shutdown interrupted on " + callTimeoutPool, ex);
    Thread.currentThread().interrupt(); // preserve interrupt for the caller
  }
  callTimeoutPool = null;
  allWriters.clear();
  allWriters = null;
  sinkCounter.stop();
  super.stop();
  LOG.info("Hive Sink {} stopped", getName());
}
@Override
public void start() {
String timeoutName = "hive-" + getName() + "-call-runner-%d";
// call timeout pool needs only 1 thd as sink is effectively single threaded
callTimeoutPool = Executors.newFixedThreadPool(1,
new ThreadFactoryBuilder().setNameFormat(timeoutName).build());
this.allWriters = Maps.newHashMap();
sinkCounter.start();
super.start();
setupHeartBeatTimer();
LOG.info(getName() + ": Hive Sink {} started", getName() );
}
private void setupHeartBeatTimer() {
if (heartBeatInterval > 0) {
heartBeatTimer.schedule(new TimerTask() {
@Override
public void run() {
timeToSendHeartBeat.set(true);
setupHeartBeatTimer();
}
}, heartBeatInterval * 1000);
}
}
@Override
public long getBatchSize() {
return batchSize;
}
@Override
public String toString() {
return "{ Sink type:" + getClass().getSimpleName() + ", name:" + getName() +
" }";
}
}
| 9,793 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/Config.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
/**
 * Configuration property names recognized by the Hive sink.
 * Pure constants holder: declared {@code final} with a private constructor so
 * it cannot be instantiated or subclassed (utility-class convention).
 */
public final class Config {
  public static final String HIVE_METASTORE = "hive.metastore";
  public static final String HIVE_DATABASE = "hive.database";
  public static final String HIVE_TABLE = "hive.table";
  public static final String HIVE_PARTITION = "hive.partition";
  public static final String HIVE_TXNS_PER_BATCH_ASK = "hive.txnsPerBatchAsk";
  public static final String BATCH_SIZE = "batchSize";
  public static final String IDLE_TIMEOUT = "idleTimeout";
  public static final String CALL_TIMEOUT = "callTimeout";
  public static final String HEART_BEAT_INTERVAL = "heartBeatInterval";
  public static final String MAX_OPEN_CONNECTIONS = "maxOpenConnections";
  public static final String USE_LOCAL_TIME_STAMP = "useLocalTimeStamp";
  public static final String TIME_ZONE = "timeZone";
  public static final String ROUND_UNIT = "roundUnit";
  public static final String ROUND = "round";
  public static final String HOUR = "hour";
  public static final String MINUTE = "minute";
  public static final String SECOND = "second";
  public static final String ROUND_VALUE = "roundValue";
  public static final String SERIALIZER = "serializer";

  private Config() {
    // utility class: no instances
  }
}
| 9,794 |
0 | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hive-sink/src/main/java/org/apache/flume/sink/hive/HiveDelimitedTextSerializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hive;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hive.hcatalog.streaming.DelimitedInputWriter;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.RecordWriter;
import org.apache.hive.hcatalog.streaming.StreamingException;
import org.apache.hive.hcatalog.streaming.TransactionBatch;
import java.io.IOException;
import java.util.Collection;
/** Forwards the incoming event body to Hive unmodified
* Sets up the delimiter and the field to column mapping
*/
public class HiveDelimitedTextSerializer implements HiveEventSerializer {
  public static final String ALIAS = "DELIMITED";
  public static final String defaultDelimiter = ",";
  public static final String SERIALIZER_DELIMITER = "serializer.delimiter";
  public static final String SERIALIZER_FIELDNAMES = "serializer.fieldnames";
  public static final String SERIALIZER_SERDE_SEPARATOR = "serializer.serdeSeparator";

  // Field delimiter handed to the DelimitedInputWriter (surrounding double
  // quotes stripped, see parseDelimiterSpec).
  private String delimiter;
  // Mapping of input fields to Hive columns, parsed from serializer.fieldnames.
  private String[] fieldToColMapping = null;
  // Optional single-character SerDe separator; null means use the writer default.
  private Character serdeSeparator = null;

  /** Forwards the raw event body to the transaction batch unmodified. */
  @Override
  public void write(TransactionBatch txnBatch, Event e)
      throws StreamingException, IOException, InterruptedException {
    txnBatch.write(e.getBody());
  }

  /** Forwards a batch of raw event bodies to the transaction batch unmodified. */
  @Override
  public void write(TransactionBatch txnBatch, Collection<byte[]> events)
      throws StreamingException, IOException, InterruptedException {
    txnBatch.write(events);
  }

  /**
   * Creates a delimited-text record writer for the given endpoint using the
   * configured field-to-column mapping, delimiter and optional SerDe separator.
   */
  @Override
  public RecordWriter createRecordWriter(HiveEndPoint endPoint)
      throws StreamingException, IOException, ClassNotFoundException {
    if (serdeSeparator == null) {
      return new DelimitedInputWriter(fieldToColMapping, delimiter, endPoint);
    }
    return new DelimitedInputWriter(fieldToColMapping, delimiter, endPoint, null, serdeSeparator);
  }

  /**
   * Reads serializer.* properties from the context.
   * serializer.fieldnames is mandatory; serializer.delimiter defaults to ","
   * and serializer.serdeSeparator is optional.
   *
   * @throws IllegalArgumentException if serializer.fieldnames is missing or
   *         the serdeSeparator spec is malformed
   */
  @Override
  public void configure(Context context) {
    delimiter = parseDelimiterSpec(
        context.getString(SERIALIZER_DELIMITER, defaultDelimiter));
    String fieldNames = context.getString(SERIALIZER_FIELDNAMES);
    if (fieldNames == null) {
      throw new IllegalArgumentException("serializer.fieldnames is not specified " +
          "for serializer " + this.getClass().getName());
    }
    String serdeSeparatorStr = context.getString(SERIALIZER_SERDE_SEPARATOR);
    this.serdeSeparator = parseSerdeSeparatorSpec(serdeSeparatorStr);
    // split, but preserve empty fields (-1)
    fieldToColMapping = fieldNames.trim().split(",", -1);
  }

  // if delimiter is double quoted like "\t", drop the quotes
  private static String parseDelimiterSpec(String delimiter) {
    if (delimiter == null) {
      return null;
    }
    // Length guard: "" and a lone '"' previously threw
    // StringIndexOutOfBoundsException from charAt/substring.
    if (delimiter.length() >= 2 &&
        delimiter.charAt(0) == '"' &&
        delimiter.charAt(delimiter.length() - 1) == '"') {
      return delimiter.substring(1, delimiter.length() - 1);
    }
    return delimiter;
  }

  // if separator is a single quoted character like '\t', drop the quotes
  private static Character parseSerdeSeparatorSpec(String separatorStr) {
    if (separatorStr == null) {
      return null;
    }
    if (separatorStr.length() == 1) {
      return separatorStr.charAt(0);
    }
    // BUGFIX: the opening quote was never verified — charAt(2) was tested twice
    // (directly and again as charAt(length - 1) with length == 3) — so malformed
    // specs such as "ab'" were silently accepted.
    if (separatorStr.length() == 3 &&
        separatorStr.charAt(0) == '\'' &&
        separatorStr.charAt(2) == '\'') {
      return separatorStr.charAt(1);
    }
    throw new IllegalArgumentException("serializer.serdeSeparator spec is invalid " +
        "for " + ALIAS + " serializer " );
  }
}
| 9,795 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/MockHDFSWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * HDFSWriter test double that records how it was used (files opened/closed,
 * bytes/events written) and can be configured so that {@code close()} fails
 * with an IOException until a given number of attempts have been made.
 */
public class MockHDFSWriter implements HDFSWriter {

  private static final Logger logger = LoggerFactory.getLogger(MockHDFSWriter.class);

  // Usage counters inspected by tests through the getters below.
  private int filesOpened = 0;
  private int filesClosed = 0;
  private int bytesWritten = 0;
  private int eventsWritten = 0;
  private String filePath = null;

  // Number of close() calls that must fail before one succeeds (0 = never fail).
  private final int numberOfRetriesRequired;

  // Total close() attempts so far. AtomicInteger already provides the needed
  // thread safety; the previous 'volatile' modifier was redundant. Kept public
  // because tests poke at it directly.
  public final AtomicInteger currentCloseAttempts = new AtomicInteger(0);

  public MockHDFSWriter(int numberOfRetriesRequired) {
    this.numberOfRetriesRequired = numberOfRetriesRequired;
  }

  public MockHDFSWriter() {
    this.numberOfRetriesRequired = 0;
  }

  public int getFilesOpened() {
    return filesOpened;
  }

  public int getFilesClosed() {
    return filesClosed;
  }

  public int getBytesWritten() {
    return bytesWritten;
  }

  public int getEventsWritten() {
    return eventsWritten;
  }

  public String getOpenedFilePath() {
    return filePath;
  }

  @Override
  public void configure(Context context) {
    // no-op
  }

  @Override
  public void open(String filePath) throws IOException {
    this.filePath = filePath;
    filesOpened++;
  }

  @Override
  public void open(String filePath, CompressionCodec codec, CompressionType cType)
      throws IOException {
    this.filePath = filePath;
    filesOpened++;
  }

  @Override
  public void append(Event e) throws IOException {
    eventsWritten++;
    bytesWritten += e.getBody().length;
  }

  @Override
  public void sync() throws IOException {
    // does nothing
  }

  /**
   * Throws until {@code numberOfRetriesRequired} attempts have been made
   * (0 = always succeed). Note filesClosed counts attempts, not successes,
   * matching the original behavior tests rely on.
   */
  @Override
  public void close() throws IOException {
    filesClosed++;
    int curr = currentCloseAttempts.incrementAndGet();
    // Parameterized logging; log the attempt number actually compared below
    // rather than the AtomicInteger's toString().
    logger.info("Attempting to close: '{}' of '{}'", curr, numberOfRetriesRequired);
    if (curr >= numberOfRetriesRequired || numberOfRetriesRequired == 0) {
      logger.info("closing file");
    } else {
      throw new IOException("MockIOException");
    }
  }

  @Override
  public boolean isUnderReplicated() {
    return false;
  }
}
| 9,796 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestHDFSCompressedDataStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.File;
import java.io.FileInputStream;
import java.nio.ByteBuffer;
import java.nio.charset.CharsetDecoder;
import java.util.List;
import java.util.zip.GZIPInputStream;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
public class TestHDFSCompressedDataStream {

  private static final Logger logger =
      LoggerFactory.getLogger(TestHDFSCompressedDataStream.class);

  private File file;
  private String fileURI;
  private CompressionCodecFactory factory;

  /** Points the test at target/test/data/foo.gz on a raw local FS. */
  @Before
  public void init() throws Exception {
    this.file = new File("target/test/data/foo.gz");
    this.fileURI = file.getAbsoluteFile().toURI().toString();
    logger.info("File URI: {}", fileURI);
    Configuration conf = new Configuration();
    // local FS must be raw in order to be Syncable
    conf.set("fs.file.impl", "org.apache.hadoop.fs.RawLocalFileSystem");
    Path path = new Path(fileURI);
    path.getFileSystem(conf); // get FS with our conf cached
    this.factory = new CompressionCodecFactory(conf);
  }

  // make sure the data makes it to disk if we sync() the data stream
  @Test
  public void testGzipDurability() throws Exception {
    Context context = new Context();
    HDFSCompressedDataStream writer = new HDFSCompressedDataStream();
    writer.configure(context);
    writer.open(fileURI, factory.getCodec(new Path(fileURI)),
        SequenceFile.CompressionType.BLOCK);
    String[] bodies = { "yarf!" };
    writeBodies(writer, bodies);

    byte[] buf = new byte[256];
    // try-with-resources: the stream was previously never closed (leak).
    try (GZIPInputStream cmpIn = new GZIPInputStream(new FileInputStream(file))) {
      int len = cmpIn.read(buf);
      String result = new String(buf, 0, len, Charsets.UTF_8);
      result = result.trim(); // BodyTextEventSerializer adds a newline
      Assert.assertEquals("input and output must match", bodies[0], result);
    }
  }

  @Test
  public void testGzipDurabilityWithSerializer() throws Exception {
    Context context = new Context();
    context.put("serializer", "AVRO_EVENT");
    HDFSCompressedDataStream writer = new HDFSCompressedDataStream();
    writer.configure(context);
    writer.open(fileURI, factory.getCodec(new Path(fileURI)),
        SequenceFile.CompressionType.BLOCK);
    String[] bodies = { "yarf!", "yarfing!" };
    writeBodies(writer, bodies);

    int found = 0;
    int expected = bodies.length;
    List<String> expectedBodies = Lists.newArrayList(bodies);
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    // try-with-resources: previously the streams leaked when an assertion
    // (or avro decoding) failed before the manual close() calls.
    try (GZIPInputStream cmpIn = new GZIPInputStream(new FileInputStream(file));
         DataFileStream<GenericRecord> avroStream =
             new DataFileStream<GenericRecord>(cmpIn, reader)) {
      GenericRecord record = new GenericData.Record(avroStream.getSchema());
      while (avroStream.hasNext()) {
        avroStream.next(record);
        CharsetDecoder decoder = Charsets.UTF_8.newDecoder();
        String bodyStr = decoder.decode((ByteBuffer) record.get("body"))
            .toString();
        expectedBodies.remove(bodyStr);
        found++;
      }
    }
    Assert.assertTrue("Found = " + found + ", Expected = " + expected
        + ", Left = " + expectedBodies.size() + " " + expectedBodies,
        expectedBodies.size() == 0);
  }

  /** Appends each body as a UTF-8 event, then sync()s the writer. */
  private void writeBodies(HDFSCompressedDataStream writer, String... bodies)
      throws Exception {
    for (String body : bodies) {
      Event evt = EventBuilder.withBody(body, Charsets.UTF_8);
      writer.append(evt);
    }
    writer.sync();
  }
}
| 9,797 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/MockFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * FileSystem test double that delegates to a real FileSystem, hands out
 * MockFsDataOutputStreams whose close() behavior is controllable, and can be
 * configured so that rename() fails a given number of times before succeeding.
 */
public class MockFileSystem extends FileSystem {

  private static final Logger logger =
      LoggerFactory.getLogger(MockFileSystem.class);

  // Real file system all operations are delegated to.
  FileSystem fs;
  // Number of rename() calls that must fail before one succeeds (0 = never fail).
  int numberOfRetriesRequired;
  // Last output stream handed out, exposed so tests can inspect it.
  MockFsDataOutputStream latestOutputStream;
  // rename() attempts made so far.
  int currentRenameAttempts;
  // Whether close() on streams created by this mock should succeed.
  boolean closeSucceed = true;

  public MockFileSystem(FileSystem fs, int numberOfRetriesRequired) {
    this.fs = fs;
    this.numberOfRetriesRequired = numberOfRetriesRequired;
  }

  public MockFileSystem(FileSystem fs,
      int numberOfRetriesRequired, boolean closeSucceed) {
    this.fs = fs;
    this.numberOfRetriesRequired = numberOfRetriesRequired;
    this.closeSucceed = closeSucceed;
  }

  @Override
  public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2)
      throws IOException {
    latestOutputStream = new MockFsDataOutputStream(
        fs.append(arg0, arg1, arg2), closeSucceed);
    return latestOutputStream;
  }

  @Override
  public FSDataOutputStream create(Path arg0) throws IOException {
    latestOutputStream = new MockFsDataOutputStream(fs.create(arg0), closeSucceed);
    return latestOutputStream;
  }

  /** Not supported by this mock; always throws. */
  @Override
  public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
                                   short arg4, long arg5, Progressable arg6)
      throws IOException {
    throw new IOException("Not a real file system");
  }

  @Override
  @Deprecated
  public boolean delete(Path arg0) throws IOException {
    return fs.delete(arg0);
  }

  @Override
  public boolean delete(Path arg0, boolean arg1) throws IOException {
    return fs.delete(arg0, arg1);
  }

  @Override
  public FileStatus getFileStatus(Path arg0) throws IOException {
    return fs.getFileStatus(arg0);
  }

  @Override
  public URI getUri() {
    return fs.getUri();
  }

  @Override
  public Path getWorkingDirectory() {
    return fs.getWorkingDirectory();
  }

  @Override
  public FileStatus[] listStatus(Path arg0) throws IOException {
    return fs.listStatus(arg0);
  }

  @Override
  public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException {
    return fs.mkdirs(arg0, arg1);
  }

  @Override
  public FSDataInputStream open(Path arg0, int arg1) throws IOException {
    return fs.open(arg0, arg1);
  }

  /**
   * Fails with a mock IOException until {@code numberOfRetriesRequired}
   * attempts have been made (0 = never fail), then delegates the rename.
   */
  @Override
  public boolean rename(Path arg0, Path arg1) throws IOException {
    currentRenameAttempts++;
    logger.info("Attempting to Rename: '{}' of '{}'",
        currentRenameAttempts, numberOfRetriesRequired);
    if (currentRenameAttempts >= numberOfRetriesRequired || numberOfRetriesRequired == 0) {
      logger.info("Renaming file");
      return fs.rename(arg0, arg1);
    } else {
      throw new IOException("MockIOException");
    }
  }

  @Override
  public void setWorkingDirectory(Path arg0) {
    fs.setWorkingDirectory(arg0);
  }
}
| 9,798 |
0 | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink | Create_ds/flume/flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestHDFSEventSinkDeadlock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.sink.hdfs;
import com.google.common.collect.ImmutableMap;
import com.google.common.primitives.Longs;
import org.apache.flume.Channel;
import org.apache.flume.ChannelSelector;
import org.apache.flume.Context;
import org.apache.flume.SinkProcessor;
import org.apache.flume.SinkRunner;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;
import org.apache.flume.sink.DefaultSinkProcessor;
import org.apache.flume.source.SequenceGeneratorSource;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * Standalone (main-method) deadlock stress harness for HDFSEventSink.
 *
 * <p>Wires a SequenceGeneratorSource through a MemoryChannel into an
 * HDFSEventSink configured with aggressive rolling (rollInterval=1,
 * maxOpenFiles=1), then drives source.process() in a loop while a scheduled
 * watchdog polls ThreadMXBean for deadlocked threads. Exits with status 0
 * after 250 clean iterations, or status 1 (after dumping stack traces) the
 * moment a deadlock is detected. Not a JUnit test; run its main() manually.
 */
public class TestHDFSEventSinkDeadlock {
  public static void main(String... args) {
    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("HDFSEventSink");
    // Small rollInterval/maxOpenFiles maximize file open/close/rename churn,
    // the activity the deadlock hunt is targeting.
    Context context = new Context(ImmutableMap.of(
        "hdfs.path", "file:///tmp/flume-test/bucket-%t",
        "hdfs.filePrefix", "flumetest",
        "hdfs.rollInterval", "1",
        "hdfs.maxOpenFiles", "1",
        "hdfs.useLocalTimeStamp", "true"));
    Configurables.configure(sink, context);
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());
    final SequenceGeneratorSource source = new SequenceGeneratorSource();
    Configurables.configure(source, new Context());
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(Collections.singletonList(channel));
    source.setChannelProcessor(new ChannelProcessor(rcs));
    sink.setChannel(channel);
    channel.start();
    source.start();
    SinkProcessor sinkProcessor = new DefaultSinkProcessor();
    sinkProcessor.setSinks(Collections.singletonList(sink));
    SinkRunner sinkRunner = new SinkRunner();
    sinkRunner.setSink(sinkProcessor);
    sinkRunner.start();
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(3);
    // Producer loop: feed events at ~1s intervals (with jitter) and declare
    // success after 250 iterations without the watchdog firing.
    executor.execute(new Runnable() {
      @Override
      public void run() {
        int i = 0;
        while (true) {
          try {
            source.process();
            System.out.println(i++);
            if (i == 250) {
              System.out.println("No deadlock found after 250 iterations, exiting");
              System.exit(0);
            }
            Thread.sleep((long) (Math.random() * 100 + 950));
          } catch (Exception e) {
            // best-effort harness: transient failures from source.process()
            // (or interruption) are ignored so the loop keeps hammering the sink
          }
        }
      }
    });
    // Watchdog: once a second, ask the JVM for deadlocked threads; on a hit,
    // dump their stacks and fail the run.
    executor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        ThreadMXBean bean = ManagementFactory.getThreadMXBean();
        long[] threadIds = bean.findDeadlockedThreads();
        if (threadIds != null) {
          System.out.println("Deadlocked threads found");
          printThreadStackTraces(threadIds);
          System.exit(1);
        }
      }
    }, 0, 1, TimeUnit.SECONDS);
  }

  // Prints the stack trace of every live thread whose id appears in threadIds.
  private static void printThreadStackTraces(long[] threadIds) {
    Set<Long> threadIdSet = new HashSet<>(Longs.asList(threadIds));
    for (Thread th : Thread.getAllStackTraces().keySet()) {
      if (threadIdSet.contains(th.getId())) {
        System.out.println("Thread: " + th);
        for (StackTraceElement e : th.getStackTrace()) {
          System.out.println("\t" + e);
        }
        System.out.println("-----------------------------");
      }
    }
  }
} | 9,799
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.