index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/ConfigurationSourceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
package org.apache.flume.node;
import java.net.URI;
import java.util.List;
import java.util.ServiceLoader;
import org.apache.flume.node.net.AuthorizationProvider;
/**
 * Factory contract for building {@link ConfigurationSource} instances, plus a
 * static lookup helper that finds a suitable factory via {@link ServiceLoader}.
 */
public interface ConfigurationSourceFactory {

  /**
   * Locates a registered factory whose supported schemes include the scheme of
   * the supplied URI and asks it to build a ConfigurationSource.
   *
   * @param uri the location of the configuration data.
   * @param authorizationProvider supplies credentials for remote sources; may be null.
   * @param verifyHost whether TLS host name verification should be performed.
   * @return the ConfigurationSource, or null when no registered factory handles
   *         the URI's scheme.
   */
  static ConfigurationSource getConfigurationSource(URI uri,
      AuthorizationProvider authorizationProvider, boolean verifyHost) {
    final String scheme = uri.getScheme();
    final ServiceLoader<ConfigurationSourceFactory> loader =
        ServiceLoader.load(ConfigurationSourceFactory.class,
            ConfigurationSourceFactory.class.getClassLoader());
    for (final ConfigurationSourceFactory factory : loader) {
      if (factory.getSchemes().contains(scheme)) {
        return factory.createConfigurationSource(uri, authorizationProvider, verifyHost);
      }
    }
    return null;
  }

  /**
   * @return the URI schemes (e.g. "classpath") this factory can handle.
   */
  List<String> getSchemes();

  /**
   * Builds a ConfigurationSource for the given URI.
   *
   * @param uri the location of the configuration data.
   * @param authorizationProvider supplies credentials for remote sources; may be null.
   * @param verifyHost whether TLS host name verification should be performed.
   * @return the new ConfigurationSource.
   */
  ConfigurationSource createConfigurationSource(URI uri,
      AuthorizationProvider authorizationProvider, boolean verifyHost);
}
| 9,600 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/ClasspathConfigurationSourceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.flume.node;
import java.net.URI;
import java.util.List;
import org.apache.flume.node.net.AuthorizationProvider;
import com.google.common.collect.Lists;
/**
* Creates a ConfigurationSource from a file on the classpath..
*/
public class ClasspathConfigurationSourceFactory implements ConfigurationSourceFactory {
private static final List<String> SCHEMES = Lists.newArrayList("classpath");
public List<String> getSchemes() {
return SCHEMES;
}
public ConfigurationSource createConfigurationSource(URI uri,
AuthorizationProvider authorizationProvider, boolean verifyHost) {
return new ClasspathConfigurationSource(uri);
}
}
| 9,601 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/PropertiesFileConfigurationProvider.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.node;
import java.io.File;
import com.google.common.collect.Lists;
/**
* <p>
* A configuration provider that uses properties file for specifying
* configuration. The configuration files follow the Java properties file syntax
* rules specified at {@link java.util.Properties#load(java.io.Reader)}. Every
* configuration value specified in the properties file is prefixed by an
* <em>Agent Name</em> which helps isolate an individual agent's namespace.
* </p>
* <p>
* Valid configuration files must observe the following rules for every agent
* namespace.
* <ul>
* <li>For every <agent name> there must be three lists specified that
* include <tt><agent name>.sources</tt>,
* <tt><agent name>.sinks</tt>, and <tt><agent name>.channels</tt>.
* Each of these lists must contain a space separated list of names
* corresponding to that particular entity.</li>
* <li>For each source named in <tt><agent name>.sources</tt>, there must
* be a non-empty <tt>type</tt> attribute specified from the valid set of source
* types. For example:
* <tt><agent name>.sources.<source name>.type = event</tt></li>
* <li>For each source named in <tt><agent name>.sources</tt>, there must
* be a space-separated list of channel names that the source will associate
* with during runtime. Each of these names must be contained in the channels
* list specified by <tt><agent name>.channels</tt>. For example:
* <tt><agent name>.sources.<source name>.channels =
* <channel-1 name> <channel-2 name></tt></li>
* <li>For each source named in the <tt><agent name>.sources</tt>, there
* must be a <tt>runner</tt> namespace of configuration that configures the
* associated source runner. For example:
* <tt><agent name>.sources.<source name>.runner.type = avro</tt>.
* This namespace can also be used to configure other configuration of the
* source runner as needed. For example:
* <tt><agent name>.sources.<source name>.runner.port = 10101</tt>
* </li>
* <li>For each source named in <tt><sources>.sources</tt> there can
* be an optional <tt>selector.type</tt> specified that identifies the type
* of channel selector associated with the source. If not specified, the
* default replicating channel selector is used.
* </li><li>For each channel named in the <tt><agent name>.channels</tt>,
* there must be a non-empty <tt>type</tt> attribute specified from the valid
* set of channel types. For example:
* <tt><agent name>.channels.<channel name>.type = mem</tt></li>
* <li>For each sink named in the <tt><agent name>.sinks</tt>, there must
* be a non-empty <tt>type</tt> attribute specified from the valid set of sink
* types. For example:
* <tt><agent name>.sinks.<sink name>.type = hdfs</tt></li>
* <li>For each sink named in the <tt><agent name>.sinks</tt>, there must
* be a non-empty single-valued channel name specified as the value of the
* <tt>channel</tt> attribute. This value must be contained in the channels list
* specified by <tt><agent name>.channels</tt>. For example:
* <tt><agent name>.sinks.<sink name>.channel =
* <channel name></tt></li>
* <li>For each sink named in the <tt><agent name>.sinks</tt>, there must
* be a <tt>runner</tt> namespace of configuration that configures the
* associated sink runner. For example:
* <tt><agent name>.sinks.<sink name>.runner.type = polling</tt>.
* This namespace can also be used to configure other configuration of the sink
* runner as needed. For example:
* <tt><agent name>.sinks.<sink name>.runner.polling.interval =
* 60</tt></li>
* <li>A fourth optional list <tt><agent name>.sinkgroups</tt>
* may be added to each agent, consisting of unique space separated names
* for groups</li>
* <li>Each sinkgroup must specify sinks, containing a list of all sinks
* belonging to it. These cannot be shared by multiple groups.
* Further, one can set a processor and behavioral parameters to determine
* how sink selection is made via <tt><agent name>.sinkgroups.<
* group name<.processor</tt>. For further detail refer to individual processor
* documentation</li>
* <li>Sinks not assigned to a group will be assigned to default single sink
* groups.</li>
* </ul>
*
* Apart from the above required configuration values, each source, sink or
* channel can have its own set of arbitrary configuration as required by the
* implementation. Each of these configuration values are expressed by fully
* namespace qualified configuration keys. For example, the configuration
* property called <tt>capacity</tt> for a channel called <tt>ch1</tt> for the
* agent named <tt>host1</tt> with value <tt>1000</tt> will be expressed as:
* <tt>host1.channels.ch1.capacity = 1000</tt>.
* </p>
* <p>
* Any information contained in the configuration file other than what pertains
* to the configured agents, sources, sinks and channels via the explicitly
* enumerated list of sources, sinks and channels per agent name are ignored by
* this provider. Moreover, if any of the required configuration values are not
* present in the configuration file for the configured entities, that entity
* and anything that depends upon it is considered invalid and consequently not
* configured. For example, if a channel is missing its <tt>type</tt> attribute,
* it is considered misconfigured. Also, any sources or sinks that depend upon
* this channel are also considered misconfigured and not initialized.
* </p>
* <p>
* Example configuration file:
*
* <pre>
* #
* # Flume Configuration
* # This file contains configuration for one Agent identified as host1.
* #
*
* host1.sources = avroSource thriftSource
* host1.channels = jdbcChannel
* host1.sinks = hdfsSink
*
* # avroSource configuration
* host1.sources.avroSource.type = org.apache.flume.source.AvroSource
* host1.sources.avroSource.runner.type = avro
* host1.sources.avroSource.runner.port = 11001
* host1.sources.avroSource.channels = jdbcChannel
* host1.sources.avroSource.selector.type = replicating
*
* # thriftSource configuration
* host1.sources.thriftSource.type = org.apache.flume.source.ThriftSource
* host1.sources.thriftSource.runner.type = thrift
* host1.sources.thriftSource.runner.port = 12001
* host1.sources.thriftSource.channels = jdbcChannel
*
* # jdbcChannel configuration
* host1.channels.jdbcChannel.type = jdbc
* host1.channels.jdbcChannel.jdbc.driver = com.mysql.jdbc.Driver
* host1.channels.jdbcChannel.jdbc.connect.url = http://localhost/flumedb
* host1.channels.jdbcChannel.jdbc.username = flume
* host1.channels.jdbcChannel.jdbc.password = flume
*
* # hdfsSink configuration
* host1.sinks.hdfsSink.type = hdfs
* host1.sinks.hdfsSink.hdfs.path = hdfs://localhost/
* host1.sinks.hdfsSink.batchsize = 1000
* host1.sinks.hdfsSink.runner.type = polling
* host1.sinks.hdfsSink.runner.polling.interval = 60
* </pre>
*
* </p>
*
* @see java.util.Properties#load(java.io.Reader)
* @deprecated Use UriConfigurationProvider.
*/
@Deprecated
public class PropertiesFileConfigurationProvider extends UriConfigurationProvider {
  /**
   * Adapts the legacy file-based constructor onto UriConfigurationProvider by
   * wrapping the file in a FileConfigurationSource.
   *
   * @param agentName the name of the agent whose configuration is read.
   * @param file the properties file containing the configuration.
   */
  public PropertiesFileConfigurationProvider(String agentName, File file) {
    // The trailing null, null, 0 arguments presumably disable optional
    // features of UriConfigurationProvider — TODO confirm against its ctor.
    super(agentName, Lists.newArrayList(new FileConfigurationSource(file.toURI())), null, null, 0);
    // NOTE(review): starting the provider from within the constructor publishes
    // a partially-constructed instance; kept as-is for backward compatibility
    // since existing callers rely on the provider being started on return.
    super.start();
  }
}
| 9,602 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/ClasspathConfigurationSource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.node;
import java.io.InputStream;
import java.net.URI;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.conf.ConfigurationException;
/**
 * A ConfigurationSource backed by a resource on the classpath.
 */
public class ClasspathConfigurationSource implements ConfigurationSource {

  // Resource name relative to the classpath root (no leading '/').
  private final String path;
  private final URI uri;

  /**
   * @param uri a classpath URI in one of the accepted forms:
   *            classpath:///filename, classpath:/filename, classpath://filename,
   *            or classpath:filename.
   * @throws ConfigurationException when no resource path can be extracted.
   */
  public ClasspathConfigurationSource(URI uri) {
    this.uri = uri;
    if (StringUtils.isNotEmpty(uri.getPath())) {
      // classpath:///filename && classpath:/filename
      this.path = uri.getPath().substring(1);
    } else if (StringUtils.isNotEmpty(uri.getAuthority())) {
      // classpath://filename
      this.path = uri.getAuthority();
    } else if (StringUtils.isNotEmpty(uri.getSchemeSpecificPart())) {
      // classpath:filename (opaque URI — getPath() is null in this form)
      this.path = uri.getSchemeSpecificPart();
    } else {
      throw new ConfigurationException("Invalid uri: " + uri);
    }
  }

  /**
   * @return a stream over the resource, or null if the resource is absent.
   */
  @Override
  public InputStream getInputStream() {
    return this.getClass().getClassLoader().getResourceAsStream(path);
  }

  @Override
  public String getUri() {
    return this.uri.toString();
  }

  /**
   * Derives the configuration format from the resource name's extension,
   * defaulting to properties when none is present.
   */
  @Override
  public String getExtension() {
    // Use the resolved path rather than uri.getPath(): for opaque URIs such as
    // "classpath:filename" getPath() returns null and dereferencing it would
    // throw a NullPointerException.
    int index = path.indexOf(".");
    if (index <= 0) {
      return PROPERTIES;
    }
    return path.substring(index + 1);
  }

  @Override
  public String toString() {
    return "{ classpath: " + path + "}";
  }
}
| 9,603 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/EnvVarResolverProperties.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.node;
import com.google.common.base.Preconditions;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * A class that extends the Java built-in Properties overriding
 * {@link java.util.Properties#getProperty(String)} to allow ${ENV_VAR_NAME}-style environment
 * variable inclusions.
 * @deprecated Use ${env:key} instead.
 */
@Deprecated
public class EnvVarResolverProperties extends Properties {
  private static final long serialVersionUID = -9134232469049352862L;

  // Matches ${ENV_VAR_NAME}; compiled once since the pattern never changes.
  private static final Pattern ENV_VAR_PATTERN = Pattern.compile("\\$\\{(\\w+)\\}");

  /**
   * @param input The input string with ${ENV_VAR_NAME}-style environment variable names
   * @return The output string with ${ENV_VAR_NAME} replaced with their environment
   *         variable values; undefined variables are replaced with the empty string
   * @throws NullPointerException if {@code input} is null
   */
  protected static String resolveEnvVars(String input) {
    if (input == null) {
      throw new NullPointerException("input must not be null");
    }
    Matcher m = ENV_VAR_PATTERN.matcher(input);
    StringBuffer sb = new StringBuffer();
    while (m.find()) {
      String envVarValue = System.getenv(m.group(1));
      // quoteReplacement prevents '$' or '\' in the environment value from
      // being interpreted as group references by appendReplacement.
      m.appendReplacement(sb, Matcher.quoteReplacement(envVarValue == null ? "" : envVarValue));
    }
    m.appendTail(sb);
    return sb.toString();
  }

  /**
   * @param key the property key
   * @return the value of the property key with ${ENV_VAR_NAME}-style environment
   *         variables replaced, or null if the key is absent (preserving the
   *         {@link java.util.Properties#getProperty(String)} contract)
   */
  @Override
  public String getProperty(String key) {
    String value = super.getProperty(key);
    // An absent key must yield null, not a NullPointerException from the resolver.
    return value == null ? null : resolveEnvVars(value);
  }
}
| 9,604 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/net/LaxHostnameVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.flume.node.net;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSession;
/**
 * A HostnameVerifier that unconditionally accepts every host name.
 */
public final class LaxHostnameVerifier implements HostnameVerifier {

  /**
   * Shared singleton; the class is stateless, so a single instance suffices.
   */
  public static final HostnameVerifier INSTANCE = new LaxHostnameVerifier();

  // Not instantiable from outside; use INSTANCE.
  private LaxHostnameVerifier() {
  }

  @Override
  public boolean verify(final String hostname, final SSLSession session) {
    // Every host name is considered valid.
    return true;
  }
}
| 9,605 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/net/UrlConnectionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.flume.node.net;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import javax.net.ssl.HttpsURLConnection;
/**
 * Constructs HttpURLConnections used to retrieve configuration data over HTTP(S).
 */
public class UrlConnectionFactory {
  private static final int DEFAULT_TIMEOUT = 60000;
  private static int connectTimeoutMillis = DEFAULT_TIMEOUT;
  private static int readTimeoutMillis = DEFAULT_TIMEOUT;
  private static final String XML = "application/xml";
  private static final String YAML = "application/yaml";
  private static final String JSON = "application/json";
  private static final String PROPERTIES = "text/x-java-properties";
  private static final String TEXT = "text/plain";
  public static final String HTTP = "http";
  public static final String HTTPS = "https";

  /**
   * Creates a fully configured HTTP(S) GET connection for the given URL.
   *
   * @param url the resource to fetch.
   * @param authorizationProvider optional provider of an Authorization header; may be null.
   * @param lastModifiedMillis when positive, sent as an If-Modified-Since header so
   *        the server may answer 304 Not Modified.
   * @param verifyHost when false and the scheme is https, TLS host name verification
   *        is disabled via {@link LaxHostnameVerifier}.
   * @return the configured, not-yet-connected HttpURLConnection.
   * @throws IOException if the connection cannot be opened.
   */
  public static HttpURLConnection createConnection(URL url,
      AuthorizationProvider authorizationProvider, long lastModifiedMillis, boolean verifyHost)
      throws IOException {
    final HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection();
    if (HTTPS.equals(url.getProtocol()) && !verifyHost) {
      ((HttpsURLConnection) urlConnection).setHostnameVerifier(LaxHostnameVerifier.INSTANCE);
    }
    if (authorizationProvider != null) {
      authorizationProvider.addAuthorization(urlConnection);
    }
    urlConnection.setAllowUserInteraction(false);
    // This is a body-less GET, so doOutput must remain false: per the
    // HttpURLConnection documentation, enabling output marks the connection as
    // intended for writing a request body, which can cause the method to be
    // switched to POST once an output stream is obtained.
    urlConnection.setDoOutput(false);
    urlConnection.setDoInput(true);
    urlConnection.setRequestMethod("GET");
    if (connectTimeoutMillis > 0) {
      urlConnection.setConnectTimeout(connectTimeoutMillis);
    }
    if (readTimeoutMillis > 0) {
      urlConnection.setReadTimeout(readTimeoutMillis);
    }
    urlConnection.setRequestProperty("Content-Type", getContentType(url));
    if (lastModifiedMillis > 0) {
      ZonedDateTime zdt = Instant.ofEpochMilli(lastModifiedMillis).atZone(ZoneOffset.UTC);
      String lastModified = DateTimeFormatter.RFC_1123_DATE_TIME.format(zdt);
      urlConnection.setRequestProperty("If-Modified-Since", lastModified);
    }
    return urlConnection;
  }

  /**
   * Creates a connection with no authorization, no cache validation, and host
   * verification enabled.
   */
  public static URLConnection createConnection(URL url) throws IOException {
    return createConnection(url, null, 0, true);
  }

  /**
   * Creates a connection for any supported protocol; HTTP(S) URLs receive the
   * full GET configuration, everything else falls back to URL.openConnection().
   */
  public static URLConnection createConnection(URL url, AuthorizationProvider authorizationProvider)
      throws IOException {
    final String protocol = url.getProtocol();
    if (HTTPS.equals(protocol) || HTTP.equals(protocol)) {
      return createConnection(url, authorizationProvider, 0, true);
    }
    return url.openConnection();
  }

  /**
   * Maps the URL's file extension to a MIME type, defaulting to text/plain.
   */
  private static String getContentType(URL url) {
    String[] fileParts = url.getFile().split("\\.");
    String type = fileParts[fileParts.length - 1].trim();
    switch (type) {
      case "properties":
        return PROPERTIES;
      case "json":
        return JSON;
      case "yaml":
      case "yml":
        return YAML;
      case "xml":
        return XML;
      default:
        return TEXT;
    }
  }
}
| 9,606 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/net/BasicAuthorizationProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.flume.node.net;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
/**
 * AuthorizationProvider that supplies an HTTP Basic Authentication header.
 */
public class BasicAuthorizationProvider implements AuthorizationProvider {

  private static final Base64.Encoder encoder = Base64.getEncoder();

  // Pre-computed "Basic <base64(user:pass)>" header value, or null when
  // credentials were not fully supplied.
  private String authString = null;

  /**
   * @param userName the user name; when either argument is null, no header is added.
   * @param password the password.
   */
  public BasicAuthorizationProvider(String userName, String password) {
    if (userName != null && password != null) {
      byte[] credentials = (userName + ":" + password).getBytes(StandardCharsets.UTF_8);
      authString = "Basic " + encoder.encodeToString(credentials);
    }
  }

  @Override
  public void addAuthorization(URLConnection urlConnection) {
    if (authString != null) {
      urlConnection.setRequestProperty("Authorization", authString);
    }
  }
}
| 9,607 |
0 | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node | Create_ds/flume/flume-ng-node/src/main/java/org/apache/flume/node/net/AuthorizationProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.flume.node.net;
import java.net.URLConnection;
/**
 * Interface to be implemented to add an Authorization header to an HTTP request.
 */
public interface AuthorizationProvider {

  /**
   * Adds authorization information (typically an Authorization request
   * property) to the supplied connection.
   *
   * @param urlConnection the connection to decorate with credentials.
   */
  void addAuthorization(URLConnection urlConnection);
}
| 9,608 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/TestJMSMessageConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.util.List;
import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.Session;
import javax.jms.Topic;
import javax.jms.TopicSubscriber;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.junit.Test;
import com.google.common.base.Optional;
public class TestJMSMessageConsumer extends JMSMessageConsumerTestBase {
@Test(expected = FlumeException.class)
public void testCreateConnectionFails() throws Exception {
when(connectionFactory.createConnection(USERNAME, PASSWORD))
.thenThrow(new JMSException(""));
create();
}
@Test
public void testCreateSessionFails() throws Exception {
when(connection.createSession(true, Session.SESSION_TRANSACTED))
.thenThrow(new JMSException(""));
try {
create();
fail("Expected exception: org.apache.flume.FlumeException");
} catch (FlumeException e) {
verify(connection).close();
}
}
@Test
public void testCreateQueueFails() throws Exception {
when(session.createQueue(destinationName))
.thenThrow(new JMSException(""));
try {
create();
fail("Expected exception: org.apache.flume.FlumeException");
} catch (FlumeException e) {
verify(session).close();
verify(connection).close();
}
}
@Test
public void testCreateTopicFails() throws Exception {
destinationType = JMSDestinationType.TOPIC;
when(session.createTopic(destinationName))
.thenThrow(new JMSException(""));
try {
create();
fail("Expected exception: org.apache.flume.FlumeException");
} catch (FlumeException e) {
verify(session).close();
verify(connection).close();
}
}
@Test
public void testCreateConsumerFails() throws Exception {
when(session.createConsumer(any(Destination.class), anyString()))
.thenThrow(new JMSException(""));
try {
create();
fail("Expected exception: org.apache.flume.FlumeException");
} catch (FlumeException e) {
verify(session).close();
verify(connection).close();
}
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidDestination() throws Exception {
create(null, JMSDestinationLocator.JNDI, "ldap://localhost:389/test");
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidBatchSizeZero() throws Exception {
batchSize = 0;
create();
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidPollTime() throws Exception {
pollTimeout = -1L;
create();
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidBatchSizeNegative() throws Exception {
batchSize = -1;
create();
}
@Test
public void testQueue() throws Exception {
destinationType = JMSDestinationType.QUEUE;
when(session.createQueue(destinationName)).thenReturn(queue);
consumer = create();
List<Event> events = consumer.take();
assertEquals(batchSize, events.size());
assertBodyIsExpected(events);
verify(session, never()).createTopic(anyString());
}
@Test
public void testTopic() throws Exception {
destinationType = JMSDestinationType.TOPIC;
when(session.createTopic(destinationName)).thenReturn(topic);
consumer = create();
List<Event> events = consumer.take();
assertEquals(batchSize, events.size());
assertBodyIsExpected(events);
verify(session, never()).createQueue(anyString());
}
@Test
public void testUserPass() throws Exception {
consumer = create();
List<Event> events = consumer.take();
assertEquals(batchSize, events.size());
assertBodyIsExpected(events);
}
@Test
public void testNoUserPass() throws Exception {
userName = Optional.absent();
when(connectionFactory.createConnection(USERNAME, PASSWORD)).thenThrow(new AssertionError());
when(connectionFactory.createConnection()).thenReturn(connection);
consumer = create();
List<Event> events = consumer.take();
assertEquals(batchSize, events.size());
assertBodyIsExpected(events);
}
@Test
public void testNoEvents() throws Exception {
when(messageConsumer.receive(anyLong())).thenReturn(null);
consumer = create();
List<Event> events = consumer.take();
assertEquals(0, events.size());
verify(messageConsumer, times(1)).receive(anyLong());
verifyNoMoreInteractions(messageConsumer);
}
@Test
public void testSingleEvent() throws Exception {
when(messageConsumer.receiveNoWait()).thenReturn(null);
consumer = create();
List<Event> events = consumer.take();
assertEquals(1, events.size());
assertBodyIsExpected(events);
}
@Test
public void testPartialBatch() throws Exception {
when(messageConsumer.receiveNoWait()).thenReturn(message, (Message)null);
consumer = create();
List<Event> events = consumer.take();
assertEquals(2, events.size());
assertBodyIsExpected(events);
}
@Test
public void testCommit() throws Exception {
consumer = create();
consumer.commit();
verify(session, times(1)).commit();
}
@Test
public void testRollback() throws Exception {
consumer = create();
consumer.rollback();
verify(session, times(1)).rollback();
}
@Test
public void testClose() throws Exception {
doThrow(new JMSException("")).when(session).close();
consumer = create();
consumer.close();
verify(session, times(1)).close();
verify(connection, times(1)).close();
}
@Test
public void testCreateDurableSubscription() throws Exception {
String name = "SUBSCRIPTION_NAME";
String clientID = "CLIENT_ID";
TopicSubscriber mockTopicSubscriber = mock(TopicSubscriber.class);
when(session.createDurableSubscriber(any(Topic.class), anyString(), anyString(), anyBoolean()))
.thenReturn(mockTopicSubscriber );
when(session.createTopic(destinationName)).thenReturn(topic);
new JMSMessageConsumer(WONT_USE, connectionFactory, destinationName, destinationLocator,
JMSDestinationType.TOPIC, messageSelector, batchSize, pollTimeout, converter, userName,
password, Optional.of(clientID), true, name);
verify(connection, times(1)).setClientID(clientID);
verify(session, times(1)).createDurableSubscriber(topic, name, messageSelector, true);
}
@Test(expected = JMSException.class)
public void testTakeFailsDueToJMSExceptionFromReceive() throws JMSException {
when(messageConsumer.receive(anyLong())).thenThrow(new JMSException(""));
consumer = create();
consumer.take();
}
@Test(expected = JMSException.class)
public void testTakeFailsDueToRuntimeExceptionFromReceive() throws JMSException {
when(messageConsumer.receive(anyLong())).thenThrow(new RuntimeException());
consumer = create();
consumer.take();
}
  @Test(expected = JMSException.class)
  public void testTakeFailsDueToJMSExceptionFromReceiveNoWait() throws JMSException {
    // A JMSException from the non-blocking receive must propagate out of take().
    when(messageConsumer.receiveNoWait()).thenThrow(new JMSException(""));
    consumer = create();
    consumer.take();
  }
  @Test(expected = JMSException.class)
  public void testTakeFailsDueToRuntimeExceptionFromReceiveNoWait() throws JMSException {
    // A RuntimeException from receiveNoWait must surface from take() as a
    // JMSException (per the expected annotation above).
    when(messageConsumer.receiveNoWait()).thenThrow(new RuntimeException());
    consumer = create();
    consumer.take();
  }
  @Test
  public void testCommitFailsDueToJMSException() throws JMSException {
    // No expected exception: a JMSException raised by the session during
    // commit must not propagate out of consumer.commit().
    doThrow(new JMSException("")).when(session).commit();
    consumer = create();
    consumer.commit();
  }
  @Test
  public void testCommitFailsDueToRuntimeException() throws JMSException {
    // No expected exception: a RuntimeException raised by the session during
    // commit must not propagate out of consumer.commit().
    doThrow(new RuntimeException()).when(session).commit();
    consumer = create();
    consumer.commit();
  }
  @Test
  public void testRollbackFailsDueToJMSException() throws JMSException {
    // No expected exception: a JMSException raised by the session during
    // rollback must not propagate out of consumer.rollback().
    doThrow(new JMSException("")).when(session).rollback();
    consumer = create();
    consumer.rollback();
  }
  @Test
  public void testRollbackFailsDueToRuntimeException() throws JMSException {
    // No expected exception: a RuntimeException raised by the session during
    // rollback must not propagate out of consumer.rollback().
    doThrow(new RuntimeException()).when(session).rollback();
    consumer = create();
    consumer.rollback();
  }
}
| 9,609 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/TestDefaultJMSMessageConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.Map;
import javax.jms.BytesMessage;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.ObjectMessage;
import javax.jms.TextMessage;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
/**
 * Unit tests for {@code DefaultJMSMessageConverter}: conversion of JMS text,
 * bytes and object messages into Flume events, including the mapping of JMS
 * string properties to Flume event headers.
 *
 * <p>Fix: {@code assertEquals} calls previously passed arguments in
 * (actual, expected) order, which produces misleading failure messages;
 * JUnit's convention is expected first.
 */
public class TestDefaultJMSMessageConverter {
  private static final String TEXT = "text";
  private static final byte[] BYTES = TEXT.getBytes(Charsets.UTF_8);

  private Context context;
  private Message message;
  private Map<String, String> headers;
  private JMSMessageConverter converter;

  @Before
  public void setUp() throws Exception {
    headers = Maps.newHashMap();
    context = new Context();
    converter = new DefaultJMSMessageConverter.Builder().build(context);
  }

  /** Installs a mock {@link TextMessage} whose body is {@link #TEXT}. */
  void createTextMessage() throws Exception {
    TextMessage message = mock(TextMessage.class);
    when(message.getText()).thenReturn(TEXT);
    this.message = message;
  }

  /** Installs a mock {@link TextMessage} with a null body. */
  void createNullTextMessage() throws Exception {
    TextMessage message = mock(TextMessage.class);
    when(message.getText()).thenReturn(null);
    this.message = message;
  }

  /**
   * Installs a mock {@link BytesMessage} that reports {@link #BYTES} as its
   * body and copies it into the buffer supplied by the converter.
   */
  void createBytesMessage() throws Exception {
    BytesMessage message = mock(BytesMessage.class);
    when(message.getBodyLength()).thenReturn((long)BYTES.length);
    when(message.readBytes(any(byte[].class))).then(new Answer<Integer>() {
      @Override
      public Integer answer(InvocationOnMock invocation) throws Throwable {
        byte[] buffer = (byte[])invocation.getArguments()[0];
        if (buffer != null) {
          // The converter is expected to size its buffer from getBodyLength().
          assertEquals(BYTES.length, buffer.length);
          System.arraycopy(BYTES, 0, buffer, 0, BYTES.length);
        }
        return BYTES.length;
      }
    });
    this.message = message;
  }

  /** Installs a mock {@link ObjectMessage} whose payload is {@link #TEXT}. */
  void createObjectMessage() throws Exception {
    ObjectMessage message = mock(ObjectMessage.class);
    when(message.getObject()).thenReturn(TEXT);
    this.message = message;
  }

  /**
   * Stubs the current mock message to expose the contents of {@link #headers}
   * as JMS string properties.
   */
  void createHeaders() throws Exception {
    final Iterator<String> keys = headers.keySet().iterator();
    when(message.getPropertyNames()).thenReturn(new Enumeration<Object>() {
      @Override
      public boolean hasMoreElements() {
        return keys.hasNext();
      }
      @Override
      public Object nextElement() {
        return keys.next();
      }
    });
    when(message.getStringProperty(anyString())).then(new Answer<String>() {
      @Override
      public String answer(InvocationOnMock invocation) throws Throwable {
        return headers.get(invocation.getArguments()[0]);
      }
    });
  }

  @Test
  public void testTextMessage() throws Exception {
    createTextMessage();
    headers.put("key1", "value1");
    headers.put("key2", "value2");
    createHeaders();
    Event event = converter.convert(message).iterator().next();
    assertEquals(headers, event.getHeaders());
    assertEquals(TEXT, new String(event.getBody(), Charsets.UTF_8));
  }

  @Test
  public void testNullTextMessage() throws Exception {
    createNullTextMessage();
    headers.put("key1", "value1");
    headers.put("key2", "value2");
    createHeaders();
    Event event = converter.convert(message).iterator().next();
    assertEquals(headers, event.getHeaders());
    // In case of a null text message, the event's body will be empty due to
    // SimpleEvent's body not updated with a valid text message.
    assertEquals(0, event.getBody().length);
  }

  @Test
  public void testBytesMessage() throws Exception {
    createBytesMessage();
    headers.put("key1", "value1");
    headers.put("key2", "value2");
    createHeaders();
    Event event = converter.convert(message).iterator().next();
    assertEquals(headers, event.getHeaders());
    assertArrayEquals(BYTES, event.getBody());
  }

  @Test(expected = JMSException.class)
  public void testBytesMessageTooLarge() throws Exception {
    // A body length that cannot fit in a byte[] must be rejected.
    createBytesMessage();
    when(((BytesMessage)message).getBodyLength()).thenReturn(Long.MAX_VALUE);
    createHeaders();
    converter.convert(message);
  }

  @Test(expected = JMSException.class)
  public void testBytesMessagePartialReturn() throws Exception {
    // readBytes() reporting a different count than getBodyLength() signals a
    // short/corrupt read and must be rejected.
    createBytesMessage();
    when(((BytesMessage)message).readBytes(any(byte[].class)))
        .thenReturn(BYTES.length + 1);
    createHeaders();
    converter.convert(message);
  }

  @Test
  public void testObjectMessage() throws Exception {
    createObjectMessage();
    headers.put("key1", "value1");
    headers.put("key2", "value2");
    createHeaders();
    Event event = converter.convert(message).iterator().next();
    assertEquals(headers, event.getHeaders());
    // The event body should be the Java serialization of the payload.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ObjectOutput out = new ObjectOutputStream(bos);
    out.writeObject(TEXT);
    assertArrayEquals(bos.toByteArray(), event.getBody());
  }

  @Test
  public void testNoHeaders() throws Exception {
    createTextMessage();
    createHeaders();
    Event event = converter.convert(message).iterator().next();
    assertEquals(Collections.EMPTY_MAP, event.getHeaders());
    assertEquals(TEXT, new String(event.getBody(), Charsets.UTF_8));
  }
}
| 9,610 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/JMSMessageConsumerTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.util.Enumeration;
import java.util.List;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.MessageConsumer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.jms.TextMessage;
import javax.jms.Topic;
import javax.naming.InitialContext;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.junit.After;
import org.junit.Before;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
/**
 * Shared fixture for JMS consumer/source tests: builds a fully mocked JMS
 * stack (connection factory, connection, transacted session, queue/topic,
 * message consumer and a single text message) plus default configuration
 * values. Subclasses hook into the lifecycle via {@link #beforeSetup()} /
 * {@link #afterSetup()} and {@link #beforeTearDown()} /
 * {@link #afterTearDown()}.
 */
public abstract class JMSMessageConsumerTestBase {
  static final String USERNAME = "userName";
  static final String PASSWORD = "password";
  static final String DESTINATION_NAME = "destinationName";
  static final String SELECTOR = "selector";
  static final String TEXT = "text";
  static final InitialContext WONT_USE = null;

  Context context;
  JMSMessageConsumer consumer;
  ConnectionFactory connectionFactory;
  String destinationName;
  JMSDestinationType destinationType;
  JMSDestinationLocator destinationLocator;
  String messageSelector;
  int batchSize;
  long pollTimeout;
  JMSMessageConverter converter;
  Optional<String> userName;
  Optional<String> password;
  Connection connection;
  Session session;
  Queue queue;
  Topic topic;
  MessageConsumer messageConsumer;
  TextMessage message;
  Event event;

  @Before
  public void setup() throws Exception {
    beforeSetup();
    // Assign configuration defaults before stubbing so the stubs reference
    // the real values. Previously session.createQueue(destinationName) was
    // stubbed while destinationName was still null, so the stub only matched
    // createQueue(null) and never the actual destination name.
    destinationName = DESTINATION_NAME;
    destinationType = JMSDestinationType.QUEUE;
    destinationLocator = JMSDestinationLocator.CDI;
    messageSelector = SELECTOR;
    batchSize = 10;
    pollTimeout = 500L;
    context = new Context();
    converter = new DefaultJMSMessageConverter.Builder().build(context);
    connectionFactory = mock(ConnectionFactory.class);
    connection = mock(Connection.class);
    session = mock(Session.class);
    queue = mock(Queue.class);
    topic = mock(Topic.class);
    messageConsumer = mock(MessageConsumer.class);
    message = mock(TextMessage.class);
    // The mock message carries no JMS properties and a fixed text body.
    when(message.getPropertyNames()).thenReturn(new Enumeration<Object>() {
      @Override
      public boolean hasMoreElements() {
        return false;
      }
      @Override
      public Object nextElement() {
        throw new UnsupportedOperationException();
      }
    });
    when(message.getText()).thenReturn(TEXT);
    when(connectionFactory.createConnection(USERNAME, PASSWORD)).thenReturn(connection);
    when(connection.createSession(true, Session.SESSION_TRANSACTED)).thenReturn(session);
    when(session.createQueue(destinationName)).thenReturn(queue);
    when(session.createConsumer(any(Destination.class), anyString())).thenReturn(messageConsumer);
    when(messageConsumer.receiveNoWait()).thenReturn(message);
    when(messageConsumer.receive(anyLong())).thenReturn(message);
    event = converter.convert(message).iterator().next();
    userName = Optional.of(USERNAME);
    password = Optional.of(PASSWORD);
    afterSetup();
  }

  /** Hook invoked before any fixture setup. */
  void beforeSetup() throws Exception {
  }

  /** Hook invoked after all fixture setup. */
  void afterSetup() throws Exception {
  }

  /** Hook invoked before the consumer is closed in tearDown. */
  void beforeTearDown() throws Exception {
  }

  /** Hook invoked after the consumer is closed in tearDown. */
  void afterTearDown() throws Exception {
  }

  /** Asserts that every event body equals {@link #TEXT}. */
  void assertBodyIsExpected(List<Event> events) {
    for (Event event : events) {
      assertEquals(TEXT, new String(event.getBody(), Charsets.UTF_8));
    }
  }

  /** Creates a consumer for the given destination configuration. */
  JMSMessageConsumer create(JMSDestinationType destinationType,
      JMSDestinationLocator destinationLocator, String destinationName) {
    return new JMSMessageConsumer(WONT_USE, connectionFactory, destinationName,
        destinationLocator, destinationType, messageSelector, batchSize,
        pollTimeout, converter, userName, password, Optional.<String>absent(), false, "");
  }

  /** Creates a consumer using the fixture's current configuration fields. */
  JMSMessageConsumer create() {
    return create(this.destinationType, this.destinationLocator, this.destinationName);
  }

  @After
  public void tearDown() throws Exception {
    beforeTearDown();
    if (consumer != null) {
      consumer.close();
    }
    afterTearDown();
  }
}
| 9,611 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/TestJMSSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import junit.framework.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.PollableSource.Status;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SourceCounter;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
/**
 * Unit tests for {@code JMSSource}: configuration validation, lifecycle,
 * converter wiring, and the process() loop — all against the mocked JMS
 * stack provided by {@link JMSMessageConsumerTestBase}.
 */
public class TestJMSSource extends JMSMessageConsumerTestBase {
  private JMSSource source;
  private Context context;
  private InitialContext initialContext;
  private ChannelProcessor channelProcessor;
  private List<Event> events;  // batches captured from the mocked channel processor
  private InitialContextFactory contextFactory;
  private File baseDir;        // temp dir holding the (empty) password file
  private File passwordFile;

  @SuppressWarnings("unchecked")
  @Override
  void afterSetup() throws Exception {
    System.setProperty(JMSSource.JNDI_ALLOWED_PROTOCOLS, "dummy");
    baseDir = Files.createTempDir();
    passwordFile = new File(baseDir, "password");
    Assert.assertTrue(passwordFile.createNewFile());
    initialContext = mock(InitialContext.class);
    channelProcessor = mock(ChannelProcessor.class);
    events = Lists.newArrayList();
    // Capture every batch handed to the channel processor instead of
    // writing to a real channel.
    doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        events.addAll((List<Event>)invocation.getArguments()[0]);
        return null;
      }
    }).when(channelProcessor).processEventBatch(any(List.class));
    consumer = spy(create());
    when(initialContext.lookup(anyString())).thenReturn(connectionFactory);
    contextFactory = mock(InitialContextFactory.class);
    when(contextFactory.create(any(Properties.class))).thenReturn(initialContext);
    source = spy(new JMSSource(contextFactory));
    // The spied source always hands back the spied consumer from the base fixture.
    doReturn(consumer).when(source).createConsumer();
    source.setName("JMSSource-" + UUID.randomUUID());
    source.setChannelProcessor(channelProcessor);
    context = new Context();
    context.put(JMSSourceConfiguration.BATCH_SIZE, String.valueOf(batchSize));
    context.put(JMSSourceConfiguration.DESTINATION_NAME, "INBOUND");
    context.put(JMSSourceConfiguration.DESTINATION_TYPE,
        JMSSourceConfiguration.DESTINATION_TYPE_QUEUE);
    context.put(JMSSourceConfiguration.PROVIDER_URL, "dummy:1414");
    context.put(JMSSourceConfiguration.INITIAL_CONTEXT_FACTORY, "ldap://dummy:389");
  }

  @Override
  void afterTearDown() throws Exception {
    FileUtils.deleteDirectory(baseDir);
  }

  @Test
  public void testStop() throws Exception {
    // Stopping the source must close its consumer.
    source.configure(context);
    source.start();
    source.stop();
    verify(consumer).close();
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithoutInitialContextFactory() throws Exception {
    context.put(JMSSourceConfiguration.INITIAL_CONTEXT_FACTORY, "");
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithoutProviderURL() throws Exception {
    context.put(JMSSourceConfiguration.PROVIDER_URL, "");
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithoutDestinationName() throws Exception {
    context.put(JMSSourceConfiguration.DESTINATION_NAME, "");
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithConnectionFactory() throws Exception {
    // An ldap:// connection-factory reference must be rejected.
    context.put(JMSSourceConfiguration.CONNECTION_FACTORY,
        "ldap://localhost:319/connectionFactory");
    source.configure(context);
  }

  @Test(expected = FlumeException.class)
  public void testConfigureWithBadDestinationType() throws Exception {
    context.put(JMSSourceConfiguration.DESTINATION_TYPE, "DUMMY");
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithEmptyDestinationType() throws Exception {
    context.put(JMSSourceConfiguration.DESTINATION_TYPE, "");
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithLdapProvider() throws Exception {
    // ldap is not in JNDI_ALLOWED_PROTOCOLS (only "dummy" is set above).
    context.put(JMSSourceConfiguration.PROVIDER_URL, "ldap://localhost:389/test");
    source.configure(context);
  }

  @Test
  public void testStartConsumerCreateThrowsException() throws Exception {
    // A failure to create the consumer surfaces as a FlumeException from process().
    doThrow(new RuntimeException("Expected")).when(source).createConsumer();
    source.configure(context);
    source.start();
    try {
      source.process();
      Assert.fail();
    } catch (FlumeException expected) {
    }
  }

  @Test(expected = FlumeException.class)
  public void testConfigureWithContextLookupThrowsException() throws Exception {
    when(initialContext.lookup(anyString())).thenThrow(new NamingException());
    source.configure(context);
  }

  @Test(expected = FlumeException.class)
  public void testConfigureWithContextCreateThrowsException() throws Exception {
    when(contextFactory.create(any(Properties.class)))
        .thenThrow(new NamingException());
    source.configure(context);
  }

  @Test(expected = IllegalArgumentException.class)
  public void testConfigureWithInvalidBatchSize() throws Exception {
    context.put(JMSSourceConfiguration.BATCH_SIZE, "0");
    source.configure(context);
  }

  @Test(expected = FlumeException.class)
  public void testConfigureWithInvalidPasswordFile() throws Exception {
    context.put(JMSSourceConfiguration.PASSWORD_FILE,
        "/dev/does/not/exist/nor/will/ever/exist");
    source.configure(context);
  }

  @Test
  public void testConfigureWithUserNameButNoPasswordFile() throws Exception {
    context.put(JMSSourceConfiguration.USERNAME, "dummy");
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
  }

  @Test
  public void testConfigureWithUserNameAndPasswordFile() throws Exception {
    context.put(JMSSourceConfiguration.USERNAME, "dummy");
    context.put(JMSSourceConfiguration.PASSWORD_FILE,
        passwordFile.getAbsolutePath());
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
  }

  @Test(expected = FlumeException.class)
  public void testConfigureWithInvalidConverterClass() throws Exception {
    context.put(JMSSourceConfiguration.CONVERTER_TYPE, "not a valid classname");
    source.configure(context);
  }

  @Test
  public void testProcessNoStart() throws Exception {
    // process() before start() must fail.
    try {
      source.process();
      Assert.fail();
    } catch (EventDeliveryException expected) {
    }
  }

  @Test
  public void testNonDefaultConverter() throws Exception {
    // tests that a classname can be specified
    context.put(JMSSourceConfiguration.CONVERTER_TYPE,
        DefaultJMSMessageConverter.Builder.class.getName());
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
    verify(consumer).commit();
  }

  /** Converter without a Builder and without Configurable; convert() is never called here. */
  public static class NonBuilderNonConfigurableConverter implements JMSMessageConverter {
    @Override
    public List<Event> convert(Message message) throws JMSException {
      throw new UnsupportedOperationException();
    }
  }

  /** Converter without a Builder but implementing Configurable; convert() is never called here. */
  public static class NonBuilderConfigurableConverter implements JMSMessageConverter, Configurable {
    @Override
    public List<Event> convert(Message message) throws JMSException {
      throw new UnsupportedOperationException();
    }
    @Override
    public void configure(Context context) {
    }
  }

  @Test
  public void testNonBuilderConfigurableConverter() throws Exception {
    // tests that a non builder by configurable converter works
    context.put(JMSSourceConfiguration.CONVERTER_TYPE,
        NonBuilderConfigurableConverter.class.getName());
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
    verify(consumer).commit();
  }

  @Test
  public void testNonBuilderNonConfigurableConverter() throws Exception {
    // tests that a non builder non configurable converter
    context.put(JMSSourceConfiguration.CONVERTER_TYPE,
        NonBuilderNonConfigurableConverter.class.getName());
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
    verify(consumer).commit();
  }

  @Test
  public void testProcessFullBatch() throws Exception {
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(batchSize, events.size());
    assertBodyIsExpected(events);
    verify(consumer).commit();
  }

  @Test
  public void testProcessNoEvents() throws Exception {
    // No message available: process() backs off but still commits.
    when(messageConsumer.receive(anyLong())).thenReturn(null);
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.BACKOFF, source.process());
    Assert.assertEquals(0, events.size());
    verify(consumer).commit();
  }

  @Test
  public void testProcessPartialBatch() throws Exception {
    when(messageConsumer.receiveNoWait()).thenReturn(message, (Message)null);
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.READY, source.process());
    Assert.assertEquals(2, events.size());
    assertBodyIsExpected(events);
    verify(consumer).commit();
  }

  @SuppressWarnings("unchecked")
  @Test
  public void testProcessChannelProcessorThrowsChannelException() throws Exception {
    // Channel failure: process() backs off and the JMS transaction is rolled back.
    doThrow(new ChannelException("dummy"))
        .when(channelProcessor).processEventBatch(any(List.class));
    source.configure(context);
    source.start();
    Assert.assertEquals(Status.BACKOFF, source.process());
    verify(consumer).rollback();
  }

  @SuppressWarnings("unchecked")
  @Test
  public void testProcessChannelProcessorThrowsError() throws Exception {
    // Errors propagate, but the JMS transaction must still be rolled back.
    doThrow(new Error())
        .when(channelProcessor).processEventBatch(any(List.class));
    source.configure(context);
    source.start();
    try {
      source.process();
      Assert.fail();
    } catch (Error ignores) {
    }
    verify(consumer).rollback();
  }

  @Test
  public void testProcessReconnect() throws Exception {
    // After ERROR_THRESHOLD_DEFAULT consecutive take() failures the source
    // closes the consumer (to reconnect) and bumps the read-fail counter.
    source.configure(context);
    source.start();
    when(consumer.take()).thenThrow(new JMSException("dummy"));
    int attempts = JMSSourceConfiguration.ERROR_THRESHOLD_DEFAULT;
    for (int i = 0; i < attempts; i++) {
      Assert.assertEquals(Status.BACKOFF, source.process());
    }
    Assert.assertEquals(Status.BACKOFF, source.process());
    SourceCounter sc = (SourceCounter) Whitebox.getInternalState(source, "sourceCounter");
    Assert.assertEquals(1, sc.getEventReadFail());
    verify(consumer, times(attempts + 1)).rollback();
    verify(consumer, times(1)).close();
  }

  @Test
  public void testErrorCounterEventReadFail() throws Exception {
    source.configure(context);
    source.start();
    when(consumer.take()).thenThrow(new RuntimeException("dummy"));
    source.process();
    SourceCounter sc = (SourceCounter) Whitebox.getInternalState(source, "sourceCounter");
    Assert.assertEquals(1, sc.getEventReadFail());
  }

  @Test
  public void testErrorCounterChannelWriteFail() throws Exception {
    source.configure(context);
    source.start();
    when(source.getChannelProcessor()).thenThrow(new ChannelException("dummy"));
    source.process();
    SourceCounter sc = (SourceCounter) Whitebox.getInternalState(source, "sourceCounter");
    Assert.assertEquals(1, sc.getChannelWriteFail());
  }
}
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/TestJMSSourceCreation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
import org.apache.flume.FlumeException;
import org.apache.flume.Source;
import org.apache.flume.SourceFactory;
import org.apache.flume.source.DefaultSourceFactory;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
 * Verifies that the default source factory can instantiate the JMS source by
 * its registered short name ("jms").
 */
public class TestJMSSourceCreation {
  private SourceFactory sourceFactory;

  @Before
  public void setUp() {
    sourceFactory = new DefaultSourceFactory();
  }

  /** Asks the factory for a source and checks it is a non-null instance of the given class. */
  private void verifySourceCreation(String name, String type,
      Class<?> typeClass) throws FlumeException {
    Source created = sourceFactory.create(name, type);
    Assert.assertNotNull(created);
    Assert.assertTrue(typeClass.isInstance(created));
  }

  @Test
  public void testJMSSourceCreation() {
    verifySourceCreation("jms-src", "jms", JMSSource.class);
  }
}
| 9,613 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/test/java/org/apache/flume/source/jms/TestIntegrationActiveMQ.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.MessageProducer;
import javax.jms.Session;
import javax.jms.TextMessage;
import junit.framework.Assert;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerPlugin;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.security.AuthenticationUser;
import org.apache.activemq.security.SimpleAuthenticationPlugin;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.PollableSource.Status;
import org.apache.flume.channel.ChannelProcessor;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@RunWith(Parameterized.class)
public class TestIntegrationActiveMQ {
private static final Logger LOGGER = LoggerFactory.getLogger(TestIntegrationActiveMQ.class);
private static final String INITIAL_CONTEXT_FACTORY =
"org.apache.activemq.jndi.ActiveMQInitialContextFactory";
public static final String BROKER_BIND_URL = "tcp://localhost:61516";
private static final String DESTINATION_NAME = "test";
// specific for dynamic queues on ActiveMq
public static final String JNDI_PREFIX = "dynamicQueues/";
  /** Whether the embedded broker (and the source) use JMS authentication. */
  private enum TestMode {
    WITH_AUTHENTICATION,
    WITHOUT_AUTHENTICATION
  }

  private File baseDir;  // scratch dir for broker tmp/data and the password file
  private File tmpDir;
  private File dataDir;
  private BrokerService broker;  // embedded ActiveMQ broker
  private Context context;
  private JMSSource source;
  private List<Event> events;  // events captured from the mocked channel processor
  private final String jmsUserName;  // null when running without authentication
  private final String jmsPassword;  // null when running without authentication
  /**
   * @param testMode selects whether the broker and the source use JMS
   *                 authentication; credentials stay null when disabled.
   */
  public TestIntegrationActiveMQ(TestMode testMode) {
    // Restrict JNDI to the tcp protocol used by the embedded broker.
    System.setProperty(JMSSource.JNDI_ALLOWED_PROTOCOLS, "tcp");
    LOGGER.info("Testing with test mode {}", testMode);
    switch (testMode) {
      case WITH_AUTHENTICATION:
        jmsUserName = "user";
        jmsPassword = "pass";
        break;
      case WITHOUT_AUTHENTICATION:
        jmsUserName = null;
        jmsPassword = null;
        break;
      default:
        throw new IllegalArgumentException("Unhandled test mode: " + testMode);
    }
  }
@Parameterized.Parameters
public static Collection<Object[]> parameters() {
return Arrays.asList(new Object[][]{
{TestMode.WITH_AUTHENTICATION},
{TestMode.WITHOUT_AUTHENTICATION}
});
}
  /**
   * Starts an embedded ActiveMQ broker (optionally with authentication) and
   * wires a JMSSource to a mocked ChannelProcessor that records every
   * delivered event batch into {@link #events}.
   */
  @SuppressWarnings("unchecked")
  @Before
  public void setup() throws Exception {
    baseDir = Files.createTempDir();
    tmpDir = new File(baseDir, "tmp");
    dataDir = new File(baseDir, "data");
    Assert.assertTrue(tmpDir.mkdir());
    broker = new BrokerService();
    broker.addConnector(BROKER_BIND_URL);
    broker.setTmpDataDirectory(tmpDir);
    broker.setDataDirectoryFile(dataDir);
    context = new Context();
    context.put(JMSSourceConfiguration.INITIAL_CONTEXT_FACTORY, INITIAL_CONTEXT_FACTORY);
    context.put(JMSSourceConfiguration.PROVIDER_URL, BROKER_BIND_URL);
    context.put(JMSSourceConfiguration.DESTINATION_NAME, DESTINATION_NAME);
    if (jmsUserName != null) {
      // Authenticated mode: register a single user on the broker and point
      // the source at a password file containing the matching credential.
      File passwordFile = new File(baseDir, "password");
      Files.write(jmsPassword.getBytes(Charsets.UTF_8), passwordFile);
      AuthenticationUser jmsUser = new AuthenticationUser(jmsUserName, jmsPassword, "");
      List<AuthenticationUser> users = Collections.singletonList(jmsUser);
      SimpleAuthenticationPlugin authentication = new SimpleAuthenticationPlugin(users);
      broker.setPlugins(new BrokerPlugin[]{authentication});
      context.put(JMSSourceConfiguration.USERNAME, jmsUserName);
      context.put(JMSSourceConfiguration.PASSWORD_FILE, passwordFile.getAbsolutePath());
    }
    broker.start();
    events = Lists.newArrayList();
    source = new JMSSource();
    source.setName("JMSSource-" + UUID.randomUUID());
    ChannelProcessor channelProcessor = mock(ChannelProcessor.class);
    // Capture batches delivered by the source instead of using a real channel.
    doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        events.addAll((List<Event>)invocation.getArguments()[0]);
        return null;
      }
    }).when(channelProcessor).processEventBatch(any(List.class));
    source.setChannelProcessor(channelProcessor);
  }
  /** Stops the source and the embedded broker, then removes the temp directories. */
  @After
  public void tearDown() throws Exception {
    if (source != null) {
      source.stop();
    }
    if (broker != null) {
      broker.stop();
    }
    FileUtils.deleteDirectory(baseDir);
  }
/**
 * Publishes each string as a JMS TextMessage to the test queue inside a
 * transacted session and commits, making the messages visible to consumers.
 *
 * @param events message bodies to enqueue
 * @throws Exception on any JMS failure
 */
private void putQueue(List<String> events) throws Exception {
  ConnectionFactory factory = new ActiveMQConnectionFactory(jmsUserName, jmsPassword,
      BROKER_BIND_URL);
  Connection connection = factory.createConnection();
  try {
    connection.start();
    // Transacted session: the ack-mode argument is ignored when transacted=true.
    Session session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE);
    try {
      Destination destination = session.createQueue(DESTINATION_NAME);
      MessageProducer producer = session.createProducer(destination);
      for (String event : events) {
        TextMessage message = session.createTextMessage();
        message.setText(event);
        producer.send(message);
      }
      session.commit();
    } finally {
      session.close();
    }
  } finally {
    // Fix: the connection (and session) previously leaked if send/commit threw.
    connection.close();
  }
}
/**
 * Publishes each string as a JMS TextMessage to the test topic inside a
 * transacted session and commits, making the messages visible to subscribers.
 *
 * @param events message bodies to publish
 * @throws Exception on any JMS failure
 */
private void putTopic(List<String> events) throws Exception {
  ConnectionFactory factory = new ActiveMQConnectionFactory(jmsUserName, jmsPassword,
      BROKER_BIND_URL);
  Connection connection = factory.createConnection();
  try {
    connection.start();
    // Transacted session: the ack-mode argument is ignored when transacted=true.
    Session session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE);
    try {
      Destination destination = session.createTopic(DESTINATION_NAME);
      MessageProducer producer = session.createProducer(destination);
      for (String event : events) {
        TextMessage message = session.createTextMessage();
        message.setText(event);
        producer.send(message);
      }
      session.commit();
    } finally {
      session.close();
    }
  } finally {
    // Fix: the connection (and session) previously leaked if send/commit threw.
    connection.close();
  }
}
/** Same scenario as {@link #testQueue()}, but resolves the destination via JNDI. */
@Test
public void testQueueLocatedWithJndi() throws Exception {
  // JNDI_PREFIX (defined above this chunk) routes the lookup through the broker's
  // JNDI namespace — presumably ActiveMQ's dynamic destination prefix; confirm.
  context.put(JMSSourceConfiguration.DESTINATION_NAME,
      JNDI_PREFIX + DESTINATION_NAME);
  context.put(JMSSourceConfiguration.DESTINATION_LOCATOR,
      JMSDestinationLocator.JNDI.name());
  testQueue();
}
/** End-to-end: messages published to a queue are drained into Flume events. */
@Test
public void testQueue() throws Exception {
  context.put(JMSSourceConfiguration.DESTINATION_TYPE,
      JMSSourceConfiguration.DESTINATION_TYPE_QUEUE);
  source.configure(context);
  source.start();
  // Sleeps give the source's consumer time to connect / the broker to dispatch.
  Thread.sleep(500L);
  List<String> expected = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    expected.add(String.valueOf(i));
  }
  putQueue(expected);
  Thread.sleep(500L);
  // First poll drains the batch (READY); second finds nothing (BACKOFF).
  Assert.assertEquals(Status.READY, source.process());
  Assert.assertEquals(Status.BACKOFF, source.process());
  Assert.assertEquals(expected.size(), events.size());
  List<String> actual = Lists.newArrayList();
  for (Event event : events) {
    actual.add(new String(event.getBody(), Charsets.UTF_8));
  }
  // Delivery order is not asserted, so compare sorted copies.
  Collections.sort(expected);
  Collections.sort(actual);
  Assert.assertEquals(expected, actual);
}
/** End-to-end: messages published to a topic are delivered as Flume events. */
@Test
public void testTopic() throws Exception {
  context.put(JMSSourceConfiguration.DESTINATION_TYPE,
      JMSSourceConfiguration.DESTINATION_TYPE_TOPIC);
  source.configure(context);
  source.start();
  // Topic semantics: the subscriber must exist BEFORE publishing, hence the
  // start-then-sleep before putTopic().
  Thread.sleep(500L);
  List<String> expected = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    expected.add(String.valueOf(i));
  }
  putTopic(expected);
  Thread.sleep(500L);
  // First poll drains the batch (READY); second finds nothing (BACKOFF).
  Assert.assertEquals(Status.READY, source.process());
  Assert.assertEquals(Status.BACKOFF, source.process());
  Assert.assertEquals(expected.size(), events.size());
  List<String> actual = Lists.newArrayList();
  for (Event event : events) {
    actual.add(new String(event.getBody(), Charsets.UTF_8));
  }
  // Delivery order is not asserted, so compare sorted copies.
  Collections.sort(expected);
  Collections.sort(actual);
  Assert.assertEquals(expected, actual);
}
/**
 * Verifies durable topic subscriptions: messages published while the source is
 * stopped ("during") must still be delivered after it restarts, alongside the
 * "before" and "after" batches.
 */
@Test
public void testDurableSubscription() throws Exception {
  context.put(JMSSourceConfiguration.DESTINATION_TYPE,
      JMSSourceConfiguration.DESTINATION_TYPE_TOPIC);
  // Durable subscriptions require a client id and a subscription name.
  context.put(JMSSourceConfiguration.CLIENT_ID, "FLUME");
  context.put(JMSSourceConfiguration.DURABLE_SUBSCRIPTION_NAME, "SOURCE1");
  context.put(JMSSourceConfiguration.CREATE_DURABLE_SUBSCRIPTION, "true");
  context.put(JMSSourceConfiguration.BATCH_SIZE, "10");
  source.configure(context);
  source.start();
  Thread.sleep(5000L);
  // Phase 1: publish while the source is running.
  List<String> expected = Lists.newArrayList();
  List<String> input = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    input.add("before " + String.valueOf(i));
  }
  expected.addAll(input);
  putTopic(input);
  Thread.sleep(500L);
  Assert.assertEquals(Status.READY, source.process());
  Assert.assertEquals(Status.BACKOFF, source.process());
  // Phase 2: publish while the source is STOPPED — the broker must retain
  // these messages for the durable subscriber.
  source.stop();
  Thread.sleep(500L);
  input = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    input.add("during " + String.valueOf(i));
  }
  expected.addAll(input);
  putTopic(input);
  // Phase 3: restart and publish again; all three batches must arrive.
  source.start();
  Thread.sleep(500L);
  input = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    input.add("after " + String.valueOf(i));
  }
  expected.addAll(input);
  putTopic(input);
  // Two batches of 10 pending (batchSize=10), so two READY polls then BACKOFF.
  Assert.assertEquals(Status.READY, source.process());
  Assert.assertEquals(Status.READY, source.process());
  Assert.assertEquals(Status.BACKOFF, source.process());
  Assert.assertEquals(expected.size(), events.size());
  List<String> actual = Lists.newArrayList();
  for (Event event : events) {
    actual.add(new String(event.getBody(), Charsets.UTF_8));
  }
  Collections.sort(expected);
  Collections.sort(actual);
  Assert.assertEquals(expected, actual);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
import java.util.List;
import javax.jms.JMSException;
import javax.jms.Message;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
/**
 * Converts a JMS Message to a list of Flume Events. It's recommended
 * that sub-classes define a static sub-class of the
 * inner Builder class to handle configuration. Alternatively,
 * the sub-class can implement the Configurable interface.
 *
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface JMSMessageConverter {

  /**
   * Converts one JMS message into one or more Flume events.
   *
   * @param message the JMS message to convert; never committed/acked here
   * @return the resulting events (may be empty, must not be null)
   * @throws JMSException if reading the message body or properties fails
   */
  List<Event> convert(Message message) throws JMSException;

  /**
   * Implementors of JMSMessageConverter must either provide
   * a suitable builder or implement the Configurable interface.
   */
  interface Builder {
    JMSMessageConverter build(Context context);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import javax.jms.Topic;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import java.util.ArrayList;
import java.util.List;
/**
 * Owns the full JMS consumer stack (connection, transacted session, destination,
 * consumer) for {@code JMSSource}. Batches are read with {@link #take()} and the
 * caller decides when to {@link #commit()} or {@link #rollback()} the session.
 * Not thread-safe; intended for use by a single source thread.
 */
class JMSMessageConsumer {
  private static final Logger logger = LoggerFactory.getLogger(JMSMessageConsumer.class);

  private final int batchSize;                    // max messages returned per take()
  private final long pollTimeout;                 // ms to block on the first receive
  private final JMSMessageConverter messageConverter;
  private final Connection connection;
  private final Session session;                  // transacted session
  private final Destination destination;
  private final MessageConsumer messageConsumer;

  /**
   * Connects to the broker and creates the session, destination, and consumer.
   * On any failure, already-created resources are released via close() before
   * the exception propagates, so a failed construction does not leak.
   */
  JMSMessageConsumer(InitialContext initialContext, ConnectionFactory connectionFactory,
      String destinationName, JMSDestinationLocator destinationLocator,
      JMSDestinationType destinationType, String messageSelector, int batchSize,
      long pollTimeout, JMSMessageConverter messageConverter,
      Optional<String> userName, Optional<String> password,
      Optional<String> clientId, boolean createDurableSubscription,
      String durableSubscriptionName) {
    this.batchSize = batchSize;
    this.pollTimeout = pollTimeout;
    this.messageConverter = messageConverter;
    Preconditions.checkArgument(batchSize > 0, "Batch size must be greater "
        + "than zero");
    Preconditions.checkArgument(pollTimeout >= 0, "Poll timeout cannot be " +
        "negative");
    try {
      try {
        if (userName.isPresent() && password.isPresent()) {
          connection = connectionFactory.createConnection(userName.get(), password.get());
        } else {
          connection = connectionFactory.createConnection();
        }
        // Client id must be set before the connection is used; required for
        // durable subscriptions.
        if (clientId.isPresent()) {
          connection.setClientID(clientId.get());
        }
        connection.start();
      } catch (JMSException e) {
        throw new FlumeException("Could not create connection to broker", e);
      }
      try {
        session = connection.createSession(true, Session.SESSION_TRANSACTED);
      } catch (JMSException e) {
        throw new FlumeException("Could not create session", e);
      }
      try {
        // CDI: create the destination directly from the session;
        // JNDI: validate the name's scheme, then look it up in the context.
        if (destinationLocator.equals(JMSDestinationLocator.CDI)) {
          switch (destinationType) {
            case QUEUE:
              destination = session.createQueue(destinationName);
              break;
            case TOPIC:
              destination = session.createTopic(destinationName);
              break;
            default:
              throw new IllegalStateException(String.valueOf(destinationType));
          }
        } else {
          JMSSource.verifyContext(destinationName);
          destination = (Destination) initialContext.lookup(destinationName);
        }
      } catch (JMSException e) {
        throw new FlumeException("Could not create destination " + destinationName, e);
      } catch (NamingException e) {
        throw new FlumeException("Could not find destination " + destinationName, e);
      }
      try {
        // Empty selector string means "no selector" (null) per the JMS API.
        if (createDurableSubscription) {
          messageConsumer = session.createDurableSubscriber(
              (Topic) destination, durableSubscriptionName,
              messageSelector.isEmpty() ? null : messageSelector, true);
        } else {
          messageConsumer = session.createConsumer(destination,
              messageSelector.isEmpty() ? null : messageSelector);
        }
      } catch (JMSException e) {
        throw new FlumeException("Could not create consumer", e);
      }
      String startupMsg = String.format("Connected to '%s' of type '%s' with " +
          "user '%s', batch size '%d', selector '%s' ", destinationName,
          destinationType, userName.isPresent() ? userName.get() : "null",
          batchSize, messageSelector.isEmpty() ? null : messageSelector);
      logger.info(startupMsg);
    } catch (Exception e) {
      close();
      throw e;
    }
  }

  /**
   * Reads up to batchSize messages: blocks up to pollTimeout for the first,
   * then drains the rest without waiting. Returns the converted events;
   * empty list if nothing arrived within the timeout.
   */
  List<Event> take() throws JMSException {
    List<Event> result = new ArrayList<Event>(batchSize);
    Message message;
    message = receive();
    if (message != null) {
      result.addAll(messageConverter.convert(message));
      int max = batchSize - 1;
      for (int i = 0; i < max; i++) {
        message = receiveNoWait();
        if (message == null) {
          break;
        }
        result.addAll(messageConverter.convert(message));
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug(String.format("Took batch of %s from %s", result.size(), destination));
    }
    return result;
  }

  // Blocking receive; wraps provider RuntimeExceptions as JMSException so the
  // caller's error-threshold handling sees a single exception type.
  private Message receive() throws JMSException {
    try {
      return messageConsumer.receive(pollTimeout);
    } catch (RuntimeException runtimeException) {
      JMSException jmsException = new JMSException("JMS provider has thrown runtime exception: "
          + runtimeException.getMessage());
      jmsException.setLinkedException(runtimeException);
      throw jmsException;
    }
  }

  // Non-blocking receive; same RuntimeException wrapping as receive().
  private Message receiveNoWait() throws JMSException {
    try {
      return messageConsumer.receiveNoWait();
    } catch (RuntimeException runtimeException) {
      JMSException jmsException = new JMSException("JMS provider has thrown runtime exception: "
          + runtimeException.getMessage());
      jmsException.setLinkedException(runtimeException);
      throw jmsException;
    }
  }

  /** Commits the session transaction; failures are logged, not rethrown. */
  void commit() {
    try {
      session.commit();
    } catch (JMSException jmsException) {
      logger.warn("JMS Exception processing commit", jmsException);
    } catch (RuntimeException runtimeException) {
      logger.warn("Runtime Exception processing commit", runtimeException);
    }
  }

  /** Rolls back the session transaction; failures are logged, not rethrown. */
  void rollback() {
    try {
      session.rollback();
    } catch (JMSException jmsException) {
      logger.warn("JMS Exception processing rollback", jmsException);
    } catch (RuntimeException runtimeException) {
      logger.warn("Runtime Exception processing rollback", runtimeException);
    }
  }

  /**
   * Best-effort release of session then connection. Each close is attempted
   * independently so a session failure doesn't leave the connection open.
   * Safe to call from the constructor's failure path (fields may be null).
   */
  void close() {
    try {
      if (session != null) {
        session.close();
      }
    } catch (JMSException e) {
      logger.error("Could not destroy session", e);
    }
    try {
      if (connection != null) {
        connection.close();
      }
    } catch (JMSException e) {
      logger.error("Could not destroy connection", e);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
/**
 * Property keys and defaults for configuring {@code JMSSource}.
 * Pure constant holder; values are read via the Flume {@code Context}.
 */
public class JMSSourceConfiguration {

  // --- JNDI / connection ---
  public static final String INITIAL_CONTEXT_FACTORY = "initialContextFactory";
  public static final String CONNECTION_FACTORY = "connectionFactory";
  public static final String CONNECTION_FACTORY_DEFAULT = "ConnectionFactory";
  public static final String PROVIDER_URL = "providerURL";

  // --- Destination ---
  public static final String DESTINATION_NAME = "destinationName";
  public static final String DESTINATION_TYPE = "destinationType";
  public static final String DESTINATION_LOCATOR = "destinationLocator";
  public static final String DESTINATION_LOCATOR_DEFAULT = "CDI";
  public static final String DESTINATION_TYPE_QUEUE = "queue";
  public static final String DESTINATION_TYPE_TOPIC = "topic";
  public static final String MESSAGE_SELECTOR = "messageSelector";
  public static final String CLIENT_ID = "clientId";

  // --- Credentials (password is read from a file, never inline) ---
  public static final String USERNAME = "userName";
  public static final String PASSWORD_FILE = "passwordFile";

  // --- Polling behavior ---
  public static final String BATCH_SIZE = "batchSize";
  public static final int BATCH_SIZE_DEFAULT = 100;
  // Consecutive JMSExceptions tolerated before the consumer is recycled.
  public static final String ERROR_THRESHOLD = "errorThreshold";
  public static final int ERROR_THRESHOLD_DEFAULT = 10;
  public static final String POLL_TIMEOUT = "pollTimeout";
  public static final long POLL_TIMEOUT_DEFAULT = 1000L;

  // --- Message converter (sub-properties under "converter.") ---
  public static final String CONVERTER = "converter";
  public static final String CONVERTER_TYPE = CONVERTER + ".type";
  public static final String CONVERTER_TYPE_DEFAULT = "DEFAULT";
  public static final String CONVERTER_CHARSET = CONVERTER + ".charset";
  public static final String CONVERTER_CHARSET_DEFAULT = "UTF-8";

  // --- Durable topic subscriptions ---
  public static final String CREATE_DURABLE_SUBSCRIPTION = "createDurableSubscription";
  public static final boolean DEFAULT_CREATE_DURABLE_SUBSCRIPTION = false;
  public static final String DURABLE_SUBSCRIPTION_NAME = "durableSubscriptionName";
  public static final String DEFAULT_DURABLE_SUBSCRIPTION_NAME = "";
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import javax.jms.BytesMessage;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.ObjectMessage;
import javax.jms.TextMessage;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.event.SimpleEvent;
/**
* <p>Converts BytesMessage, TextMessage, and ObjectMessage
* to a Flume Event. All Message Property names are added
* as headers to the Event. The conversion of the body is
* as follows:</p>
*
* <p><strong>BytesMessage:</strong> Body from message is
* set as the body of the Event.</p>
* <p><strong>TextMessage:</strong> String body converted to a byte
* array byte getBytes(charset). Charset defaults to UTF-8 but can be
* configured.</p>
* <p><strong>ObjectMessage:</strong> Object is written to
* an ByteArrayOutputStream wrapped by an ObjectOutputStream
* and the resulting byte array is the body of the message.</p>
*/
public class DefaultJMSMessageConverter implements JMSMessageConverter {

  // Charset used to encode TextMessage bodies; configurable via converter.charset.
  private final Charset charset;

  private DefaultJMSMessageConverter(String charset) {
    this.charset = Charset.forName(charset);
  }

  /** Builds the converter from the "converter." sub-context (charset only). */
  public static class Builder implements JMSMessageConverter.Builder {
    @Override
    public JMSMessageConverter build(Context context) {
      return new DefaultJMSMessageConverter(context.getString(
          JMSSourceConfiguration.CONVERTER_CHARSET,
          JMSSourceConfiguration.CONVERTER_CHARSET_DEFAULT).trim());
    }
  }

  /**
   * Converts one JMS message into exactly one Flume event. All message
   * properties become event headers; the body handling depends on the
   * message type (Bytes/Text/Object). Unknown types yield an event with
   * headers only and no body.
   *
   * @throws JMSException if the message cannot be read or is too large
   */
  @Override
  public List<Event> convert(Message message) throws JMSException {
    Event event = new SimpleEvent();
    Map<String, String> headers = event.getHeaders();
    // Copy every JMS property into the event headers as strings.
    @SuppressWarnings("rawtypes")
    Enumeration propertyNames = message.getPropertyNames();
    while (propertyNames.hasMoreElements()) {
      String name = propertyNames.nextElement().toString();
      String value = message.getStringProperty(name);
      headers.put(name, value);
    }
    if (message instanceof BytesMessage) {
      BytesMessage bytesMessage = (BytesMessage)message;
      long length = bytesMessage.getBodyLength();
      if (length > 0L) {
        // Event bodies are byte[]; reject anything beyond array addressability.
        if (length > Integer.MAX_VALUE) {
          throw new JMSException("Unable to process message " + "of size "
              + length);
        }
        byte[] body = new byte[(int)length];
        int count = bytesMessage.readBytes(body);
        if (count != length) {
          throw new JMSException("Unable to read full message. " +
              "Read " + count + " of total " + length);
        }
        event.setBody(body);
      }
    } else if (message instanceof TextMessage) {
      TextMessage textMessage = (TextMessage)message;
      String text = textMessage.getText();
      if (text != null) {
        event.setBody(text.getBytes(charset));
      }
    } else if (message instanceof ObjectMessage) {
      ObjectMessage objectMessage = (ObjectMessage)message;
      Object object = objectMessage.getObject();
      if (object != null) {
        // Java-serialize the payload. try-with-resources replaces the old
        // manual finally block, which (a) called bos.toByteArray() BEFORE the
        // ObjectOutputStream was flushed/closed — its internal block-data
        // buffer could leave the body truncated — and (b) could mask the
        // original serialization failure with a close() failure.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutput out = new ObjectOutputStream(bos)) {
          out.writeObject(object);
        } catch (IOException e) {
          throw new FlumeException("Error serializing object", e);
        }
        // Safe after the stream is closed: all buffered data has been drained.
        event.setBody(bos.toByteArray());
      }
    }
    List<Event> events = new ArrayList<Event>(1);
    events.add(event);
    return events;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
/**
 * How {@code JMSSource} resolves its destination: {@code JNDI} looks the name
 * up in the initial context; {@code CDI} creates it directly from the session.
 */
public enum JMSDestinationLocator {
  JNDI, CDI
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Properties;
import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurables;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractPollableSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JMSSource extends AbstractPollableSource implements BatchSizeSupported {
private static final Logger logger = LoggerFactory.getLogger(JMSSource.class);
private static final String JAVA_SCHEME = "java";
public static final String JNDI_ALLOWED_PROTOCOLS = "JndiAllowedProtocols";
// setup by constructor
private final InitialContextFactory initialContextFactory;
// setup by configuration
private ConnectionFactory connectionFactory;
private int batchSize;
private JMSMessageConverter converter;
private JMSMessageConsumer consumer;
private String initialContextFactoryName;
private String providerUrl;
private String destinationName;
private JMSDestinationType destinationType;
private JMSDestinationLocator destinationLocator;
private String messageSelector;
private Optional<String> userName;
private Optional<String> password;
private SourceCounter sourceCounter;
private int errorThreshold;
private long pollTimeout;
private Optional<String> clientId;
private boolean createDurableSubscription;
private String durableSubscriptionName;
private int jmsExceptionCounter;
private InitialContext initialContext;
private static List<String> allowedSchemes = getAllowedProtocols();
/** Creates a source whose JNDI InitialContext is built by the default factory. */
public JMSSource() {
  this(new InitialContextFactory());
}
/**
 * Creates a source with an injectable context factory — presumably used by
 * tests to supply a mock InitialContext (confirm against the test suite).
 */
public JMSSource(InitialContextFactory initialContextFactory) {
  super();
  this.initialContextFactory = initialContextFactory;
}
/**
 * Returns the URI schemes permitted in JNDI names: always "java", plus any
 * comma-separated entries from the {@code JndiAllowedProtocols} system property.
 *
 * @return the allowed schemes, "java" first, without duplicates
 */
private static List<String> getAllowedProtocols() {
  String allowed = System.getProperty(JNDI_ALLOWED_PROTOCOLS, null);
  if (allowed == null) {
    return Collections.singletonList(JAVA_SCHEME);
  }
  List<String> schemes = new ArrayList<>();
  schemes.add(JAVA_SCHEME);
  for (String item : allowed.split(",")) {
    String scheme = item.trim();
    // Fix: trim BEFORE comparing — previously an entry like " java " passed the
    // untrimmed equality check and was re-added as a duplicate "java".
    // Also skip any other duplicate entries.
    if (!scheme.equals(JAVA_SCHEME) && !schemes.contains(scheme)) {
      schemes.add(scheme);
    }
  }
  return schemes;
}
/**
 * Rejects JNDI locations whose URI scheme is not in the allowed list
 * (mitigates JNDI injection via hostile provider URLs / lookup names).
 * Non-URI strings are ignored: they cannot smuggle a scheme.
 *
 * @param location provider URL or JNDI name to check
 * @throws IllegalArgumentException if the scheme is present but not allowed
 */
public static void verifyContext(String location) {
  try {
    String scheme = new URI(location).getScheme();
    if (scheme != null && !allowedSchemes.contains(scheme)) {
      throw new IllegalArgumentException("Invalid JNDI URI: " + location);
    }
  } catch (URISyntaxException ex) {
    // Fix: pattern was "{}}", which rendered a stray '}' after the location.
    logger.trace("{} is not a valid URI", location);
  }
}
/**
 * Reads and validates all source configuration, instantiates the message
 * converter, and builds the JNDI InitialContext and ConnectionFactory.
 * Any invalid or missing required setting raises FlumeException /
 * IllegalArgumentException so the agent fails fast at configure time.
 */
@Override
protected void doConfigure(Context context) throws FlumeException {
  sourceCounter = new SourceCounter(getName());
  // --- Raw property reads (validated further below) ---
  initialContextFactoryName = context.getString(
      JMSSourceConfiguration.INITIAL_CONTEXT_FACTORY, "").trim();
  providerUrl = context.getString(JMSSourceConfiguration.PROVIDER_URL, "").trim();
  // Reject provider URLs with disallowed URI schemes (JNDI injection guard).
  verifyContext(providerUrl);
  destinationName = context.getString(JMSSourceConfiguration.DESTINATION_NAME, "").trim();
  String destinationTypeName = context.getString(
      JMSSourceConfiguration.DESTINATION_TYPE, "").trim().toUpperCase(Locale.ENGLISH);
  String destinationLocatorName = context.getString(
      JMSSourceConfiguration.DESTINATION_LOCATOR,
      JMSSourceConfiguration.DESTINATION_LOCATOR_DEFAULT)
      .trim().toUpperCase(Locale.ENGLISH);
  messageSelector = context.getString(
      JMSSourceConfiguration.MESSAGE_SELECTOR, "").trim();
  batchSize = context.getInteger(JMSSourceConfiguration.BATCH_SIZE,
      JMSSourceConfiguration.BATCH_SIZE_DEFAULT);
  errorThreshold = context.getInteger(JMSSourceConfiguration.ERROR_THRESHOLD,
      JMSSourceConfiguration.ERROR_THRESHOLD_DEFAULT);
  userName = Optional.fromNullable(context.getString(JMSSourceConfiguration.USERNAME));
  pollTimeout = context.getLong(JMSSourceConfiguration.POLL_TIMEOUT,
      JMSSourceConfiguration.POLL_TIMEOUT_DEFAULT);
  clientId = Optional.fromNullable(context.getString(JMSSourceConfiguration.CLIENT_ID));
  createDurableSubscription = context.getBoolean(
      JMSSourceConfiguration.CREATE_DURABLE_SUBSCRIPTION,
      JMSSourceConfiguration.DEFAULT_CREATE_DURABLE_SUBSCRIPTION);
  durableSubscriptionName = context.getString(
      JMSSourceConfiguration.DURABLE_SUBSCRIPTION_NAME,
      JMSSourceConfiguration.DEFAULT_DURABLE_SUBSCRIPTION_NAME);
  // --- Password: read from file so credentials stay out of the config ---
  String passwordFile = context.getString(JMSSourceConfiguration.PASSWORD_FILE, "").trim();
  if (passwordFile.isEmpty()) {
    password = Optional.absent();
  } else {
    try {
      password = Optional.of(Files.toString(new File(passwordFile),
          Charsets.UTF_8).trim());
    } catch (IOException e) {
      throw new FlumeException(String.format(
          "Could not read password file %s", passwordFile), e);
    }
  }
  // --- Message converter: either a Builder class or a JMSMessageConverter,
  // configured from the "converter." sub-context. "DEFAULT" maps to the
  // built-in DefaultJMSMessageConverter. ---
  String converterClassName = context.getString(
      JMSSourceConfiguration.CONVERTER_TYPE,
      JMSSourceConfiguration.CONVERTER_TYPE_DEFAULT).trim();
  if (JMSSourceConfiguration.CONVERTER_TYPE_DEFAULT.equalsIgnoreCase(converterClassName)) {
    converterClassName = DefaultJMSMessageConverter.Builder.class.getName();
  }
  Context converterContext = new Context(context.getSubProperties(
      JMSSourceConfiguration.CONVERTER + "."));
  try {
    @SuppressWarnings("rawtypes")
    Class clazz = Class.forName(converterClassName);
    boolean isBuilder = JMSMessageConverter.Builder.class
        .isAssignableFrom(clazz);
    if (isBuilder) {
      JMSMessageConverter.Builder builder = (JMSMessageConverter.Builder)clazz.newInstance();
      converter = builder.build(converterContext);
    } else {
      Preconditions.checkState(JMSMessageConverter.class.isAssignableFrom(clazz),
          String.format("Class %s is not a subclass of JMSMessageConverter", clazz.getName()));
      converter = (JMSMessageConverter)clazz.newInstance();
      // Configurables.configure is a no-op unless the converter is Configurable.
      boolean configured = Configurables.configure(converter, converterContext);
      if (logger.isDebugEnabled()) {
        logger.debug(String.format("Attempted configuration of %s, result = %s",
            converterClassName, String.valueOf(configured)));
      }
    }
  } catch (Exception e) {
    throw new FlumeException(String.format(
        "Unable to create instance of converter %s", converterClassName), e);
  }
  String connectionFactoryName = context.getString(
      JMSSourceConfiguration.CONNECTION_FACTORY,
      JMSSourceConfiguration.CONNECTION_FACTORY_DEFAULT).trim();
  verifyContext(connectionFactoryName);
  // --- Required-field validation ---
  assertNotEmpty(initialContextFactoryName, String.format(
      "Initial Context Factory is empty. This is specified by %s",
      JMSSourceConfiguration.INITIAL_CONTEXT_FACTORY));
  assertNotEmpty(providerUrl, String.format(
      "Provider URL is empty. This is specified by %s",
      JMSSourceConfiguration.PROVIDER_URL));
  assertNotEmpty(destinationName, String.format(
      "Destination Name is empty. This is specified by %s",
      JMSSourceConfiguration.DESTINATION_NAME));
  assertNotEmpty(destinationTypeName, String.format(
      "Destination Type is empty. This is specified by %s",
      JMSSourceConfiguration.DESTINATION_TYPE));
  try {
    destinationType = JMSDestinationType.valueOf(destinationTypeName);
  } catch (IllegalArgumentException e) {
    throw new FlumeException(String.format("Destination type '%s' is " +
        "invalid.", destinationTypeName), e);
  }
  // Durable subscriptions: TOPIC only, and require both clientId and a name.
  if (createDurableSubscription) {
    if (JMSDestinationType.TOPIC != destinationType) {
      throw new FlumeException(String.format(
          "Only Destination type '%s' supports durable subscriptions.",
          JMSDestinationType.TOPIC.toString()));
    }
    if (!clientId.isPresent()) {
      throw new FlumeException(String.format(
          "You have to specify '%s' when using durable subscriptions.",
          JMSSourceConfiguration.CLIENT_ID));
    }
    if (StringUtils.isEmpty(durableSubscriptionName)) {
      throw new FlumeException(String.format("If '%s' is set to true, '%s' has to be specified.",
          JMSSourceConfiguration.CREATE_DURABLE_SUBSCRIPTION,
          JMSSourceConfiguration.DURABLE_SUBSCRIPTION_NAME));
    }
  } else if (!StringUtils.isEmpty(durableSubscriptionName)) {
    logger.warn(String.format("'%s' is set, but '%s' is false."
        + "If you want to create a durable subscription, set %s to true.",
        JMSSourceConfiguration.DURABLE_SUBSCRIPTION_NAME,
        JMSSourceConfiguration.CREATE_DURABLE_SUBSCRIPTION,
        JMSSourceConfiguration.CREATE_DURABLE_SUBSCRIPTION));
  }
  try {
    destinationLocator = JMSDestinationLocator.valueOf(destinationLocatorName);
  } catch (IllegalArgumentException e) {
    throw new FlumeException(String.format("Destination locator '%s' is " +
        "invalid.", destinationLocatorName), e);
  }
  Preconditions.checkArgument(batchSize > 0, "Batch size must be greater than 0");
  // --- Build the JNDI InitialContext, passing credentials when present ---
  try {
    Properties contextProperties = new Properties();
    contextProperties.setProperty(
        javax.naming.Context.INITIAL_CONTEXT_FACTORY,
        initialContextFactoryName);
    contextProperties.setProperty(
        javax.naming.Context.PROVIDER_URL, providerUrl);
    // Provide properties for connecting via JNDI
    if (this.userName.isPresent()) {
      contextProperties.setProperty(javax.naming.Context.SECURITY_PRINCIPAL,
          this.userName.get());
    }
    if (this.password.isPresent()) {
      contextProperties.setProperty(javax.naming.Context.SECURITY_CREDENTIALS,
          this.password.get());
    }
    initialContext = initialContextFactory.create(contextProperties);
  } catch (NamingException e) {
    throw new FlumeException(String.format(
        "Could not create initial context %s provider %s",
        initialContextFactoryName, providerUrl), e);
  }
  try {
    connectionFactory = (ConnectionFactory) initialContext.lookup(connectionFactoryName);
  } catch (NamingException e) {
    throw new FlumeException("Could not lookup ConnectionFactory", e);
  }
}
private void assertNotEmpty(String arg, String msg) {
Preconditions.checkArgument(!arg.isEmpty(), msg);
}
  /**
   * Takes one batch of messages from the JMS consumer and delivers it to the
   * channel. Returns BACKOFF when no events were available or on failure,
   * READY after a successful delivery. The JMS session is committed only
   * after the channel accepts the batch; any failure path rolls back in the
   * finally block so messages are redelivered rather than lost.
   */
  @Override
  protected synchronized Status doProcess() throws EventDeliveryException {
    // Assume failure until the batch is fully handed to the channel; the
    // finally block uses this flag to decide between commit and rollback.
    boolean error = true;
    try {
      if (consumer == null) {
        consumer = createConsumer();
      }
      List<Event> events = consumer.take();
      int size = events.size();
      if (size == 0) {
        // Nothing to deliver; not an error, but back off before polling again.
        error = false;
        return Status.BACKOFF;
      }
      sourceCounter.incrementAppendBatchReceivedCount();
      sourceCounter.addToEventReceivedCount(size);
      getChannelProcessor().processEventBatch(events);
      error = false;
      sourceCounter.addToEventAcceptedCount(size);
      sourceCounter.incrementAppendBatchAcceptedCount();
      return Status.READY;
    } catch (ChannelException channelException) {
      logger.warn("Error appending event to channel. "
          + "Channel might be full. Consider increasing the channel "
          + "capacity or make sure the sinks perform faster.", channelException);
      sourceCounter.incrementChannelWriteFail();
    } catch (JMSException jmsException) {
      logger.warn("JMSException consuming events", jmsException);
      // Tolerate transient JMS errors; tear the consumer down only after the
      // configured consecutive-error threshold is exceeded.
      if (++jmsExceptionCounter > errorThreshold && consumer != null) {
        logger.warn("Exceeded JMSException threshold, closing consumer");
        sourceCounter.incrementEventReadFail();
        consumer.rollback();
        consumer.close();
        consumer = null;
      }
    } catch (Throwable throwable) {
      logger.error("Unexpected error processing events", throwable);
      sourceCounter.incrementEventReadFail();
      // Never swallow JVM Errors (e.g. OutOfMemoryError); rethrow them.
      if (throwable instanceof Error) {
        throw (Error) throwable;
      }
    } finally {
      if (error) {
        if (consumer != null) {
          consumer.rollback();
        }
      } else {
        if (consumer != null) {
          consumer.commit();
          // A clean cycle resets the consecutive JMS exception counter.
          jmsExceptionCounter = 0;
        }
      }
    }
    return Status.BACKOFF;
  }
@Override
protected synchronized void doStart() {
try {
consumer = createConsumer();
jmsExceptionCounter = 0;
sourceCounter.start();
} catch (JMSException e) {
throw new FlumeException("Unable to create consumer", e);
}
}
@Override
protected synchronized void doStop() {
if (consumer != null) {
consumer.close();
consumer = null;
}
sourceCounter.stop();
}
@VisibleForTesting
JMSMessageConsumer createConsumer() throws JMSException {
logger.info("Creating new consumer for " + destinationName);
JMSMessageConsumer consumer = new JMSMessageConsumer(initialContext,
connectionFactory, destinationName, destinationLocator, destinationType,
messageSelector, batchSize, pollTimeout, converter, userName, password, clientId,
createDurableSubscription, durableSubscriptionName);
jmsExceptionCounter = 0;
return consumer;
}
  /**
   * Returns the configured number of messages consumed per batch.
   */
  @Override
  public long getBatchSize() {
    return batchSize;
  }
} | 9,620 |
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/main/java/org/apache/flume/source/jms/JMSDestinationType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.jms;
/**
 * The JMS destination types the JMS source can consume from: point-to-point
 * queues and publish/subscribe topics (only TOPIC supports durable
 * subscriptions).
 */
public enum JMSDestinationType {
  // Empty constant argument lists and the trailing comma were redundant.
  QUEUE, TOPIC
}
0 | Create_ds/flume/flume-ng-sources/flume-jms-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-jms-source/src/main/java/org/apache/flume/source/jms/InitialContextFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.jms;
import java.util.Properties;
import javax.naming.InitialContext;
import javax.naming.NamingException;
/**
 * Thin factory for JNDI {@link InitialContext} instances. The indirection
 * lets callers (e.g. the JMS source, which builds its context from
 * configured JNDI properties) substitute a different factory — presumably
 * for testing; confirm against callers.
 */
public class InitialContextFactory {
  /**
   * Creates a new InitialContext from the given JNDI environment properties
   * (initial context factory class, provider URL, optional credentials).
   *
   * @param properties the JNDI environment
   * @return a new InitialContext
   * @throws NamingException if the context cannot be created
   */
  public InitialContext create(Properties properties) throws NamingException {
    return new InitialContext(properties);
  }
}
| 9,622 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source/taildir/TestTaildirSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.flume.source.taildir;
import static org.mockito.Mockito.anyListOf;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.when;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelSelector;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.PollableSource.Status;
import org.apache.flume.Transaction;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;
import org.apache.flume.lifecycle.LifecycleController;
import org.apache.flume.lifecycle.LifecycleState;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.FILE_GROUPS;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.FILE_GROUPS_PREFIX;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.HEADERS_PREFIX;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.POSITION_FILE;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.FILENAME_HEADER;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.FILENAME_HEADER_KEY;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.BATCH_SIZE;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.MAX_BATCH_COUNT;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public class TestTaildirSource {
static TaildirSource source;
static MemoryChannel channel;
private File tmpDir;
private String posFilePath;
@Before
public void setUp() {
source = new TaildirSource();
channel = new MemoryChannel();
Configurables.configure(channel, new Context());
List<Channel> channels = new ArrayList<Channel>();
channels.add(channel);
ChannelSelector rcs = new ReplicatingChannelSelector();
rcs.setChannels(channels);
source.setChannelProcessor(new ChannelProcessor(rcs));
tmpDir = Files.createTempDir();
posFilePath = tmpDir.getAbsolutePath() + "/taildir_position_test.json";
}
@After
public void tearDown() {
for (File f : tmpDir.listFiles()) {
f.delete();
}
tmpDir.delete();
}
  /**
   * End-to-end check that filegroup regexes select exactly the intended
   * files: group "ab" matches only a.log and b.log, group "c" matches the
   * c.log.* rollover names; a.log.1 must be excluded.
   */
  @Test
  public void testRegexFileNameFilteringEndToEnd() throws IOException {
    File f1 = new File(tmpDir, "a.log");
    File f2 = new File(tmpDir, "a.log.1");
    File f3 = new File(tmpDir, "b.log");
    File f4 = new File(tmpDir, "c.log.yyyy-MM-01");
    File f5 = new File(tmpDir, "c.log.yyyy-MM-02");
    // Each file's single line is its own name, so event bodies identify files.
    Files.write("a.log\n", f1, Charsets.UTF_8);
    Files.write("a.log.1\n", f2, Charsets.UTF_8);
    Files.write("b.log\n", f3, Charsets.UTF_8);
    Files.write("c.log.yyyy-MM-01\n", f4, Charsets.UTF_8);
    Files.write("c.log.yyyy-MM-02\n", f5, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "ab c");
    // Tail a.log and b.log
    context.put(FILE_GROUPS_PREFIX + "ab", tmpDir.getAbsolutePath() + "/[ab].log");
    // Tail files that starts with c.log
    context.put(FILE_GROUPS_PREFIX + "c", tmpDir.getAbsolutePath() + "/c.log.*");
    Configurables.configure(source, context);
    source.start();
    source.process();
    Transaction txn = channel.getTransaction();
    txn.begin();
    List<String> out = Lists.newArrayList();
    // Attempt 5 takes; only 4 events should exist, so one take returns null.
    for (int i = 0; i < 5; i++) {
      Event e = channel.take();
      if (e != null) {
        out.add(TestTaildirEventReader.bodyAsString(e));
      }
    }
    txn.commit();
    txn.close();
    assertEquals(4, out.size());
    // Make sure we got every file
    assertTrue(out.contains("a.log"));
    assertFalse(out.contains("a.log.1"));
    assertTrue(out.contains("b.log"));
    assertTrue(out.contains("c.log.yyyy-MM-01"));
    assertTrue(out.contains("c.log.yyyy-MM-02"));
  }
  /**
   * Verifies per-filegroup static headers: events from each file must carry
   * exactly the headers configured for that file's group and no others.
   */
  @Test
  public void testHeaderMapping() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    File f3 = new File(tmpDir, "file3");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    Files.write("file2line1\nfile2line2\n", f2, Charsets.UTF_8);
    Files.write("file3line1\nfile3line2\n", f3, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "f1 f2 f3");
    context.put(FILE_GROUPS_PREFIX + "f1", tmpDir.getAbsolutePath() + "/file1$");
    context.put(FILE_GROUPS_PREFIX + "f2", tmpDir.getAbsolutePath() + "/file2$");
    context.put(FILE_GROUPS_PREFIX + "f3", tmpDir.getAbsolutePath() + "/file3$");
    // f1 gets one header, f2 gets two, f3 gets none.
    context.put(HEADERS_PREFIX + "f1.headerKeyTest", "value1");
    context.put(HEADERS_PREFIX + "f2.headerKeyTest", "value2");
    context.put(HEADERS_PREFIX + "f2.headerKeyTest2", "value2-2");
    Configurables.configure(source, context);
    source.start();
    source.process();
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int i = 0; i < 6; i++) {
      Event e = channel.take();
      String body = new String(e.getBody(), Charsets.UTF_8);
      String headerValue = e.getHeaders().get("headerKeyTest");
      String headerValue2 = e.getHeaders().get("headerKeyTest2");
      if (body.startsWith("file1")) {
        assertEquals("value1", headerValue);
        assertNull(headerValue2);
      } else if (body.startsWith("file2")) {
        assertEquals("value2", headerValue);
        assertEquals("value2-2", headerValue2);
      } else if (body.startsWith("file3")) {
        // No header
        assertNull(headerValue);
        assertNull(headerValue2);
      }
    }
    txn.commit();
    txn.close();
  }
  /**
   * Starts and stops the source three times in a row, asserting it reaches
   * the START and STOP lifecycle states each cycle (i.e. restart is safe).
   */
  @Test
  public void testLifecycle() throws IOException, InterruptedException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "f1");
    context.put(FILE_GROUPS_PREFIX + "f1", tmpDir.getAbsolutePath() + "/file1$");
    Configurables.configure(source, context);
    for (int i = 0; i < 3; i++) {
      source.start();
      source.process();
      assertTrue("Reached start or error", LifecycleController.waitForOneOf(
          source, LifecycleState.START_OR_ERROR));
      assertEquals("Server is started", LifecycleState.START,
          source.getLifecycleState());
      source.stop();
      assertTrue("Reached stop or error",
          LifecycleController.waitForOneOf(source, LifecycleState.STOP_OR_ERROR));
      assertEquals("Server is stopped", LifecycleState.STOP,
          source.getLifecycleState());
    }
  }
private ArrayList<String> prepareFileConsumeOrder() throws IOException {
System.out.println(tmpDir.toString());
// 1) Create 1st file
File f1 = new File(tmpDir, "file1");
String line1 = "file1line1\n";
String line2 = "file1line2\n";
String line3 = "file1line3\n";
Files.write(line1 + line2 + line3, f1, Charsets.UTF_8);
try {
Thread.sleep(1000); // wait before creating a new file
} catch (InterruptedException e) {
}
// 1) Create 2nd file
String line1b = "file2line1\n";
String line2b = "file2line2\n";
String line3b = "file2line3\n";
File f2 = new File(tmpDir, "file2");
Files.write(line1b + line2b + line3b, f2, Charsets.UTF_8);
try {
Thread.sleep(1000); // wait before creating next file
} catch (InterruptedException e) {
}
// 3) Create 3rd file
String line1c = "file3line1\n";
String line2c = "file3line2\n";
String line3c = "file3line3\n";
File f3 = new File(tmpDir, "file3");
Files.write(line1c + line2c + line3c, f3, Charsets.UTF_8);
try {
Thread.sleep(1000); // wait before creating a new file
} catch (InterruptedException e) {
}
// 4) Create 4th file
String line1d = "file4line1\n";
String line2d = "file4line2\n";
String line3d = "file4line3\n";
File f4 = new File(tmpDir, "file4");
Files.write(line1d + line2d + line3d, f4, Charsets.UTF_8);
try {
Thread.sleep(1000); // wait before creating a new file
} catch (InterruptedException e) {
}
// 5) Now update the 3rd file so that its the latest file and gets consumed last
f3.setLastModified(System.currentTimeMillis());
// 4) Consume the files
Context context = new Context();
context.put(POSITION_FILE, posFilePath);
context.put(FILE_GROUPS, "g1");
context.put(FILE_GROUPS_PREFIX + "g1", tmpDir.getAbsolutePath() + "/.*");
Configurables.configure(source, context);
// 6) Ensure consumption order is in order of last update time
ArrayList<String> expected = Lists.newArrayList(line1, line2, line3, // file1
line1b, line2b, line3b, // file2
line1d, line2d, line3d, // file4
line1c, line2c, line3c // file3
);
for (int i = 0; i != expected.size(); ++i) {
expected.set(i, expected.get(i).trim());
}
return expected;
}
  /**
   * Asserts the source consumes files strictly in order of last-modified
   * time (file3 was touched last in prepareFileConsumeOrder, so its lines
   * must come out last).
   */
  @Test
  public void testFileConsumeOrder() throws IOException {
    ArrayList<String> consumedOrder = Lists.newArrayList();
    ArrayList<String> expected = prepareFileConsumeOrder();
    source.start();
    source.process();
    Transaction txn = channel.getTransaction();
    txn.begin();
    // 4 files x 3 lines = 12 events.
    for (int i = 0; i < 12; i++) {
      Event e = channel.take();
      String body = new String(e.getBody(), Charsets.UTF_8);
      consumedOrder.add(body);
    }
    txn.commit();
    txn.close();
    System.out.println(consumedOrder);
    assertArrayEquals("Files not consumed in expected order", expected.toArray(),
        consumedOrder.toArray());
  }
  /**
   * Writes a one-line file and configures the source over "file.*" with the
   * filename header enabled (header key "path").
   *
   * @return the file the source will tail
   */
  private File configureSource() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("f1\n", f1, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "fg");
    context.put(FILE_GROUPS_PREFIX + "fg", tmpDir.getAbsolutePath() + "/file.*");
    context.put(FILENAME_HEADER, "true");
    context.put(FILENAME_HEADER_KEY, "path");
    Configurables.configure(source, context);
    return f1;
  }
  /**
   * With the filename header enabled, each event must carry the tailed
   * file's absolute path under the configured header key.
   */
  @Test
  public void testPutFilenameHeader() throws IOException {
    File f1 = configureSource();
    source.start();
    source.process();
    Transaction txn = channel.getTransaction();
    txn.begin();
    Event e = channel.take();
    txn.commit();
    txn.close();
    assertNotNull(e.getHeaders().get("path"));
    assertEquals(f1.getAbsolutePath(),
        e.getHeaders().get("path"));
  }
  /**
   * A reader failure during process() must increment the eventReadFail
   * counter exactly once (the reader is mocked to throw on getTailFiles()).
   */
  @Test
  public void testErrorCounterEventReadFail() throws Exception {
    configureSource();
    source.start();
    ReliableTaildirEventReader reader = Mockito.mock(ReliableTaildirEventReader.class);
    Whitebox.setInternalState(source, "reader", reader);
    when(reader.updateTailFiles()).thenReturn(Collections.singletonList(123L));
    when(reader.getTailFiles()).thenThrow(new RuntimeException("hello"));
    source.process();
    assertEquals(1, source.getSourceCounter().getEventReadFail());
    source.stop();
  }
  /**
   * A failure inside the background idle-file checker (driven by the short
   * checkIdleInterval) must increment the genericProcessingFail counter.
   */
  @Test
  public void testErrorCounterFileHandlingFail() throws Exception {
    configureSource();
    // Shrink the idle-check timing so the background task runs during the test.
    Whitebox.setInternalState(source, "idleTimeout", 0);
    Whitebox.setInternalState(source, "checkIdleInterval", 60);
    source.start();
    ReliableTaildirEventReader reader = Mockito.mock(ReliableTaildirEventReader.class);
    when(reader.getTailFiles()).thenThrow(new RuntimeException("hello"));
    Whitebox.setInternalState(source, "reader", reader);
    // Give the scheduled idle checker time to hit the mocked failure.
    TimeUnit.MILLISECONDS.sleep(200);
    assertTrue(0 < source.getSourceCounter().getGenericProcessingFail());
    source.stop();
  }
  /**
   * A ChannelException on the first batch must increment the
   * channelWriteFail counter exactly once (the mock then succeeds).
   */
  @Test
  public void testErrorCounterChannelWriteFail() throws Exception {
    prepareFileConsumeOrder();
    ChannelProcessor cp = Mockito.mock(ChannelProcessor.class);
    source.setChannelProcessor(cp);
    // First batch fails, subsequent batches succeed.
    doThrow(new ChannelException("dummy")).doNothing().when(cp)
        .processEventBatch(anyListOf(Event.class));
    source.start();
    source.process();
    assertEquals(1, source.getSourceCounter().getChannelWriteFail());
    source.stop();
  }
  /**
   * With batchSize=1 and maxBatchCount=2, each process() call takes at most
   * two events per file before moving to the next file, so the two files'
   * lines interleave in pairs across the two process() rounds.
   */
  @Test
  public void testMaxBatchCount() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    Files.write("file1line1\nfile1line2\n" +
        "file1line3\nfile1line4\n", f1, Charsets.UTF_8);
    Files.write("file2line1\nfile2line2\n" +
        "file2line3\nfile2line4\n", f2, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "fg");
    context.put(FILE_GROUPS_PREFIX + "fg", tmpDir.getAbsolutePath() + "/file.*");
    context.put(BATCH_SIZE, String.valueOf(1));
    context.put(MAX_BATCH_COUNT, String.valueOf(2));
    Configurables.configure(source, context);
    source.start();
    // 2 x 4 lines will be processed in 2 rounds
    source.process();
    source.process();
    List<Event> eventList = new ArrayList<Event>();
    for (int i = 0; i < 8; i++) {
      Transaction txn = channel.getTransaction();
      txn.begin();
      Event e = channel.take();
      txn.commit();
      txn.close();
      if (e == null) {
        break;
      }
      eventList.add(e);
    }
    assertEquals("1", context.getString(BATCH_SIZE));
    assertEquals("2", context.getString(MAX_BATCH_COUNT));
    assertEquals(8, eventList.size());
    // the processing order of the files is not deterministic
    String firstFile = new String(eventList.get(0).getBody()).substring(0, 5);
    String secondFile = firstFile.equals("file1") ? "file2" : "file1";
    assertEquals(firstFile + "line1", new String(eventList.get(0).getBody()));
    assertEquals(firstFile + "line2", new String(eventList.get(1).getBody()));
    assertEquals(secondFile + "line1", new String(eventList.get(2).getBody()));
    assertEquals(secondFile + "line2", new String(eventList.get(3).getBody()));
    assertEquals(firstFile + "line3", new String(eventList.get(4).getBody()));
    assertEquals(firstFile + "line4", new String(eventList.get(5).getBody()));
    assertEquals(secondFile + "line3", new String(eventList.get(6).getBody()));
    assertEquals(secondFile + "line4", new String(eventList.get(7).getBody()));
  }
  /**
   * process() must report READY while capped batches leave data behind
   * (8 lines, 2 per round x 2 files = 4 per call) and BACKOFF once all
   * lines are drained.
   */
  @Test
  public void testStatus() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    Files.write("file1line1\nfile1line2\n" +
        "file1line3\nfile1line4\nfile1line5\n", f1, Charsets.UTF_8);
    Files.write("file2line1\nfile2line2\n" +
        "file2line3\n", f2, Charsets.UTF_8);
    Context context = new Context();
    context.put(POSITION_FILE, posFilePath);
    context.put(FILE_GROUPS, "fg");
    context.put(FILE_GROUPS_PREFIX + "fg", tmpDir.getAbsolutePath() + "/file.*");
    context.put(BATCH_SIZE, String.valueOf(1));
    context.put(MAX_BATCH_COUNT, String.valueOf(2));
    Configurables.configure(source, context);
    source.start();
    Status status;
    status = source.process();
    assertEquals(Status.READY, status);
    status = source.process();
    assertEquals(Status.READY, status);
    status = source.process();
    assertEquals(Status.BACKOFF, status);
    status = source.process();
    assertEquals(Status.BACKOFF, status);
  }
}
| 9,623 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source/taildir/TestTaildirEventReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.taildir;
import com.google.common.base.Charsets;
import com.google.common.base.Throwables;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.collect.Table;
import com.google.common.io.Files;
import org.apache.flume.Event;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.BYTE_OFFSET_HEADER_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public class TestTaildirEventReader {
private File tmpDir;
private String posFilePath;
public static String bodyAsString(Event event) {
return new String(event.getBody());
}
static List<String> bodiesAsStrings(List<Event> events) {
List<String> bodies = Lists.newArrayListWithCapacity(events.size());
for (Event event : events) {
bodies.add(new String(event.getBody()));
}
return bodies;
}
static List<String> headersAsStrings(List<Event> events, String headerKey) {
List<String> headers = Lists.newArrayListWithCapacity(events.size());
for (Event event : events) {
headers.add(new String(event.getHeaders().get(headerKey)));
}
return headers;
}
  /**
   * Builds a ReliableTaildirEventReader over the given filegroup map and
   * header table, backed by this test's position file, and primes it with an
   * initial updateTailFiles() pass. IOExceptions are rethrown unchecked.
   */
  private ReliableTaildirEventReader getReader(Map<String, String> filePaths,
      Table<String, String, String> headerTable, boolean addByteOffset,
      boolean cachedPatternMatching) {
    ReliableTaildirEventReader reader;
    try {
      reader = new ReliableTaildirEventReader.Builder()
          .filePaths(filePaths)
          .headerTable(headerTable)
          .positionFilePath(posFilePath)
          .skipToEnd(false)
          .addByteOffset(addByteOffset)
          .cachePatternMatching(cachedPatternMatching)
          .build();
      reader.updateTailFiles();
    } catch (IOException ioe) {
      throw Throwables.propagate(ioe);
    }
    return reader;
  }
private ReliableTaildirEventReader getReader(boolean addByteOffset,
boolean cachedPatternMatching) {
Map<String, String> filePaths = ImmutableMap.of("testFiles",
tmpDir.getAbsolutePath() + "/file.*");
Table<String, String, String> headerTable = HashBasedTable.create();
return getReader(filePaths, headerTable, addByteOffset, cachedPatternMatching);
}
  /** Default reader: no byte-offset header, no pattern-match caching. */
  private ReliableTaildirEventReader getReader() {
    return getReader(false, false);
  }
  /** Creates a fresh temp directory and position-file path per test. */
  @Before
  public void setUp() {
    tmpDir = Files.createTempDir();
    posFilePath = tmpDir.getAbsolutePath() + "/taildir_position_test.json";
  }
@After
public void tearDown() {
for (File f : tmpDir.listFiles()) {
if (f.isDirectory()) {
for (File sdf : f.listFiles()) {
sdf.delete();
}
}
f.delete();
}
tmpDir.delete();
}
  /**
   * Create three multi-line files then read them back out. Ensures that
   * lines and appended ones are read correctly from files.
   */
  @Test
  public void testBasicReadFiles() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    File f3 = new File(tmpDir, "file3");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    Files.write("file2line1\nfile2line2\n", f2, Charsets.UTF_8);
    Files.write("file3line1\nfile3line2\n", f3, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    List<String> out = Lists.newArrayList();
    for (TailFile tf : reader.getTailFiles().values()) {
      List<String> bodies = bodiesAsStrings(reader.readEvents(tf, 2));
      out.addAll(bodies);
      reader.commit();
    }
    assertEquals(6, out.size());
    // Make sure we got every line
    assertTrue(out.contains("file1line1"));
    assertTrue(out.contains("file1line2"));
    assertTrue(out.contains("file2line1"));
    assertTrue(out.contains("file2line2"));
    assertTrue(out.contains("file3line1"));
    assertTrue(out.contains("file3line2"));
    // Appended lines must be picked up after an updateTailFiles() refresh.
    Files.append("file3line3\nfile3line4\n", f3, Charsets.UTF_8);
    reader.updateTailFiles();
    for (TailFile tf : reader.getTailFiles().values()) {
      List<String> bodies = bodiesAsStrings(reader.readEvents(tf, 2));
      out.addAll(bodies);
      reader.commit();
    }
    assertEquals(8, out.size());
    assertTrue(out.contains("file3line3"));
    assertTrue(out.contains("file3line4"));
  }
  /** Tests deleting a file while pattern-match caching is enabled. */
  @Test
  public void testDeleteFiles() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    // Caching is used to be able to reproduce the problem when a file is deleted
    // right before the inode is fetched
    ReliableTaildirEventReader reader = getReader(false, true);
    File dir = f1.getParentFile();
    long lastModified = dir.lastModified();
    f1.delete();
    dir.setLastModified(lastModified - 1000); // subtract a second to be sure the cache is used
    // Must not throw even though the cached match points at a deleted file.
    reader.updateTailFiles();
  }
  /**
   * Make sure this works when there are initially no files
   * and we finish reading all files and fully commit.
   */
  @Test
  public void testInitiallyEmptyDirAndBehaviorAfterReadingAll() throws IOException {
    ReliableTaildirEventReader reader = getReader();
    List<Long> fileInodes = reader.updateTailFiles();
    assertEquals(0, fileInodes.size());
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    reader.updateTailFiles();
    List<String> out = null;
    for (TailFile tf : reader.getTailFiles().values()) {
      out = bodiesAsStrings(reader.readEvents(tf, 2));
      reader.commit();
    }
    assertEquals(2, out.size());
    // Make sure we got every line
    assertTrue(out.contains("file1line1"));
    assertTrue(out.contains("file1line2"));
    // After everything is consumed and committed, further reads are empty.
    reader.updateTailFiles();
    List<String> empty = null;
    for (TailFile tf : reader.getTailFiles().values()) {
      empty = bodiesAsStrings(reader.readEvents(tf, 15));
      reader.commit();
    }
    assertEquals(0, empty.size());
  }
  /**
   * Test a basic case where a commit is missed: an uncommitted read must be
   * re-delivered by the next read, and commits then advance normally.
   */
  @Test
  public void testBasicCommitFailure() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\nfile1line3\nfile1line4\n" +
        "file1line5\nfile1line6\nfile1line7\nfile1line8\n" +
        "file1line9\nfile1line10\nfile1line11\nfile1line12\n",
        f1, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    // Read lines 1-4 but do NOT commit.
    List<String> out1 = null;
    for (TailFile tf : reader.getTailFiles().values()) {
      out1 = bodiesAsStrings(reader.readEvents(tf, 4));
    }
    assertTrue(out1.contains("file1line1"));
    assertTrue(out1.contains("file1line2"));
    assertTrue(out1.contains("file1line3"));
    assertTrue(out1.contains("file1line4"));
    // Without a commit, the same 4 lines are delivered again.
    List<String> out2 = bodiesAsStrings(reader.readEvents(4));
    assertTrue(out2.contains("file1line1"));
    assertTrue(out2.contains("file1line2"));
    assertTrue(out2.contains("file1line3"));
    assertTrue(out2.contains("file1line4"));
    reader.commit();
    List<String> out3 = bodiesAsStrings(reader.readEvents(4));
    assertTrue(out3.contains("file1line5"));
    assertTrue(out3.contains("file1line6"));
    assertTrue(out3.contains("file1line7"));
    assertTrue(out3.contains("file1line8"));
    reader.commit();
    List<String> out4 = bodiesAsStrings(reader.readEvents(4));
    assertEquals(4, out4.size());
    assertTrue(out4.contains("file1line9"));
    assertTrue(out4.contains("file1line10"));
    assertTrue(out4.contains("file1line11"));
    assertTrue(out4.contains("file1line12"));
  }
  /**
   * Test a case where a commit is missed and the batch size changes: the
   * retry re-reads from the uncommitted position with the new batch size.
   */
  @Test
  public void testBasicCommitFailureAndBatchSizeChanges() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\nfile1line3\nfile1line4\n" +
        "file1line5\nfile1line6\nfile1line7\nfile1line8\n",
        f1, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    // Read 5 lines without committing.
    List<String> out1 = null;
    for (TailFile tf : reader.getTailFiles().values()) {
      out1 = bodiesAsStrings(reader.readEvents(tf, 5));
    }
    assertTrue(out1.contains("file1line1"));
    assertTrue(out1.contains("file1line2"));
    assertTrue(out1.contains("file1line3"));
    assertTrue(out1.contains("file1line4"));
    assertTrue(out1.contains("file1line5"));
    // Retry with a smaller batch restarts from line 1.
    List<String> out2 = bodiesAsStrings(reader.readEvents(2));
    assertTrue(out2.contains("file1line1"));
    assertTrue(out2.contains("file1line2"));
    reader.commit();
    List<String> out3 = bodiesAsStrings(reader.readEvents(2));
    assertTrue(out3.contains("file1line3"));
    assertTrue(out3.contains("file1line4"));
    reader.commit();
    List<String> out4 = bodiesAsStrings(reader.readEvents(15));
    assertTrue(out4.contains("file1line5"));
    assertTrue(out4.contains("file1line6"));
    assertTrue(out4.contains("file1line7"));
    assertTrue(out4.contains("file1line8"));
  }
  /**
   * An empty file must be tracked without producing events; only the
   * non-empty file's lines come out.
   */
  @Test
  public void testIncludeEmptyFile() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
    Files.touch(f2);
    ReliableTaildirEventReader reader = getReader();
    // Expect to read nothing from empty file
    List<String> out = Lists.newArrayList();
    for (TailFile tf : reader.getTailFiles().values()) {
      out.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
      reader.commit();
    }
    assertEquals(2, out.size());
    assertTrue(out.contains("file1line1"));
    assertTrue(out.contains("file1line2"));
    assertNull(reader.readEvent());
  }
  /**
   * A trailing partial line (no newline) is withheld until either the
   * newline arrives or backoffWithoutNL is disabled on the read call.
   */
  @Test
  public void testBackoffWithoutNewLine() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1", f1, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    List<String> out = Lists.newArrayList();
    // Expect to read only the line with newline
    for (TailFile tf : reader.getTailFiles().values()) {
      out.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
      reader.commit();
    }
    assertEquals(1, out.size());
    assertTrue(out.contains("file1line1"));
    // Completing "file1" + "line2\n" yields line2 and line3; line4 still has
    // no trailing newline and is withheld again.
    Files.append("line2\nfile1line3\nfile1line4", f1, Charsets.UTF_8);
    for (TailFile tf : reader.getTailFiles().values()) {
      out.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
      reader.commit();
    }
    assertEquals(3, out.size());
    assertTrue(out.contains("file1line2"));
    assertTrue(out.contains("file1line3"));
    // Should read the last line if it finally has no newline
    out.addAll(bodiesAsStrings(reader.readEvents(5, false)));
    reader.commit();
    assertEquals(4, out.size());
    assertTrue(out.contains("file1line4"));
  }
  // Verifies that batched reads drain the current file completely before the
  // reader moves on, and that a file created mid-stream is only picked up
  // after updateTailFiles() rescans the directory.
  @Test
  public void testBatchedReadsAcrossFileBoundary() throws IOException {
    File f1 = new File(tmpDir, "file1");
    Files.write("file1line1\nfile1line2\nfile1line3\nfile1line4\n" +
        "file1line5\nfile1line6\nfile1line7\nfile1line8\n",
        f1, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    // First batch is capped at 5 events, so only file1's first 5 lines.
    List<String> out1 = Lists.newArrayList();
    for (TailFile tf : reader.getTailFiles().values()) {
      out1.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
      reader.commit();
    }
    // file2 appears after the reader has started tailing file1.
    File f2 = new File(tmpDir, "file2");
    Files.write("file2line1\nfile2line2\nfile2line3\nfile2line4\n" +
        "file2line5\nfile2line6\nfile2line7\nfile2line8\n",
        f2, Charsets.UTF_8);
    // Second batch still drains the current file: file1's remaining 3 lines.
    List<String> out2 = bodiesAsStrings(reader.readEvents(5));
    reader.commit();
    // Rescan so the reader notices file2, then read its first batch.
    reader.updateTailFiles();
    List<String> out3 = Lists.newArrayList();
    for (TailFile tf : reader.getTailFiles().values()) {
      out3.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
      reader.commit();
    }
    // Should have first 5 lines of file1
    assertEquals(5, out1.size());
    assertTrue(out1.contains("file1line1"));
    assertTrue(out1.contains("file1line2"));
    assertTrue(out1.contains("file1line3"));
    assertTrue(out1.contains("file1line4"));
    assertTrue(out1.contains("file1line5"));
    // Should have 3 remaining lines of file1
    assertEquals(3, out2.size());
    assertTrue(out2.contains("file1line6"));
    assertTrue(out2.contains("file1line7"));
    assertTrue(out2.contains("file1line8"));
    // Should have first 5 lines of file2
    assertEquals(5, out3.size());
    assertTrue(out3.contains("file2line1"));
    assertTrue(out3.contains("file2line2"));
    assertTrue(out3.contains("file2line3"));
    assertTrue(out3.contains("file2line4"));
    assertTrue(out3.contains("file2line5"));
  }
@Test
public void testLargeNumberOfFiles() throws IOException {
int fileNum = 1000;
Set<String> expected = Sets.newHashSet();
for (int i = 0; i < fileNum; i++) {
String data = "data" + i;
File f = new File(tmpDir, "file" + i);
Files.write(data + "\n", f, Charsets.UTF_8);
expected.add(data);
}
ReliableTaildirEventReader reader = getReader();
for (TailFile tf : reader.getTailFiles().values()) {
List<Event> events = reader.readEvents(tf, 10);
for (Event e : events) {
expected.remove(new String(e.getBody()));
}
reader.commit();
}
assertEquals(0, expected.size());
}
  // Writes a hand-built JSON position file and checks that loadPositionFile()
  // restores each TailFile's position, leaving it at 0 when the recorded
  // position exceeds the file's actual size.
  @Test
  public void testLoadPositionFile() throws IOException {
    File f1 = new File(tmpDir, "file1");
    File f2 = new File(tmpDir, "file2");
    File f3 = new File(tmpDir, "file3");
    Files.write("file1line1\nfile1line2\nfile1line3\n", f1, Charsets.UTF_8);
    Files.write("file2line1\nfile2line2\n", f2, Charsets.UTF_8);
    Files.write("file3line1\n", f3, Charsets.UTF_8);
    ReliableTaildirEventReader reader = getReader();
    Map<Long, TailFile> tailFiles = reader.getTailFiles();
    // f2's length fits inside file1 and file2 but is larger than file3.
    long pos = f2.length();
    int i = 1;
    File posFile = new File(posFilePath);
    // Emit a JSON array by hand: "[" before the first entry, "]" after the
    // third, commas between. Assumes exactly three tracked files.
    for (TailFile tf : tailFiles.values()) {
      Files.append(i == 1 ? "[" : "", posFile, Charsets.UTF_8);
      Files.append(String.format("{\"inode\":%s,\"pos\":%s,\"file\":\"%s\"}",
          tf.getInode(), pos, tf.getPath()), posFile, Charsets.UTF_8);
      Files.append(i == 3 ? "]" : ",", posFile, Charsets.UTF_8);
      i++;
    }
    reader.loadPositionFile(posFilePath);
    for (TailFile tf : tailFiles.values()) {
      if (tf.getPath().equals(tmpDir + "file3")) {
        // when given position is larger than file size
        assertEquals(0, tf.getPos());
      } else {
        assertEquals(pos, tf.getPos());
      }
    }
  }
@Test
public void testSkipToEndPosition() throws IOException {
ReliableTaildirEventReader reader = getReader();
File f1 = new File(tmpDir, "file1");
Files.write("file1line1\nfile1line2\n", f1, Charsets.UTF_8);
reader.updateTailFiles();
for (TailFile tf : reader.getTailFiles().values()) {
if (tf.getPath().equals(tmpDir + "file1")) {
assertEquals(0, tf.getPos());
}
}
File f2 = new File(tmpDir, "file2");
Files.write("file2line1\nfile2line2\n", f2, Charsets.UTF_8);
// Expect to skip to EOF the read position when skipToEnd option is true
reader.updateTailFiles(true);
for (TailFile tf : reader.getTailFiles().values()) {
if (tf.getPath().equals(tmpDir + "file2")) {
assertEquals(f2.length(), tf.getPos());
}
}
}
@Test
public void testByteOffsetHeader() throws IOException {
File f1 = new File(tmpDir, "file1");
String line1 = "file1line1\n";
String line2 = "file1line2\n";
String line3 = "file1line3\n";
Files.write(line1 + line2 + line3, f1, Charsets.UTF_8);
ReliableTaildirEventReader reader = getReader(true, false);
List<String> headers = null;
for (TailFile tf : reader.getTailFiles().values()) {
headers = headersAsStrings(reader.readEvents(tf, 5), BYTE_OFFSET_HEADER_KEY);
reader.commit();
}
assertEquals(3, headers.size());
// Make sure we got byte offset position
assertTrue(headers.contains(String.valueOf(0)));
assertTrue(headers.contains(String.valueOf(line1.length())));
assertTrue(headers.contains(String.valueOf((line1 + line2).length())));
}
@Test
public void testNewLineBoundaries() throws IOException {
File f1 = new File(tmpDir, "file1");
Files.write("file1line1\nfile1line2\rfile1line2\nfile1line3\r\nfile1line4\n",
f1, Charsets.UTF_8);
ReliableTaildirEventReader reader = getReader();
List<String> out = Lists.newArrayList();
for (TailFile tf : reader.getTailFiles().values()) {
out.addAll(bodiesAsStrings(reader.readEvents(tf, 5)));
reader.commit();
}
assertEquals(4, out.size());
//Should treat \n as line boundary
assertTrue(out.contains("file1line1"));
//Should not treat \r as line boundary
assertTrue(out.contains("file1line2\rfile1line2"));
//Should treat \r\n as line boundary
assertTrue(out.contains("file1line3"));
assertTrue(out.contains("file1line4"));
}
@Test
// Ensure tail file is set to be read when its last updated time
// equals the underlying file's modification time and there are
// pending bytes to be read.
public void testUpdateWhenLastUpdatedSameAsModificationTime() throws IOException {
File file = new File(tmpDir, "file");
Files.write("line1\n", file, Charsets.UTF_8);
ReliableTaildirEventReader reader = getReader();
for (TailFile tf : reader.getTailFiles().values()) {
reader.readEvents(tf, 1);
reader.commit();
}
Files.append("line2\n", file, Charsets.UTF_8);
for (TailFile tf : reader.getTailFiles().values()) {
tf.setLastUpdated(file.lastModified());
}
reader.updateTailFiles();
for (TailFile tf : reader.getTailFiles().values()) {
assertEquals(true, tf.needTail());
}
}
}
| 9,624 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/test/java/org/apache/flume/source/taildir/TestTaildirMatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.taildir;
import com.google.common.base.Charsets;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class TestTaildirMatcher {
private File tmpDir;
private Map<String, File> files;
private boolean isCachingNeeded = true;
final String msgAlreadyExistingFile = "a file was not found but it was created before matcher";
final String msgAfterNewFileCreated = "files which were created after last check are not found";
final String msgAfterAppend = "a file was not found although it was just appended within the dir";
final String msgEmptyDir = "empty dir should return an empty list";
final String msgNoMatch = "no match should return an empty list";
final String msgSubDirs = "only files on the same level as the pattern should be returned";
final String msgNoChange = "file wasn't touched after last check cannot be found";
final String msgAfterDelete = "file was returned even after it was deleted";
/**
* Append a line to the specified file within tmpDir.
* If file doesn't exist it will be created.
*/
private void append(String fileName) throws IOException {
File f;
if (!files.containsKey(fileName)) {
f = new File(tmpDir, fileName);
files.put(fileName, f);
} else {
f = files.get(fileName);
}
Files.append(fileName + "line\n", f, Charsets.UTF_8);
}
/**
* Translate a list of files to list of filename strings.
*/
private static List<String> filesToNames(List<File> origList) {
Function<File, String> file2nameFn = new Function<File, String>() {
@Override
public String apply(File input) {
return input.getName();
}
};
return Lists.transform(origList, file2nameFn);
}
@Before
public void setUp() throws Exception {
files = Maps.newHashMap();
tmpDir = Files.createTempDir();
}
@After
public void tearDown() throws Exception {
for (File f : tmpDir.listFiles()) {
if (f.isDirectory()) {
for (File sdf : f.listFiles()) {
sdf.delete();
}
}
f.delete();
}
tmpDir.delete();
files = null;
}
@Test
public void getMatchingFiles() throws Exception {
append("file0");
append("file1");
TaildirMatcher tm = new TaildirMatcher("f1",
tmpDir.getAbsolutePath() + File.separator + "file.*",
isCachingNeeded);
List<String> files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAlreadyExistingFile, 2, files.size());
assertTrue(msgAlreadyExistingFile, files.contains("file1"));
append("file1");
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterNewFileCreated, 2, files.size());
assertTrue(msgAfterNewFileCreated, files.contains("file0"));
assertTrue(msgAfterNewFileCreated, files.contains("file1"));
append("file2");
append("file3");
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterAppend, 4, files.size());
assertTrue(msgAfterAppend, files.contains("file0"));
assertTrue(msgAfterAppend, files.contains("file1"));
assertTrue(msgAfterAppend, files.contains("file2"));
assertTrue(msgAfterAppend, files.contains("file3"));
this.files.get("file0").delete();
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterDelete, 3, files.size());
assertFalse(msgAfterDelete, files.contains("file0"));
assertTrue(msgNoChange, files.contains("file1"));
assertTrue(msgNoChange, files.contains("file2"));
assertTrue(msgNoChange, files.contains("file3"));
}
@Test
public void getMatchingFilesNoCache() throws Exception {
append("file0");
append("file1");
TaildirMatcher tm = new TaildirMatcher("f1",
tmpDir.getAbsolutePath() + File.separator + "file.*",
false);
List<String> files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAlreadyExistingFile, 2, files.size());
assertTrue(msgAlreadyExistingFile, files.contains("file1"));
append("file1");
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterAppend, 2, files.size());
assertTrue(msgAfterAppend, files.contains("file0"));
assertTrue(msgAfterAppend, files.contains("file1"));
append("file2");
append("file3");
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterNewFileCreated, 4, files.size());
assertTrue(msgAfterNewFileCreated, files.contains("file0"));
assertTrue(msgAfterNewFileCreated, files.contains("file1"));
assertTrue(msgAfterNewFileCreated, files.contains("file2"));
assertTrue(msgAfterNewFileCreated, files.contains("file3"));
this.files.get("file0").delete();
files = filesToNames(tm.getMatchingFiles());
assertEquals(msgAfterDelete, 3, files.size());
assertFalse(msgAfterDelete, files.contains("file0"));
assertTrue(msgNoChange, files.contains("file1"));
assertTrue(msgNoChange, files.contains("file2"));
assertTrue(msgNoChange, files.contains("file3"));
}
@Test
public void testEmtpyDirMatching() throws Exception {
TaildirMatcher tm = new TaildirMatcher("empty",
tmpDir.getAbsolutePath() + File.separator + ".*",
isCachingNeeded);
List<File> files = tm.getMatchingFiles();
assertNotNull(msgEmptyDir, files);
assertTrue(msgEmptyDir, files.isEmpty());
}
@Test
public void testNoMatching() throws Exception {
TaildirMatcher tm = new TaildirMatcher(
"nomatch",
tmpDir.getAbsolutePath() + File.separator + "abracadabra_nonexisting",
isCachingNeeded);
List<File> files = tm.getMatchingFiles();
assertNotNull(msgNoMatch, files);
assertTrue(msgNoMatch, files.isEmpty());
}
@Test(expected = IllegalStateException.class)
public void testNonExistingDir() {
TaildirMatcher tm = new TaildirMatcher("exception", "/abracadabra/doesntexist/.*",
isCachingNeeded);
}
@Test
public void testDirectoriesAreNotListed() throws Exception {
new File(tmpDir, "outerFile").createNewFile();
new File(tmpDir, "recursiveDir").mkdir();
new File(tmpDir + File.separator + "recursiveDir", "innerFile").createNewFile();
TaildirMatcher tm = new TaildirMatcher("f1", tmpDir.getAbsolutePath() + File.separator + ".*",
isCachingNeeded);
List<String> files = filesToNames(tm.getMatchingFiles());
assertEquals(msgSubDirs, 1, files.size());
assertTrue(msgSubDirs, files.contains("outerFile"));
}
@Test
public void testRegexFileNameFiltering() throws IOException {
append("a.log");
append("a.log.1");
append("b.log");
append("c.log.yyyy.MM-01");
append("c.log.yyyy.MM-02");
// Tail a.log and b.log
TaildirMatcher tm1 = new TaildirMatcher("ab",
tmpDir.getAbsolutePath() + File.separator + "[ab].log",
isCachingNeeded);
// Tail files that starts with c.log
TaildirMatcher tm2 = new TaildirMatcher("c",
tmpDir.getAbsolutePath() + File.separator + "c.log.*",
isCachingNeeded);
List<String> files1 = filesToNames(tm1.getMatchingFiles());
List<String> files2 = filesToNames(tm2.getMatchingFiles());
assertEquals(2, files1.size());
assertEquals(2, files2.size());
// Make sure we got every file
assertTrue("Regex pattern for ab should have matched a.log file",
files1.contains("a.log"));
assertFalse("Regex pattern for ab should NOT have matched a.log.1 file",
files1.contains("a.log.1"));
assertTrue("Regex pattern for ab should have matched b.log file",
files1.contains("b.log"));
assertTrue("Regex pattern for c should have matched c.log.yyyy-MM-01 file",
files2.contains("c.log.yyyy.MM-01"));
assertTrue("Regex pattern for c should have matched c.log.yyyy-MM-02 file",
files2.contains("c.log.yyyy.MM-02"));
}
} | 9,625 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source/taildir/TailFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.taildir;
import com.google.common.collect.Lists;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.List;
import java.util.Map;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.BYTE_OFFSET_HEADER_KEY;
/**
 * Represents a single file being tailed by the TAILDIR source.
 * <p>
 * Keeps two positions: {@code pos}, the committed byte offset (advanced via
 * {@link #setPos(long)}/{@link #updatePos}), and {@code lineReadPos}, the
 * in-flight offset just past the last line handed out, so uncommitted reads
 * can be rewound and replayed after a failure. Not thread safe.
 */
public class TailFile {
  private static final Logger logger = LoggerFactory.getLogger(TailFile.class);

  private static final byte BYTE_NL = (byte) 10; // '\n'
  private static final byte BYTE_CR = (byte) 13; // '\r'

  private static final int BUFFER_SIZE = 8192;
  // Sentinel for bufferPos: the read buffer is exhausted and must be refilled.
  private static final int NEED_READING = -1;

  private RandomAccessFile raf;
  private final String path;
  private final long inode;
  // Committed read position (byte offset into the file).
  private long pos;
  // Wall-clock millis of the last close(); used by the reader to decide
  // whether the file needs to be tailed again.
  private long lastUpdated;
  private boolean needTail;
  private final Map<String, String> headers;
  private byte[] buffer;
  // Bytes of a partially read line carried over between buffer refills.
  private byte[] oldBuffer;
  private int bufferPos;
  // Uncommitted offset just past the last line returned by readLine().
  private long lineReadPos;

  public TailFile(File file, Map<String, String> headers, long inode, long pos)
      throws IOException {
    this.raf = new RandomAccessFile(file, "r");
    if (pos > 0) {
      raf.seek(pos);
      lineReadPos = pos;
    }
    this.path = file.getAbsolutePath();
    this.inode = inode;
    this.pos = pos;
    this.lastUpdated = 0L;
    this.needTail = true;
    this.headers = headers;
    this.oldBuffer = new byte[0];
    this.bufferPos = NEED_READING;
  }

  public RandomAccessFile getRaf() {
    return raf;
  }

  public String getPath() {
    return path;
  }

  public long getInode() {
    return inode;
  }

  public long getPos() {
    return pos;
  }

  public long getLastUpdated() {
    return lastUpdated;
  }

  public boolean needTail() {
    return needTail;
  }

  public Map<String, String> getHeaders() {
    return headers;
  }

  public long getLineReadPos() {
    return lineReadPos;
  }

  public void setPos(long pos) {
    this.pos = pos;
  }

  public void setLastUpdated(long lastUpdated) {
    this.lastUpdated = lastUpdated;
  }

  public void setNeedTail(boolean needTail) {
    this.needTail = needTail;
  }

  public void setLineReadPos(long lineReadPos) {
    this.lineReadPos = lineReadPos;
  }

  /**
   * Re-anchors this TailFile at {@code pos} if both {@code path} and
   * {@code inode} match (e.g. when restoring from the position file).
   *
   * @return true if this instance matched and its position was updated
   */
  public boolean updatePos(String path, long inode, long pos) throws IOException {
    if (this.inode == inode && this.path.equals(path)) {
      setPos(pos);
      updateFilePos(pos);
      logger.info("Updated position, file: " + path + ", inode: " + inode + ", pos: " + pos);
      return true;
    }
    return false;
  }

  /** Seeks the underlying file to {@code pos} and discards all buffered state. */
  public void updateFilePos(long pos) throws IOException {
    raf.seek(pos);
    lineReadPos = pos;
    bufferPos = NEED_READING;
    oldBuffer = new byte[0];
  }

  /**
   * Reads up to {@code numEvents} newline-delimited events from the file.
   *
   * @param numEvents        maximum number of events to return
   * @param backoffWithoutNL if true, a trailing line with no newline is held
   *                         back (and re-read later) instead of being emitted
   * @param addByteOffset    if true, each event carries its starting byte
   *                         offset in the {@code byteoffset} header
   */
  public List<Event> readEvents(int numEvents, boolean backoffWithoutNL,
      boolean addByteOffset) throws IOException {
    List<Event> events = Lists.newLinkedList();
    for (int i = 0; i < numEvents; i++) {
      Event event = readEvent(backoffWithoutNL, addByteOffset);
      if (event == null) {
        break;
      }
      events.add(event);
    }
    return events;
  }

  private Event readEvent(boolean backoffWithoutNL, boolean addByteOffset) throws IOException {
    Long posTmp = getLineReadPos();
    LineResult line = readLine();
    if (line == null) {
      return null;
    }
    if (backoffWithoutNL && !line.lineSepInclude) {
      logger.info("Backing off in file without newline: "
          + path + ", inode: " + inode + ", pos: " + raf.getFilePointer());
      // Rewind so the incomplete line is re-read once it gets terminated.
      updateFilePos(posTmp);
      return null;
    }
    Event event = EventBuilder.withBody(line.line);
    if (addByteOffset) {
      event.getHeaders().put(BYTE_OFFSET_HEADER_KEY, posTmp.toString());
    }
    return event;
  }

  /** Refills {@code buffer} from the current file pointer. */
  private void readFile() throws IOException {
    long remaining = raf.length() - raf.getFilePointer();
    buffer = new byte[(int) Math.min(remaining, BUFFER_SIZE)];
    // read() may legally return fewer bytes than requested; shrink the buffer
    // to what was actually read so uninitialized zero bytes are never parsed
    // as file content (the file pointer only advances by bytesRead).
    int bytesRead = raf.read(buffer, 0, buffer.length);
    if (bytesRead >= 0 && bytesRead < buffer.length) {
      byte[] actual = new byte[bytesRead];
      System.arraycopy(buffer, 0, actual, 0, bytesRead);
      buffer = actual;
    }
    bufferPos = 0;
  }

  /** Returns a new array holding a[startIdxA..+lenA) followed by b[startIdxB..+lenB). */
  private byte[] concatByteArrays(byte[] a, int startIdxA, int lenA,
      byte[] b, int startIdxB, int lenB) {
    byte[] c = new byte[lenA + lenB];
    System.arraycopy(a, startIdxA, c, 0, lenA);
    System.arraycopy(b, startIdxB, c, lenA, lenB);
    return c;
  }

  /**
   * Reads the next line. Lines are terminated by '\n' ('\r\n' is collapsed,
   * the CR is stripped; a lone '\r' is kept inside the line). At EOF a
   * trailing unterminated line is returned with {@code lineSepInclude=false}.
   *
   * @return the next line, or null if no bytes are available
   */
  public LineResult readLine() throws IOException {
    LineResult lineResult = null;
    while (true) {
      if (bufferPos == NEED_READING) {
        if (raf.getFilePointer() < raf.length()) {
          readFile();
        } else {
          // EOF: hand out any carried-over bytes as an unterminated line.
          if (oldBuffer.length > 0) {
            lineResult = new LineResult(false, oldBuffer);
            oldBuffer = new byte[0];
            setLineReadPos(lineReadPos + lineResult.line.length);
          }
          break;
        }
      }
      for (int i = bufferPos; i < buffer.length; i++) {
        if (buffer[i] == BYTE_NL) {
          int oldLen = oldBuffer.length;
          // Don't copy last byte(NEW_LINE)
          int lineLen = i - bufferPos;
          // For windows, check for CR
          if (i > 0 && buffer[i - 1] == BYTE_CR) {
            lineLen -= 1;
          } else if (oldBuffer.length > 0 && oldBuffer[oldBuffer.length - 1] == BYTE_CR) {
            oldLen -= 1;
          }
          lineResult = new LineResult(true,
              concatByteArrays(oldBuffer, 0, oldLen, buffer, bufferPos, lineLen));
          // Advance by the full consumed span including the stripped CR/NL.
          setLineReadPos(lineReadPos + (oldBuffer.length + (i - bufferPos + 1)));
          oldBuffer = new byte[0];
          if (i + 1 < buffer.length) {
            bufferPos = i + 1;
          } else {
            bufferPos = NEED_READING;
          }
          break;
        }
      }
      if (lineResult != null) {
        break;
      }
      // NEW_LINE not showed up at the end of the buffer: carry the remainder
      // over and refill on the next iteration.
      oldBuffer = concatByteArrays(oldBuffer, 0, oldBuffer.length,
          buffer, bufferPos, buffer.length - bufferPos);
      bufferPos = NEED_READING;
    }
    return lineResult;
  }

  /** Closes the underlying file and stamps {@code lastUpdated}. */
  public void close() {
    try {
      raf.close();
      raf = null;
      long now = System.currentTimeMillis();
      setLastUpdated(now);
    } catch (IOException e) {
      logger.error("Failed closing file: " + path + ", inode: " + inode, e);
    }
  }

  // static: holds no reference to the enclosing TailFile instance.
  private static class LineResult {
    final boolean lineSepInclude;
    final byte[] line;

    public LineResult(boolean lineSepInclude, byte[] line) {
      super();
      this.lineSepInclude = lineSepInclude;
      this.line = line;
    }
  }
}
| 9,626 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source/taildir/TaildirMatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.taildir;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* Identifies and caches the files matched by single file pattern for {@code TAILDIR} source.
* <p></p>
* Since file patterns only apply to the fileNames and not the parent dictionaries, this
* implementation checks the parent directory for modification (additional or removed files
* update modification time of parent dir)
* If no modification happened to the parent dir that means the underlying files could only be
* written to but no need to rerun the pattern matching on fileNames.
* <p></p>
* This implementation provides lazy caching or no caching. Instances of this class keep the
* result file list from the last successful execution of {@linkplain #getMatchingFiles()}
* function invocation, and may serve the content without hitting the FileSystem for performance
* optimization.
* <p></p>
* <b>IMPORTANT:</b> It is assumed that the hosting system provides at least second granularity
* for both {@code System.currentTimeMillis()} and {@code File.lastModified()}. Also
* that system clock is used for file system timestamps. If it is not the case then configure it
* as uncached. Class is solely for package only usage. Member functions are not thread safe.
*
* @see TaildirSource
* @see ReliableTaildirEventReader
* @see TaildirSourceConfigurationConstants
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TaildirMatcher {
  private static final Logger logger = LoggerFactory.getLogger(TaildirMatcher.class);
  private static final FileSystem FS = FileSystems.getDefault();

  // flag from configuration to switch off caching completely
  private final boolean cachePatternMatching;
  // id from configuration
  private final String fileGroup;
  // plain string of the desired files from configuration
  private final String filePattern;
  // directory monitored for changes
  private final File parentDir;
  // cached instance for filtering files based on filePattern
  private final DirectoryStream.Filter<Path> fileFilter;

  // system time in milliseconds, stores the last modification time of the
  // parent directory seen by the last check, rounded to seconds
  // initial value is used in first check only when it will be replaced instantly
  // (system time is positive)
  private long lastSeenParentDirMTime = -1;
  // system time in milliseconds, time of the last check, rounded to seconds
  // initial value is used in first check only when it will be replaced instantly
  // (system time is positive)
  private long lastCheckedTime = -1;
  // cached content, files which matched the pattern within the parent directory
  private List<File> lastMatchedFiles = Lists.newArrayList();

  /**
   * Package accessible constructor. From configuration context it represents a single
   * <code>filegroup</code> and encapsulates the corresponding <code>filePattern</code>.
   * <code>filePattern</code> consists of two parts: first part has to be a valid path to an
   * existing parent directory, second part has to be a valid regex
   * {@link java.util.regex.Pattern} that match any non-hidden file names within parent directory
   * . A valid example for filePattern is <code>/dir0/dir1/.*</code> given
   * <code>/dir0/dir1</code> is an existing directory structure readable by the running user.
   * <p></p>
   * An instance of this class is created for each fileGroup
   *
   * @param fileGroup arbitrary name of the group given by the config
   * @param filePattern parent directory plus regex pattern. No wildcards are allowed in directory
   *                    name
   * @param cachePatternMatching default true, recommended in every setup especially with huge
   *                             parent directories. Don't set when local system clock is not used
   *                             for stamping mtime (eg: remote filesystems)
   * @see TaildirSourceConfigurationConstants
   */
  TaildirMatcher(String fileGroup, String filePattern, boolean cachePatternMatching) {
    // store whatever came from configuration
    this.fileGroup = fileGroup;
    this.filePattern = filePattern;
    this.cachePatternMatching = cachePatternMatching;

    // calculate final members
    File f = new File(filePattern);
    this.parentDir = f.getParentFile();
    String regex = f.getName();
    final PathMatcher matcher = FS.getPathMatcher("regex:" + regex);
    this.fileFilter = new DirectoryStream.Filter<Path>() {
      @Override
      public boolean accept(Path entry) throws IOException {
        return matcher.matches(entry.getFileName()) && !Files.isDirectory(entry);
      }
    };

    // sanity check
    Preconditions.checkState(parentDir.exists(),
        "Directory does not exist: " + parentDir.getAbsolutePath());
  }

  /**
   * Lists those files within the parentDir that match regex pattern passed in during object
   * instantiation. Designed for frequent periodic invocation
   * {@link org.apache.flume.source.PollableSourceRunner}.
   * <p></p>
   * Based on the modification of the parentDir this function may trigger cache recalculation by
   * calling {@linkplain #getMatchingFilesNoCache()} or
   * return the value stored in {@linkplain #lastMatchedFiles}.
   * Parentdir is allowed to be a symbolic link.
   * <p></p>
   * Files returned by this call are weakly consistent (see {@link DirectoryStream}).
   * It does not freeze the directory while iterating,
   * so it may (or may not) reflect updates to the directory that occur during the call,
   * In which case next call
   * will return those files (as mtime is increasing it won't hit cache but trigger recalculation).
   * It is guaranteed that invocation reflects every change which was observable at the time of
   * invocation.
   * <p></p>
   * Matching file list recalculation is triggered when caching was turned off or
   * if mtime is greater than the previously seen mtime
   * (including the case of cache hasn't been calculated before).
   * Additionally if a constantly updated directory was configured as parentDir
   * then multiple changes to the parentDir may happen
   * within the same second so in such case (assuming at least second granularity of reported mtime)
   * it is impossible to tell whether a change of the dir happened before the check or after
   * (unless the check happened after that second).
   * Having said that implementation also stores system time of the previous invocation and previous
   * invocation has to happen strictly after the current mtime to avoid further cache refresh
   * (because then it is guaranteed that previous invocation resulted in valid cache content).
   * If system clock hasn't passed the second of
   * the current mtime then logic expects more changes as well
   * (since it cannot be sure that there won't be any further changes still in that second
   * and it would like to avoid data loss in first place)
   * hence it recalculates matching files. If system clock finally
   * passed actual mtime then a subsequent invocation guarantees that it picked up every
   * change from the passed second so
   * any further invocations can be served from cache associated with that second
   * (given mtime is not updated again).
   *
   * @return List of files matching the pattern sorted by last modification time. No recursion.
   * No directories. If nothing matches then returns an empty list. If I/O issue occurred then
   * returns the list collected to the point when exception was thrown.
   *
   * @see #getMatchingFilesNoCache()
   */
  List<File> getMatchingFiles() {
    long now = TimeUnit.SECONDS.toMillis(
        TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()));
    long currentParentDirMTime = parentDir.lastModified();

    // calculate matched files if
    // - we don't want to use cache (recalculate every time) OR
    // - directory was clearly updated after the last check OR
    // - last mtime change wasn't already checked for sure
    //   (system clock hasn't passed that second yet)
    if (!cachePatternMatching ||
        lastSeenParentDirMTime < currentParentDirMTime ||
        !(currentParentDirMTime < lastCheckedTime)) {
      lastMatchedFiles = sortByLastModifiedTime(getMatchingFilesNoCache());
      lastSeenParentDirMTime = currentParentDirMTime;
      lastCheckedTime = now;
    }

    return lastMatchedFiles;
  }

  /**
   * Provides the actual files within the parentDir which
   * files are matching the regex pattern. Each invocation uses {@link DirectoryStream}
   * to identify matching files.
   *
   * Files returned by this call are weakly consistent (see {@link DirectoryStream}).
   * It does not freeze the directory while iterating, so it may (or may not) reflect updates
   * to the directory that occur during the call. In which case next call will return those files.
   *
   * @return List of files matching the pattern unsorted. No recursion. No directories.
   * If nothing matches then returns an empty list. If I/O issue occurred then returns the list
   * collected to the point when exception was thrown.
   *
   * @see DirectoryStream
   * @see DirectoryStream.Filter
   */
  private List<File> getMatchingFilesNoCache() {
    List<File> result = Lists.newArrayList();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(parentDir.toPath(), fileFilter)) {
      for (Path entry : stream) {
        result.add(entry.toFile());
      }
    } catch (IOException e) {
      logger.error("I/O exception occurred while listing parent directory. " +
          "Files already matched will be returned. " + parentDir.toPath(), e);
    }
    return result;
  }

  /**
   * Utility function to sort matched files based on last modification time.
   * Sorting itself use only a snapshot of last modification times captured before the sorting
   * to keep the number of stat system calls to the required minimum.
   *
   * @param files list of files in any order
   * @return sorted list
   */
  private static List<File> sortByLastModifiedTime(List<File> files) {
    final HashMap<File, Long> lastModificationTimes = new HashMap<File, Long>(files.size());
    for (File f: files) {
      lastModificationTimes.put(f, f.lastModified());
    }
    Collections.sort(files, new Comparator<File>() {
      @Override
      public int compare(File o1, File o2) {
        return lastModificationTimes.get(o1).compareTo(lastModificationTimes.get(o2));
      }
    });

    return files;
  }

  @Override
  public String toString() {
    return "{" +
        "filegroup='" + fileGroup + '\'' +
        ", filePattern='" + filePattern + '\'' +
        ", cached=" + cachePatternMatching +
        '}';
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    TaildirMatcher that = (TaildirMatcher) o;
    return fileGroup.equals(that.fileGroup);
  }

  @Override
  public int hashCode() {
    return fileGroup.hashCode();
  }

  public String getFileGroup() {
    return fileGroup;
  }
}
| 9,627 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source/taildir/ReliableTaildirEventReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.taildir;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.gson.stream.JsonReader;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.client.avro.ReliableEventReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReliableTaildirEventReader implements ReliableEventReader {
  private static final Logger logger = LoggerFactory.getLogger(ReliableTaildirEventReader.class);

  /** Matchers resolving each configured file group to its current set of files. */
  private final List<TaildirMatcher> taildirCache;
  /** Static headers per file group: row = group name, column = header key, value = header value. */
  private final Table<String, String, String> headerTable;

  /** File the next readEvents() call consumes from; set via setCurrentFile(). */
  private TailFile currentFile = null;
  /** Known tailed files keyed by inode, so renames/rotations can be tracked reliably. */
  private Map<Long, TailFile> tailFiles = Maps.newHashMap();
  /** Timestamp of the last updateTailFiles() run; stamped onto the file on commit(). */
  private long updateTime;
  private boolean addByteOffset;
  private boolean cachePatternMatching;
  /** False while a read batch is outstanding (handed out but not yet committed). */
  private boolean committed = true;
  private final boolean annotateFileName;
  private final String fileNameHeader;

  /**
   * Create a ReliableTaildirEventReader to watch the given directory.
   *
   * @param filePaths mapping of file group name to the path/pattern to tail
   * @param headerTable static headers to attach to events, keyed by file group
   * @param positionFilePath JSON file holding the last read position of each file
   * @param skipToEnd whether files absent from the position file start at EOF
   * @param addByteOffset whether to add the line's byte offset as an event header
   * @param cachePatternMatching whether TaildirMatcher may cache directory listings
   * @param annotateFileName whether to add the source file path as an event header
   * @param fileNameHeader header key used when annotateFileName is set
   * @throws IOException if the initial file scan fails
   */
  private ReliableTaildirEventReader(Map<String, String> filePaths,
      Table<String, String, String> headerTable, String positionFilePath,
      boolean skipToEnd, boolean addByteOffset, boolean cachePatternMatching,
      boolean annotateFileName, String fileNameHeader) throws IOException {
    // Sanity checks
    Preconditions.checkNotNull(filePaths);
    Preconditions.checkNotNull(positionFilePath);

    if (logger.isDebugEnabled()) {
      logger.debug("Initializing {} with directory={}",
          new Object[] { ReliableTaildirEventReader.class.getSimpleName(), filePaths });
    }

    List<TaildirMatcher> taildirCache = Lists.newArrayList();
    for (Entry<String, String> e : filePaths.entrySet()) {
      taildirCache.add(new TaildirMatcher(e.getKey(), e.getValue(), cachePatternMatching));
    }
    logger.info("taildirCache: " + taildirCache.toString());
    logger.info("headerTable: " + headerTable.toString());

    this.taildirCache = taildirCache;
    this.headerTable = headerTable;
    this.addByteOffset = addByteOffset;
    this.cachePatternMatching = cachePatternMatching;
    this.annotateFileName = annotateFileName;
    this.fileNameHeader = fileNameHeader;
    updateTailFiles(skipToEnd);

    logger.info("Updating position from position file: " + positionFilePath);
    loadPositionFile(positionFilePath);
  }

  /**
   * Load a position file which has the last read position of each file.
   * If the position file exists, update tailFiles mapping.
   *
   * Unknown JSON fields are skipped so position files containing extra fields
   * still load; a missing or unreadable file leaves all positions unchanged.
   */
  public void loadPositionFile(String filePath) {
    Long inode, pos;
    String path;
    // try-with-resources guarantees both the reader and the JSON wrapper are
    // closed even if parsing throws (the previous explicit finally could leak
    // the JsonReader if FileReader.close() threw first).
    try (FileReader fr = new FileReader(filePath);
         JsonReader jr = new JsonReader(fr)) {
      jr.beginArray();
      while (jr.hasNext()) {
        inode = null;
        pos = null;
        path = null;
        jr.beginObject();
        while (jr.hasNext()) {
          switch (jr.nextName()) {
            case "inode":
              inode = jr.nextLong();
              break;
            case "pos":
              pos = jr.nextLong();
              break;
            case "file":
              path = jr.nextString();
              break;
            default:
              // Unknown field: its value must still be consumed, otherwise the
              // next nextName() call fails and aborts loading the whole file.
              jr.skipValue();
              break;
          }
        }
        jr.endObject();

        for (Object v : Arrays.asList(inode, pos, path)) {
          Preconditions.checkNotNull(v, "Detected missing value in position file. "
              + "inode: " + inode + ", pos: " + pos + ", path: " + path);
        }
        TailFile tf = tailFiles.get(inode);
        if (tf != null && tf.updatePos(path, inode, pos)) {
          tailFiles.put(inode, tf);
        } else {
          logger.info("Missing file: " + path + ", inode: " + inode + ", pos: " + pos);
        }
      }
      jr.endArray();
    } catch (FileNotFoundException e) {
      logger.info("File not found: " + filePath + ", not updating position");
    } catch (IOException e) {
      logger.error("Failed loading positionFile: " + filePath, e);
    }
  }

  public Map<Long, TailFile> getTailFiles() {
    return tailFiles;
  }

  public void setCurrentFile(TailFile currentFile) {
    this.currentFile = currentFile;
  }

  @Override
  public Event readEvent() throws IOException {
    List<Event> events = readEvents(1);
    if (events.isEmpty()) {
      return null;
    }
    return events.get(0);
  }

  @Override
  public List<Event> readEvents(int numEvents) throws IOException {
    return readEvents(numEvents, false);
  }

  @VisibleForTesting
  public List<Event> readEvents(TailFile tf, int numEvents) throws IOException {
    setCurrentFile(tf);
    return readEvents(numEvents, true);
  }

  /**
   * Reads up to numEvents events from the current file. If the previous batch
   * was never committed, the file position is first reset so those lines are
   * re-delivered (at-least-once semantics).
   *
   * @throws IllegalStateException if no current file is set while an
   *         uncommitted batch is outstanding
   */
  public List<Event> readEvents(int numEvents, boolean backoffWithoutNL)
      throws IOException {
    if (!committed) {
      if (currentFile == null) {
        // Bug fix: the old message appended currentFile.getPath(), which threw
        // a NullPointerException here instead of the intended exception.
        throw new IllegalStateException("current file does not exist.");
      }
      logger.info("Last read was never committed - resetting position");
      long lastPos = currentFile.getPos();
      currentFile.updateFilePos(lastPos);
    }
    List<Event> events = currentFile.readEvents(numEvents, backoffWithoutNL, addByteOffset);
    if (events.isEmpty()) {
      return events;
    }

    Map<String, String> headers = currentFile.getHeaders();
    if (annotateFileName || (headers != null && !headers.isEmpty())) {
      for (Event event : events) {
        if (headers != null && !headers.isEmpty()) {
          event.getHeaders().putAll(headers);
        }
        if (annotateFileName) {
          event.getHeaders().put(fileNameHeader, currentFile.getPath());
        }
      }
    }
    committed = false;
    return events;
  }

  @Override
  public void close() throws IOException {
    for (TailFile tf : tailFiles.values()) {
      if (tf.getRaf() != null) tf.getRaf().close();
    }
  }

  /** Commit the last lines which were read. */
  @Override
  public void commit() throws IOException {
    if (!committed && currentFile != null) {
      long pos = currentFile.getLineReadPos();
      currentFile.setPos(pos);
      currentFile.setLastUpdated(updateTime);
      committed = true;
    }
  }

  /**
   * Update tailFiles mapping if a new file is created or appends are detected
   * to the existing file.
   *
   * @param skipToEnd if true, newly discovered files start tailing from EOF
   * @return inodes of all files currently matched by the configured groups
   */
  public List<Long> updateTailFiles(boolean skipToEnd) throws IOException {
    updateTime = System.currentTimeMillis();
    List<Long> updatedInodes = Lists.newArrayList();

    for (TaildirMatcher taildir : taildirCache) {
      Map<String, String> headers = headerTable.row(taildir.getFileGroup());

      for (File f : taildir.getMatchingFiles()) {
        long inode;
        try {
          inode = getInode(f);
        } catch (NoSuchFileException e) {
          // Race: the file matched the pattern but vanished before stat.
          logger.info("File has been deleted in the meantime: " + e.getMessage());
          continue;
        }
        TailFile tf = tailFiles.get(inode);
        if (tf == null || !tf.getPath().equals(f.getAbsolutePath())) {
          // New inode, or a known inode now reachable under a new path (rename).
          long startPos = skipToEnd ? f.length() : 0;
          tf = openFile(f, headers, inode, startPos);
        } else {
          boolean updated = tf.getLastUpdated() < f.lastModified() || tf.getPos() != f.length();
          if (updated) {
            if (tf.getRaf() == null) {
              tf = openFile(f, headers, inode, tf.getPos());
            }
            if (f.length() < tf.getPos()) {
              // The file shrank (e.g. truncated in place): restart from 0.
              logger.info("Pos " + tf.getPos() + " is larger than file size! "
                  + "Restarting from pos 0, file: " + tf.getPath() + ", inode: " + inode);
              tf.updatePos(tf.getPath(), inode, 0);
            }
          }
          tf.setNeedTail(updated);
        }
        tailFiles.put(inode, tf);
        updatedInodes.add(inode);
      }
    }
    return updatedInodes;
  }

  public List<Long> updateTailFiles() throws IOException {
    return updateTailFiles(false);
  }

  /** Resolves the inode number of the file (requires a unix-attribute filesystem view). */
  private long getInode(File file) throws IOException {
    long inode = (long) Files.getAttribute(file.toPath(), "unix:ino");
    return inode;
  }

  private TailFile openFile(File file, Map<String, String> headers, long inode, long pos) {
    try {
      logger.info("Opening file: " + file + ", inode: " + inode + ", pos: " + pos);
      return new TailFile(file, headers, inode, pos);
    } catch (IOException e) {
      throw new FlumeException("Failed opening file: " + file, e);
    }
  }

  /**
   * Special builder class for ReliableTaildirEventReader
   */
  public static class Builder {
    private Map<String, String> filePaths;
    private Table<String, String, String> headerTable;
    private String positionFilePath;
    private boolean skipToEnd;
    private boolean addByteOffset;
    private boolean cachePatternMatching;
    private Boolean annotateFileName =
        TaildirSourceConfigurationConstants.DEFAULT_FILE_HEADER;
    private String fileNameHeader =
        TaildirSourceConfigurationConstants.DEFAULT_FILENAME_HEADER_KEY;

    public Builder filePaths(Map<String, String> filePaths) {
      this.filePaths = filePaths;
      return this;
    }

    public Builder headerTable(Table<String, String, String> headerTable) {
      this.headerTable = headerTable;
      return this;
    }

    public Builder positionFilePath(String positionFilePath) {
      this.positionFilePath = positionFilePath;
      return this;
    }

    public Builder skipToEnd(boolean skipToEnd) {
      this.skipToEnd = skipToEnd;
      return this;
    }

    public Builder addByteOffset(boolean addByteOffset) {
      this.addByteOffset = addByteOffset;
      return this;
    }

    public Builder cachePatternMatching(boolean cachePatternMatching) {
      this.cachePatternMatching = cachePatternMatching;
      return this;
    }

    public Builder annotateFileName(boolean annotateFileName) {
      this.annotateFileName = annotateFileName;
      return this;
    }

    public Builder fileNameHeader(String fileNameHeader) {
      this.fileNameHeader = fileNameHeader;
      return this;
    }

    public ReliableTaildirEventReader build() throws IOException {
      return new ReliableTaildirEventReader(filePaths, headerTable, positionFilePath, skipToEnd,
                                            addByteOffset, cachePatternMatching,
                                            annotateFileName, fileNameHeader);
    }
  }

}
| 9,628 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source/taildir/TaildirSourceConfigurationConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.flume.source.taildir;
public class TaildirSourceConfigurationConstants {

  /** Mapping for tailing file groups. */
  public static final String FILE_GROUPS = "filegroups";
  public static final String FILE_GROUPS_PREFIX = FILE_GROUPS + ".";

  /** Mapping for putting headers to events grouped by file groups. */
  public static final String HEADERS_PREFIX = "headers.";

  /** Path of position file. */
  public static final String POSITION_FILE = "positionFile";
  public static final String DEFAULT_POSITION_FILE = "/.flume/taildir_position.json";

  /** What size to batch with before sending to ChannelProcessor. */
  public static final String BATCH_SIZE = "batchSize";
  public static final int DEFAULT_BATCH_SIZE = 100;

  /** Whether to skip the position to EOF in the case of files not written on the position file. */
  public static final String SKIP_TO_END = "skipToEnd";
  public static final boolean DEFAULT_SKIP_TO_END = false;

  /** Time (ms) to close idle files. */
  public static final String IDLE_TIMEOUT = "idleTimeout";
  public static final int DEFAULT_IDLE_TIMEOUT = 120000;

  /** Interval time (ms) to write the last position of each file on the position file. */
  public static final String WRITE_POS_INTERVAL = "writePosInterval";
  public static final int DEFAULT_WRITE_POS_INTERVAL = 3000;

  /** Whether to add the byte offset of a tailed line to the header */
  public static final String BYTE_OFFSET_HEADER = "byteOffsetHeader";
  public static final String BYTE_OFFSET_HEADER_KEY = "byteoffset";
  public static final boolean DEFAULT_BYTE_OFFSET_HEADER = false;

  /** Whether to cache the list of files matching the specified file patterns till parent directory
   * is modified.
   */
  public static final String CACHE_PATTERN_MATCHING = "cachePatternMatching";
  public static final boolean DEFAULT_CACHE_PATTERN_MATCHING = true;

  /** Header in which to put absolute path filename. */
  public static final String FILENAME_HEADER_KEY = "fileHeaderKey";
  public static final String DEFAULT_FILENAME_HEADER_KEY = "file";

  /** Whether to include absolute path filename in a header. */
  public static final String FILENAME_HEADER = "fileHeader";
  public static final boolean DEFAULT_FILE_HEADER = false;

  /** The max number of batch reads from a file in one loop */
  public static final String MAX_BATCH_COUNT = "maxBatchCount";
  public static final Long DEFAULT_MAX_BATCH_COUNT = Long.MAX_VALUE;

  /** Constants holder; not meant to be instantiated. */
  private TaildirSourceConfigurationConstants() {
  }
}
| 9,629 |
0 | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-taildir-source/src/main/java/org/apache/flume/source/taildir/TaildirSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.flume.source.taildir;
import static org.apache.flume.source.taildir.TaildirSourceConfigurationConstants.*;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.PollableSource;
import org.apache.flume.conf.BatchSizeSupported;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractSource;
import org.apache.flume.source.PollableSourceConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.gson.Gson;
/**
 * Source that tails a set of files matched by configurable path patterns
 * ("file groups"), tracking read positions per inode in a JSON position file
 * so reads survive restarts. Two background schedulers run alongside
 * {@link #process()}: one detects idle files to close, one persists positions.
 */
public class TaildirSource extends AbstractSource implements
    PollableSource, Configurable, BatchSizeSupported {

  private static final Logger logger = LoggerFactory.getLogger(TaildirSource.class);

  /** File group name -> path pattern, from configuration. */
  private Map<String, String> filePaths;
  /** Static event headers per file group (row = group, column = key). */
  private Table<String, String, String> headerTable;
  private int batchSize;
  private String positionFilePath;
  private boolean skipToEnd;
  private boolean byteOffsetHeader;

  private SourceCounter sourceCounter;
  private ReliableTaildirEventReader reader;
  private ScheduledExecutorService idleFileChecker;
  private ScheduledExecutorService positionWriter;
  // Backoff for a full channel: doubles per retry up to maxRetryInterval.
  private int retryInterval = 1000;
  private int maxRetryInterval = 5000;
  private int idleTimeout;
  private int checkIdleInterval = 5000;
  private int writePosInitDelay = 5000;
  private int writePosInterval;
  private boolean cachePatternMatching;

  // CopyOnWriteArrayList: written by process()/idleFileChecker, read by positionWriter.
  private List<Long> existingInodes = new CopyOnWriteArrayList<Long>();
  private List<Long> idleInodes = new CopyOnWriteArrayList<Long>();
  private Long backoffSleepIncrement;
  private Long maxBackOffSleepInterval;
  private boolean fileHeader;
  private String fileHeaderKey;
  private Long maxBatchCount;

  /** Builds the reader and starts both background schedulers. */
  @Override
  public synchronized void start() {
    logger.info("{} TaildirSource source starting with directory: {}", getName(), filePaths);
    try {
      reader = new ReliableTaildirEventReader.Builder()
          .filePaths(filePaths)
          .headerTable(headerTable)
          .positionFilePath(positionFilePath)
          .skipToEnd(skipToEnd)
          .addByteOffset(byteOffsetHeader)
          .cachePatternMatching(cachePatternMatching)
          .annotateFileName(fileHeader)
          .fileNameHeader(fileHeaderKey)
          .build();
    } catch (IOException e) {
      throw new FlumeException("Error instantiating ReliableTaildirEventReader", e);
    }
    idleFileChecker = Executors.newSingleThreadScheduledExecutor(
        new ThreadFactoryBuilder().setNameFormat("idleFileChecker").build());
    idleFileChecker.scheduleWithFixedDelay(new idleFileCheckerRunnable(),
        idleTimeout, checkIdleInterval, TimeUnit.MILLISECONDS);

    positionWriter = Executors.newSingleThreadScheduledExecutor(
        new ThreadFactoryBuilder().setNameFormat("positionWriter").build());
    positionWriter.scheduleWithFixedDelay(new PositionWriterRunnable(),
        writePosInitDelay, writePosInterval, TimeUnit.MILLISECONDS);

    super.start();
    logger.debug("TaildirSource started");
    sourceCounter.start();
  }

  /** Shuts down the schedulers, flushes the position file and closes open files. */
  @Override
  public synchronized void stop() {
    try {
      super.stop();
      ExecutorService[] services = {idleFileChecker, positionWriter};
      for (ExecutorService service : services) {
        service.shutdown();
        if (!service.awaitTermination(1, TimeUnit.SECONDS)) {
          service.shutdownNow();
        }
      }
      // write the last position
      writePosition();
      reader.close();
    } catch (InterruptedException e) {
      logger.info("Interrupted while awaiting termination", e);
      // Bug fix: restore the interrupt status so callers up the stack can
      // observe it; previously the interrupt was silently swallowed.
      Thread.currentThread().interrupt();
    } catch (IOException e) {
      logger.info("Failed: " + e.getMessage(), e);
    }
    sourceCounter.stop();
    logger.info("Taildir source {} stopped. Metrics: {}", getName(), sourceCounter);
  }

  @Override
  public String toString() {
    return String.format("Taildir source: { positionFile: %s, skipToEnd: %s, "
        + "byteOffsetHeader: %s, idleTimeout: %s, writePosInterval: %s }",
        positionFilePath, skipToEnd, byteOffsetHeader, idleTimeout, writePosInterval);
  }

  /** Reads all taildir settings from the Flume context, applying documented defaults. */
  @Override
  public synchronized void configure(Context context) {
    String fileGroups = context.getString(FILE_GROUPS);
    Preconditions.checkState(fileGroups != null, "Missing param: " + FILE_GROUPS);

    filePaths = selectByKeys(context.getSubProperties(FILE_GROUPS_PREFIX),
                             fileGroups.split("\\s+"));
    Preconditions.checkState(!filePaths.isEmpty(),
        "Mapping for tailing files is empty or invalid: '" + FILE_GROUPS_PREFIX + "'");

    String homePath = System.getProperty("user.home").replace('\\', '/');
    positionFilePath = context.getString(POSITION_FILE, homePath + DEFAULT_POSITION_FILE);
    Path positionFile = Paths.get(positionFilePath);
    try {
      Files.createDirectories(positionFile.getParent());
    } catch (IOException e) {
      throw new FlumeException("Error creating positionFile parent directories", e);
    }
    headerTable = getTable(context, HEADERS_PREFIX);
    batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH_SIZE);
    skipToEnd = context.getBoolean(SKIP_TO_END, DEFAULT_SKIP_TO_END);
    byteOffsetHeader = context.getBoolean(BYTE_OFFSET_HEADER, DEFAULT_BYTE_OFFSET_HEADER);
    idleTimeout = context.getInteger(IDLE_TIMEOUT, DEFAULT_IDLE_TIMEOUT);
    writePosInterval = context.getInteger(WRITE_POS_INTERVAL, DEFAULT_WRITE_POS_INTERVAL);
    cachePatternMatching = context.getBoolean(CACHE_PATTERN_MATCHING,
        DEFAULT_CACHE_PATTERN_MATCHING);

    backoffSleepIncrement = context.getLong(PollableSourceConstants.BACKOFF_SLEEP_INCREMENT,
        PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
    maxBackOffSleepInterval = context.getLong(PollableSourceConstants.MAX_BACKOFF_SLEEP,
        PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP);
    fileHeader = context.getBoolean(FILENAME_HEADER,
            DEFAULT_FILE_HEADER);
    fileHeaderKey = context.getString(FILENAME_HEADER_KEY,
            DEFAULT_FILENAME_HEADER_KEY);
    maxBatchCount = context.getLong(MAX_BATCH_COUNT, DEFAULT_MAX_BATCH_COUNT);
    if (maxBatchCount <= 0) {
      maxBatchCount = DEFAULT_MAX_BATCH_COUNT;
      logger.warn("Invalid maxBatchCount specified, initializing source "
          + "default maxBatchCount of {}", maxBatchCount);
    }

    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }
  }

  @Override
  public long getBatchSize() {
    return batchSize;
  }

  /** Returns the subset of {@code map} whose keys appear in {@code keys}. */
  private Map<String, String> selectByKeys(Map<String, String> map, String[] keys) {
    Map<String, String> result = Maps.newHashMap();
    for (String key : keys) {
      if (map.containsKey(key)) {
        result.put(key, map.get(key));
      }
    }
    return result;
  }

  /** Parses "group.key = value" sub-properties into a (group, key) -> value table. */
  private Table<String, String, String> getTable(Context context, String prefix) {
    Table<String, String, String> table = HashBasedTable.create();
    for (Entry<String, String> e : context.getSubProperties(prefix).entrySet()) {
      String[] parts = e.getKey().split("\\.", 2);
      table.put(parts[0], parts[1], e.getValue());
    }
    return table;
  }

  @VisibleForTesting
  protected SourceCounter getSourceCounter() {
    return sourceCounter;
  }

  /** One poll cycle: rescan files, drain every file flagged as needing a tail. */
  @Override
  public Status process() {
    Status status = Status.BACKOFF;
    try {
      existingInodes.clear();
      existingInodes.addAll(reader.updateTailFiles());
      for (long inode : existingInodes) {
        TailFile tf = reader.getTailFiles().get(inode);
        if (tf.needTail()) {
          boolean hasMoreLines = tailFileProcess(tf, true);
          if (hasMoreLines) {
            status = Status.READY;
          }
        }
      }
      closeTailFiles();
    } catch (Throwable t) {
      logger.error("Unable to tail files", t);
      sourceCounter.incrementEventReadFail();
      status = Status.BACKOFF;
    }
    return status;
  }

  @Override
  public long getBackOffSleepIncrement() {
    return backoffSleepIncrement;
  }

  @Override
  public long getMaxBackOffSleepInterval() {
    return maxBackOffSleepInterval;
  }

  /**
   * Drains one file in batches of {@code batchSize}, retrying with exponential
   * backoff when the channel is full.
   *
   * @return true if the file may still have more lines (maxBatchCount reached)
   */
  private boolean tailFileProcess(TailFile tf, boolean backoffWithoutNL)
      throws IOException, InterruptedException {
    long batchCount = 0;
    while (true) {
      reader.setCurrentFile(tf);
      List<Event> events = reader.readEvents(batchSize, backoffWithoutNL);
      if (events.isEmpty()) {
        return false;
      }
      sourceCounter.addToEventReceivedCount(events.size());
      sourceCounter.incrementAppendBatchReceivedCount();
      try {
        getChannelProcessor().processEventBatch(events);
        reader.commit();
      } catch (ChannelException ex) {
        logger.warn("The channel is full or unexpected failure. " +
            "The source will try again after " + retryInterval + " ms");
        sourceCounter.incrementChannelWriteFail();
        TimeUnit.MILLISECONDS.sleep(retryInterval);
        retryInterval = retryInterval << 1;
        retryInterval = Math.min(retryInterval, maxRetryInterval);
        continue;
      }
      retryInterval = 1000;
      sourceCounter.addToEventAcceptedCount(events.size());
      sourceCounter.incrementAppendBatchAcceptedCount();
      if (events.size() < batchSize) {
        logger.debug("The events taken from " + tf.getPath() + " is less than " + batchSize);
        return false;
      }
      if (++batchCount >= maxBatchCount) {
        // Yield to other files so a single busy file cannot starve the rest.
        logger.debug("The batches read from the same file is larger than " + maxBatchCount );
        return true;
      }
    }
  }

  /** Drains and closes the files the idle checker flagged since the last cycle. */
  private void closeTailFiles() throws IOException, InterruptedException {
    for (long inode : idleInodes) {
      TailFile tf = reader.getTailFiles().get(inode);
      if (tf.getRaf() != null) { // when file has not closed yet
        tailFileProcess(tf, false);
        tf.close();
        logger.info("Closed file: " + tf.getPath() + ", inode: " + inode + ", pos: " + tf.getPos());
      }
    }
    idleInodes.clear();
  }

  /**
   * Runnable class that checks whether there are files which should be closed.
   */
  private class idleFileCheckerRunnable implements Runnable {
    @Override
    public void run() {
      try {
        long now = System.currentTimeMillis();
        for (TailFile tf : reader.getTailFiles().values()) {
          if (tf.getLastUpdated() + idleTimeout < now && tf.getRaf() != null) {
            idleInodes.add(tf.getInode());
          }
        }
      } catch (Throwable t) {
        logger.error("Uncaught exception in IdleFileChecker thread", t);
        sourceCounter.incrementGenericProcessingFail();
      }
    }
  }

  /**
   * Runnable class that writes a position file which has the last read position
   * of each file.
   */
  private class PositionWriterRunnable implements Runnable {
    @Override
    public void run() {
      writePosition();
    }
  }

  /** Overwrites the position file with the current positions of all known inodes. */
  private void writePosition() {
    File file = new File(positionFilePath);
    FileWriter writer = null;
    try {
      writer = new FileWriter(file);
      if (!existingInodes.isEmpty()) {
        String json = toPosInfoJson();
        writer.write(json);
      }
    } catch (Throwable t) {
      logger.error("Failed writing positionFile", t);
      sourceCounter.incrementGenericProcessingFail();
    } finally {
      try {
        if (writer != null) writer.close();
      } catch (IOException e) {
        logger.error("Error: " + e.getMessage(), e);
        sourceCounter.incrementGenericProcessingFail();
      }
    }
  }

  /** Serializes (inode, pos, file) triples for every tracked inode as a JSON array. */
  private String toPosInfoJson() {
    @SuppressWarnings("rawtypes")
    List<Map> posInfos = Lists.newArrayList();
    for (Long inode : existingInodes) {
      TailFile tf = reader.getTailFiles().get(inode);
      posInfos.add(ImmutableMap.of("inode", inode, "pos", tf.getPos(), "file", tf.getPath()));
    }
    return new Gson().toJson(posInfos);
  }
}
| 9,630 |
0 | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/test/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/test/java/org/apache/flume/source/scribe/TestScribeSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.source.scribe;
import junit.framework.Assert;
import org.apache.flume.*;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.layered.TFramedTransport;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.List;
import static org.mockito.Matchers.anyListOf;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
/**
*
*/
/**
 * Integration-style tests for {@code ScribeSource}: a real source is started on a
 * free local port once per class, backed by a {@code MemoryChannel}, and Thrift
 * clients push {@code LogEntry} batches at it over a framed binary transport.
 *
 * NOTE(review): tests share one source/channel instance via static state, so they
 * are order-sensitive with respect to channel contents.
 */
public class TestScribeSource {

  // Shared across all tests; initialized in setUpClass(), torn down in cleanup().
  private static int port;
  private static Channel memoryChannel;
  private static ScribeSource scribeSource;

  /**
   * Picks an ephemeral port by binding a ServerSocket to port 0 and releasing it.
   * NOTE(review): small race window — another process could grab the port between
   * close() and the source binding it.
   */
  private static int findFreePort() throws IOException {
    ServerSocket socket = new ServerSocket(0);
    int port = socket.getLocalPort();
    socket.close();
    return port;
  }

  /** Wires up and starts the shared source -> replicating selector -> memory channel. */
  @BeforeClass
  public static void setUpClass() throws Exception {
    port = findFreePort();

    Context context = new Context();
    context.put("port", String.valueOf(port));

    scribeSource = new ScribeSource();
    scribeSource.setName("Scribe Source");

    Configurables.configure(scribeSource, context);

    memoryChannel = new MemoryChannel();
    Configurables.configure(memoryChannel, context);

    List<Channel> channels = new ArrayList<Channel>(1);
    channels.add(memoryChannel);

    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);

    memoryChannel.start();

    scribeSource.setChannelProcessor(new ChannelProcessor(rcs));
    scribeSource.start();
  }

  /** Sends one INFO log entry to the running source over a fresh Thrift connection. */
  private void sendSingle() throws org.apache.thrift.TException {
    TTransport transport = new TFramedTransport(new TSocket("localhost", port));
    TProtocol protocol = new TBinaryProtocol(transport);
    Scribe.Client client = new Scribe.Client(protocol);
    transport.open();
    LogEntry logEntry = new LogEntry("INFO", "Sending info msg to scribe source");
    List<LogEntry> logEntries = new ArrayList<LogEntry>(1);
    logEntries.add(logEntry);
    client.Log(logEntries);
  }

  /** A single sent message must arrive on the channel with its body intact. */
  @Test
  public void testScribeMessage() throws Exception {
    sendSingle();

    // try to get it from Channels
    Transaction tx = memoryChannel.getTransaction();
    tx.begin();
    Event e = memoryChannel.take();
    Assert.assertNotNull(e);
    Assert.assertEquals("Sending info msg to scribe source", new String(e.getBody()));
    tx.commit();
    tx.close();
  }

  /** A batch of ten messages must arrive in order, one event per entry. */
  @Test
  public void testScribeMultipleMessages() throws Exception {
    TTransport transport = new TFramedTransport(new TSocket("localhost", port));
    TProtocol protocol = new TBinaryProtocol(transport);
    Scribe.Client client = new Scribe.Client(protocol);
    transport.open();

    List<LogEntry> logEntries = new ArrayList<LogEntry>(10);
    for (int i = 0; i < 10; i++) {
      LogEntry logEntry = new LogEntry("INFO", String.format("Sending info msg# %d to scribe source", i));
      logEntries.add(logEntry);
    }

    client.Log(logEntries);

    // try to get it from Channels
    Transaction tx = memoryChannel.getTransaction();
    tx.begin();

    for (int i = 0; i < 10; i++) {
      Event e = memoryChannel.take();
      Assert.assertNotNull(e);
      Assert.assertEquals(String.format("Sending info msg# %d to scribe source", i), new String(e.getBody()));
    }
    tx.commit();
    tx.close();
  }

  /**
   * A channel failure during delivery must bump the source's channelWriteFail
   * counter; a mock processor is swapped in temporarily to force the failure.
   */
  @Test
  public void testErrorCounter() throws Exception {
    ChannelProcessor cp = mock(ChannelProcessor.class);
    doThrow(new ChannelException("dummy")).when(cp).processEventBatch(anyListOf(Event.class));
    ChannelProcessor origCp = scribeSource.getChannelProcessor();
    scribeSource.setChannelProcessor(cp);

    sendSingle();

    // Restore the real processor so later tests are unaffected.
    scribeSource.setChannelProcessor(origCp);
    SourceCounter sc = (SourceCounter) Whitebox.getInternalState(scribeSource, "sourceCounter");
    org.junit.Assert.assertEquals(1, sc.getChannelWriteFail());
  }

  /** Stops the shared channel and source after all tests have run. */
  @AfterClass
  public static void cleanup() {
    memoryChannel.stop();
    scribeSource.stop();
  }
}
| 9,631 |
0 | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source/scribe/LogEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.source.scribe;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-09-01")
// NOTE(review): Thrift-compiler-generated struct (category + message pair sent
// by scribe clients). Comments added for readability only — any hand edits,
// including these comments, are lost when the IDL is regenerated.
public class LogEntry implements org.apache.thrift.TBase<LogEntry, LogEntry._Fields>, java.io.Serializable, Cloneable, Comparable<LogEntry> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LogEntry");

  private static final org.apache.thrift.protocol.TField CATEGORY_FIELD_DESC = new org.apache.thrift.protocol.TField("category", org.apache.thrift.protocol.TType.STRING, (short)1);
  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)2);

  // Maps each serialization scheme class to the factory producing its reader/writer.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new LogEntryStandardSchemeFactory());
    schemes.put(TupleScheme.class, new LogEntryTupleSchemeFactory());
  }

  public String category; // required
  public String message; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    CATEGORY((short)1, "category"),
    MESSAGE((short)2, "message");

    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // CATEGORY
          return CATEGORY;
        case 2: // MESSAGE
          return MESSAGE;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.CATEGORY, new org.apache.thrift.meta_data.FieldMetaData("category", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LogEntry.class, metaDataMap);
  }

  public LogEntry() {
  }

  public LogEntry(
    String category,
    String message)
  {
    this();
    this.category = category;
    this.message = message;
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public LogEntry(LogEntry other) {
    if (other.isSetCategory()) {
      this.category = other.category;
    }
    if (other.isSetMessage()) {
      this.message = other.message;
    }
  }

  public LogEntry deepCopy() {
    return new LogEntry(this);
  }

  @Override
  public void clear() {
    this.category = null;
    this.message = null;
  }

  public String getCategory() {
    return this.category;
  }

  public LogEntry setCategory(String category) {
    this.category = category;
    return this;
  }

  public void unsetCategory() {
    this.category = null;
  }

  /** Returns true if field category is set (has been assigned a value) and false otherwise */
  public boolean isSetCategory() {
    return this.category != null;
  }

  public void setCategoryIsSet(boolean value) {
    if (!value) {
      this.category = null;
    }
  }

  public String getMessage() {
    return this.message;
  }

  public LogEntry setMessage(String message) {
    this.message = message;
    return this;
  }

  public void unsetMessage() {
    this.message = null;
  }

  /** Returns true if field message is set (has been assigned a value) and false otherwise */
  public boolean isSetMessage() {
    return this.message != null;
  }

  public void setMessageIsSet(boolean value) {
    if (!value) {
      this.message = null;
    }
  }

  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case CATEGORY:
      if (value == null) {
        unsetCategory();
      } else {
        setCategory((String)value);
      }
      break;

    case MESSAGE:
      if (value == null) {
        unsetMessage();
      } else {
        setMessage((String)value);
      }
      break;

    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    case CATEGORY:
      return getCategory();

    case MESSAGE:
      return getMessage();

    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    case CATEGORY:
      return isSetCategory();
    case MESSAGE:
      return isSetMessage();
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof LogEntry)
      return this.equals((LogEntry)that);
    return false;
  }

  public boolean equals(LogEntry that) {
    if (that == null)
      return false;

    boolean this_present_category = true && this.isSetCategory();
    boolean that_present_category = true && that.isSetCategory();
    if (this_present_category || that_present_category) {
      if (!(this_present_category && that_present_category))
        return false;
      if (!this.category.equals(that.category))
        return false;
    }

    boolean this_present_message = true && this.isSetMessage();
    boolean that_present_message = true && that.isSetMessage();
    if (this_present_message || that_present_message) {
      if (!(this_present_message && that_present_message))
        return false;
      if (!this.message.equals(that.message))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    List<Object> list = new ArrayList<Object>();

    boolean present_category = true && (isSetCategory());
    list.add(present_category);
    if (present_category)
      list.add(category);

    boolean present_message = true && (isSetMessage());
    list.add(present_message);
    if (present_message)
      list.add(message);

    return list.hashCode();
  }

  @Override
  public int compareTo(LogEntry other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = Boolean.valueOf(isSetCategory()).compareTo(other.isSetCategory());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetCategory()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.category, other.category);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetMessage()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }

  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("LogEntry(");
    boolean first = true;

    sb.append("category:");
    if (this.category == null) {
      sb.append("null");
    } else {
      sb.append(this.category);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("message:");
    if (this.message == null) {
      sb.append("null");
    } else {
      sb.append(this.message);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  // Java serialization is bridged onto thrift's compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class LogEntryStandardSchemeFactory implements SchemeFactory {
    public LogEntryStandardScheme getScheme() {
      return new LogEntryStandardScheme();
    }
  }

  // Field-tagged (standard) wire format: tolerates unknown/missing fields.
  private static class LogEntryStandardScheme extends StandardScheme<LogEntry> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, LogEntry struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // CATEGORY
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.category = iprot.readString();
              struct.setCategoryIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // MESSAGE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.message = iprot.readString();
              struct.setMessageIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, LogEntry struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.category != null) {
        oprot.writeFieldBegin(CATEGORY_FIELD_DESC);
        oprot.writeString(struct.category);
        oprot.writeFieldEnd();
      }
      if (struct.message != null) {
        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
        oprot.writeString(struct.message);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class LogEntryTupleSchemeFactory implements SchemeFactory {
    public LogEntryTupleScheme getScheme() {
      return new LogEntryTupleScheme();
    }
  }

  // Compact tuple format: a presence bitset followed by the set fields in order.
  private static class LogEntryTupleScheme extends TupleScheme<LogEntry> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetCategory()) {
        optionals.set(0);
      }
      if (struct.isSetMessage()) {
        optionals.set(1);
      }
      oprot.writeBitSet(optionals, 2);
      if (struct.isSetCategory()) {
        oprot.writeString(struct.category);
      }
      if (struct.isSetMessage()) {
        oprot.writeString(struct.message);
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        struct.category = iprot.readString();
        struct.setCategoryIsSet(true);
      }
      if (incoming.get(1)) {
        struct.message = iprot.readString();
        struct.setMessageIsSet(true);
      }
    }
  }

}
| 9,632 |
0 | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source/scribe/ResultCode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.source.scribe;
import java.util.Map;
import java.util.HashMap;
import org.apache.thrift.TEnum;
// NOTE(review): Thrift-generated enum for the scribe Log() response.
// OK = accepted; TRY_LATER = server busy, client should retry.
public enum ResultCode implements org.apache.thrift.TEnum {
  OK(0),
  TRY_LATER(1);

  private final int value;

  private ResultCode(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  public static ResultCode findByValue(int value) {
    switch (value) {
      case 0:
        return OK;
      case 1:
        return TRY_LATER;
      default:
        return null;
    }
  }
}
| 9,633 |
0 | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source/scribe/Scribe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.source.scribe;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-09-01")
public class Scribe {
  /** Synchronous service contract: deliver a batch of log entries, returning OK or TRY_LATER. */
  public interface Iface {

    public ResultCode Log(List<LogEntry> messages) throws org.apache.thrift.TException;

  }
  /** Asynchronous variant of {@link Iface}: the result is delivered to the callback. */
  public interface AsyncIface {

    public void Log(List<LogEntry> messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

  }
  // NOTE(review): Thrift-generated blocking client; each Log() is a
  // send_Log()/recv_Log() request-response pair over the supplied protocol.
  public static class Client extends org.apache.thrift.TServiceClient implements Iface {
    public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
      public Factory() {}
      public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
        return new Client(prot);
      }
      public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
        return new Client(iprot, oprot);
      }
    }

    public Client(org.apache.thrift.protocol.TProtocol prot)
    {
      super(prot, prot);
    }

    public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
      super(iprot, oprot);
    }

    public ResultCode Log(List<LogEntry> messages) throws org.apache.thrift.TException
    {
      send_Log(messages);
      return recv_Log();
    }

    public void send_Log(List<LogEntry> messages) throws org.apache.thrift.TException
    {
      Log_args args = new Log_args();
      args.setMessages(messages);
      sendBase("Log", args);
    }

    public ResultCode recv_Log() throws org.apache.thrift.TException
    {
      Log_result result = new Log_result();
      receiveBase(result, "Log");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "Log failed: unknown result");
    }

  }
  // NOTE(review): Thrift-generated non-blocking client; Log() registers a
  // Log_call with the async client manager and returns immediately.
  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
      private org.apache.thrift.async.TAsyncClientManager clientManager;
      private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
        this.clientManager = clientManager;
        this.protocolFactory = protocolFactory;
      }
      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
        return new AsyncClient(protocolFactory, clientManager, transport);
      }
    }

    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
      super(protocolFactory, clientManager, transport);
    }

    public void Log(List<LogEntry> messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      Log_call method_call = new Log_call(messages, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }

    public static class Log_call extends org.apache.thrift.async.TAsyncMethodCall {
      private List<LogEntry> messages;
      public Log_call(List<LogEntry> messages, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.messages = messages;
      }

      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("Log", org.apache.thrift.protocol.TMessageType.CALL, 0));
        Log_args args = new Log_args();
        args.setMessages(messages);
        args.write(prot);
        prot.writeMessageEnd();
      }

      public ResultCode getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_Log();
      }
    }

  }
  // NOTE(review): Thrift-generated synchronous server-side dispatcher; maps the
  // wire method name "Log" to a handler implementing Iface.
  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
    private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
    public Processor(I iface) {
      super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
    }

    protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
      super(iface, getProcessMap(processMap));
    }

    private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
      processMap.put("Log", new Log());
      return processMap;
    }

    public static class Log<I extends Iface> extends org.apache.thrift.ProcessFunction<I, Log_args> {
      public Log() {
        super("Log");
      }

      public Log_args getEmptyArgsInstance() {
        return new Log_args();
      }

      protected boolean isOneway() {
        return false;
      }

      public Log_result getResult(I iface, Log_args args) throws org.apache.thrift.TException {
        Log_result result = new Log_result();
        result.success = iface.Log(args.messages);
        return result;
      }
    }

  }
  // NOTE(review): Thrift-generated asynchronous server-side dispatcher; the
  // handler completes each Log call through the provided callback.
  public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
    private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
    public AsyncProcessor(I iface) {
      super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
    }

    protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      super(iface, getProcessMap(processMap));
    }

    private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase,?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      processMap.put("Log", new Log());
      return processMap;
    }

    public static class Log<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, Log_args, ResultCode> {
      public Log() {
        super("Log");
      }

      public Log_args getEmptyArgsInstance() {
        return new Log_args();
      }

      public AsyncMethodCallback<ResultCode> getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<ResultCode>() {
          public void onComplete(ResultCode o) {
            Log_result result = new Log_result();
            result.success = o;
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            Log_result result = new Log_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }

      protected boolean isOneway() {
        return false;
      }

      public void start(I iface, Log_args args, org.apache.thrift.async.AsyncMethodCallback<ResultCode> resultHandler) throws TException {
        iface.Log(args.messages,resultHandler);
      }
    }
  }
public static class Log_args implements org.apache.thrift.TBase<Log_args, Log_args._Fields>, java.io.Serializable, Cloneable, Comparable<Log_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_args");
private static final org.apache.thrift.protocol.TField MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("messages", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new Log_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new Log_argsTupleSchemeFactory());
}
public List<LogEntry> messages; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
MESSAGES((short)1, "messages");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // MESSAGES
return MESSAGES;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("messages", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, LogEntry.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_args.class, metaDataMap);
}
public Log_args() {
}
public Log_args(
List<LogEntry> messages)
{
this();
this.messages = messages;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public Log_args(Log_args other) {
if (other.isSetMessages()) {
List<LogEntry> __this__messages = new ArrayList<LogEntry>(other.messages.size());
for (LogEntry other_element : other.messages) {
__this__messages.add(new LogEntry(other_element));
}
this.messages = __this__messages;
}
}
public Log_args deepCopy() {
return new Log_args(this);
}
@Override
public void clear() {
this.messages = null;
}
public int getMessagesSize() {
return (this.messages == null) ? 0 : this.messages.size();
}
public java.util.Iterator<LogEntry> getMessagesIterator() {
return (this.messages == null) ? null : this.messages.iterator();
}
public void addToMessages(LogEntry elem) {
if (this.messages == null) {
this.messages = new ArrayList<LogEntry>();
}
this.messages.add(elem);
}
public List<LogEntry> getMessages() {
return this.messages;
}
public Log_args setMessages(List<LogEntry> messages) {
this.messages = messages;
return this;
}
public void unsetMessages() {
this.messages = null;
}
/** Returns true if field messages is set (has been assigned a value) and false otherwise */
public boolean isSetMessages() {
return this.messages != null;
}
public void setMessagesIsSet(boolean value) {
if (!value) {
this.messages = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case MESSAGES:
if (value == null) {
unsetMessages();
} else {
setMessages((List<LogEntry>)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case MESSAGES:
return getMessages();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case MESSAGES:
return isSetMessages();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof Log_args)
return this.equals((Log_args)that);
return false;
}
public boolean equals(Log_args that) {
if (that == null)
return false;
boolean this_present_messages = true && this.isSetMessages();
boolean that_present_messages = true && that.isSetMessages();
if (this_present_messages || that_present_messages) {
if (!(this_present_messages && that_present_messages))
return false;
if (!this.messages.equals(that.messages))
return false;
}
return true;
}
    @Override
    public int hashCode() {
      // Hash is derived from a List so it stays consistent with other
      // Thrift-generated structs: the "is set" flag always contributes,
      // the field value only when present. Do not replace with manual
      // arithmetic — the exact value must match List.hashCode().
      List<Object> list = new ArrayList<Object>();
      boolean present_messages = true && (isSetMessages());
      list.add(present_messages);
      if (present_messages)
        list.add(messages);
      return list.hashCode();
    }
    @Override
    public int compareTo(Log_args other) {
      // Orders first by class name (defensive), then by whether the field is
      // set, then by the field value itself.
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0;
      lastComparison = Boolean.valueOf(isSetMessages()).compareTo(other.isSetMessages());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetMessages()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messages, other.messages);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      return 0;
    }
    /** Maps a wire field id to its {@code _Fields} constant (null if unknown). */
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    /** Deserializes this struct, dispatching to the scheme matching the protocol. */
    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    /** Serializes this struct, dispatching to the scheme matching the protocol. */
    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Log_args(");
boolean first = true;
sb.append("messages:");
if (this.messages == null) {
sb.append("null");
} else {
sb.append(this.messages);
}
first = false;
sb.append(")");
return sb.toString();
}
    /** No-op: this struct has no required fields and no nested struct constraints. */
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    /** Java serialization hook: delegates to Thrift compact-protocol encoding. */
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    /** Java deserialization hook: delegates to Thrift compact-protocol decoding. */
    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    /** Factory registered in {@code schemes} for field-tagged (standard) protocols. */
    private static class Log_argsStandardSchemeFactory implements SchemeFactory {
      public Log_argsStandardScheme getScheme() {
        return new Log_argsStandardScheme();
      }
    }
    /**
     * Standard (field-tagged) wire codec for {@code Log_args}. Read order and
     * field ids are dictated by the Thrift IDL — do not reorder.
     */
    private static class Log_argsStandardScheme extends StandardScheme<Log_args> {
      /** Reads fields until STOP; unknown fields are skipped for forward compatibility. */
      public void read(org.apache.thrift.protocol.TProtocol iprot, Log_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
        {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 1: // MESSAGES
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
                  org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                  struct.messages = new ArrayList<LogEntry>(_list0.size);
                  LogEntry _elem1;
                  for (int _i2 = 0; _i2 < _list0.size; ++_i2)
                  {
                    _elem1 = new LogEntry();
                    _elem1.read(iprot);
                    struct.messages.add(_elem1);
                  }
                  iprot.readListEnd();
                }
                struct.setMessagesIsSet(true);
              } else {
                // Type mismatch for a known id: skip the value rather than fail.
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate method
        struct.validate();
      }
      /** Writes the messages field (only when non-null) followed by a field stop. */
      public void write(org.apache.thrift.protocol.TProtocol oprot, Log_args struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        if (struct.messages != null) {
          oprot.writeFieldBegin(MESSAGES_FIELD_DESC);
          {
            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.messages.size()));
            for (LogEntry _iter3 : struct.messages)
            {
              _iter3.write(oprot);
            }
            oprot.writeListEnd();
          }
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    /** Factory registered in {@code schemes} for the compact tuple protocol. */
    private static class Log_argsTupleSchemeFactory implements SchemeFactory {
      public Log_argsTupleScheme getScheme() {
        return new Log_argsTupleScheme();
      }
    }
    /**
     * Tuple wire codec: a leading bitset records which optional fields follow,
     * then set fields are written positionally without per-field tags.
     */
    private static class Log_argsTupleScheme extends TupleScheme<Log_args> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetMessages()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
        if (struct.isSetMessages()) {
          {
            // List is encoded as a size prefix followed by each element.
            oprot.writeI32(struct.messages.size());
            for (LogEntry _iter4 : struct.messages)
            {
              _iter4.write(oprot);
            }
          }
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
            org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
            struct.messages = new ArrayList<LogEntry>(_list5.size);
            LogEntry _elem6;
            for (int _i7 = 0; _i7 < _list5.size; ++_i7)
            {
              _elem6 = new LogEntry();
              _elem6.read(iprot);
              struct.messages.add(_elem6);
            }
          }
          struct.setMessagesIsSet(true);
        }
      }
    }
}
  /**
   * Thrift-generated result struct for the {@code Log} call: wraps the single
   * {@code success} field holding the server's {@link ResultCode}. Field ids,
   * hashing and both serialization schemes are dictated by the Thrift IDL and
   * runtime conventions — do not hand-edit.
   */
  public static class Log_result implements org.apache.thrift.TBase<Log_result, Log_result._Fields>, java.io.Serializable, Cloneable, Comparable<Log_result> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_result");
    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
    // Protocol-class -> codec factory; read()/write() dispatch through this map.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new Log_resultStandardSchemeFactory());
      schemes.put(TupleScheme.class, new Log_resultTupleSchemeFactory());
    }
    /**
     *
     * @see ResultCode
     */
    public ResultCode success; // required
    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      /**
       *
       * @see ResultCode
       */
      SUCCESS((short)0, "success");
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch(fieldId) {
          case 0: // SUCCESS
            return SUCCESS;
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception
       * if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    // isset id assignments
    // Field metadata consumed by Thrift's reflection utilities.
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResultCode.class)));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_result.class, metaDataMap);
    }
    public Log_result() {
    }
    public Log_result(
      ResultCode success)
    {
      this();
      this.success = success;
    }
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public Log_result(Log_result other) {
      if (other.isSetSuccess()) {
        this.success = other.success;
      }
    }
    public Log_result deepCopy() {
      return new Log_result(this);
    }
    @Override
    public void clear() {
      this.success = null;
    }
    /**
     *
     * @see ResultCode
     */
    public ResultCode getSuccess() {
      return this.success;
    }
    /**
     *
     * @see ResultCode
     */
    public Log_result setSuccess(ResultCode success) {
      this.success = success;
      return this;
    }
    public void unsetSuccess() {
      this.success = null;
    }
    /** Returns true if field success is set (has been assigned a value) and false otherwise */
    public boolean isSetSuccess() {
      return this.success != null;
    }
    /** Thrift "is set" hook: a false value clears the field, true is a no-op. */
    public void setSuccessIsSet(boolean value) {
      if (!value) {
        this.success = null;
      }
    }
    /** Reflective setter used by Thrift utilities; null unsets the field. */
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      case SUCCESS:
        if (value == null) {
          unsetSuccess();
        } else {
          setSuccess((ResultCode)value);
        }
        break;
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      case SUCCESS:
        return getSuccess();
      }
      throw new IllegalStateException();
    }
    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      case SUCCESS:
        return isSetSuccess();
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof Log_result)
        return this.equals((Log_result)that);
      return false;
    }
    public boolean equals(Log_result that) {
      // Equal when both sides agree on whether success is set and on its value.
      if (that == null)
        return false;
      boolean this_present_success = true && this.isSetSuccess();
      boolean that_present_success = true && that.isSetSuccess();
      if (this_present_success || that_present_success) {
        if (!(this_present_success && that_present_success))
          return false;
        if (!this.success.equals(that.success))
          return false;
      }
      return true;
    }
    @Override
    public int hashCode() {
      // List-based hash keeps parity with other generated structs; the enum's
      // numeric value (not identity) contributes when set.
      List<Object> list = new ArrayList<Object>();
      boolean present_success = true && (isSetSuccess());
      list.add(present_success);
      if (present_success)
        list.add(success.getValue());
      return list.hashCode();
    }
    @Override
    public int compareTo(Log_result other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0;
      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetSuccess()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("Log_result(");
      boolean first = true;
      sb.append("success:");
      if (this.success == null) {
        sb.append("null");
      } else {
        sb.append(this.success);
      }
      first = false;
      sb.append(")");
      return sb.toString();
    }
    /** No-op: no required fields and no nested struct constraints. */
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    /** Java serialization hook: delegates to Thrift compact-protocol encoding. */
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    /** Java deserialization hook: delegates to Thrift compact-protocol decoding. */
    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class Log_resultStandardSchemeFactory implements SchemeFactory {
      public Log_resultStandardScheme getScheme() {
        return new Log_resultStandardScheme();
      }
    }
    /** Standard (field-tagged) codec; the enum is carried as its I32 value. */
    private static class Log_resultStandardScheme extends StandardScheme<Log_result> {
      public void read(org.apache.thrift.protocol.TProtocol iprot, Log_result struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
        {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                // findByValue returns null for unrecognized enum values.
                struct.success = org.apache.flume.source.scribe.ResultCode.findByValue(iprot.readI32());
                struct.setSuccessIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, Log_result struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        if (struct.success != null) {
          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
          oprot.writeI32(struct.success.getValue());
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class Log_resultTupleSchemeFactory implements SchemeFactory {
      public Log_resultTupleScheme getScheme() {
        return new Log_resultTupleScheme();
      }
    }
    /** Tuple codec: one-bit presence bitset, then the enum's I32 value when set. */
    private static class Log_resultTupleScheme extends TupleScheme<Log_result> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
        if (struct.isSetSuccess()) {
          oprot.writeI32(struct.success.getValue());
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          struct.success = org.apache.flume.source.scribe.ResultCode.findByValue(iprot.readI32());
          struct.setSuccessIsSet(true);
        }
      }
    }
  }
}
| 9,634 |
0 | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source | Create_ds/flume/flume-ng-sources/flume-scribe-source/src/main/java/org/apache/flume/source/scribe/ScribeSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.source.scribe;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDrivenSource;
import org.apache.flume.conf.Configurable;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractSource;
import org.apache.flume.source.scribe.Scribe.Iface;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.THsHaServer;
import org.apache.thrift.server.TServer;
import org.apache.thrift.transport.TNonblockingServerSocket;
import org.apache.thrift.transport.TNonblockingServerTransport;
import org.apache.thrift.transport.layered.TFramedTransport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Flume should adopt the Scribe entry {@code LogEntry} from existing
* Scribe system. Mostly, we may receive message from local Scribe and Flume
* take responsibility of central Scribe.
*
* <p>
* We use Thrift without deserializing, throughput has 2X increasing
*/
/**
 * Event-driven source that accepts Scribe {@code LogEntry} batches over a
 * Thrift half-sync/half-async server and forwards them to the channel, so
 * Flume can act as a central Scribe endpoint for existing Scribe clients.
 */
public class ScribeSource extends AbstractSource implements
    EventDrivenSource, Configurable {
  private static final Logger LOG = LoggerFactory.getLogger(ScribeSource.class);

  /** Event-header key under which the Scribe category is propagated. */
  public static final String SCRIBE_CATEGORY = "category";

  private static final int DEFAULT_PORT = 1499;
  private static final int DEFAULT_WORKERS = 5;
  private static final int DEFAULT_MAX_READ_BUFFER_BYTES = 16384000;

  // Written by the Startup thread, read by start()/stop(); volatile so the
  // publishing write is guaranteed visible to the thread checking isServing().
  private volatile TServer server;
  private int port;
  private int workers;
  private int maxReadBufferBytes;

  private SourceCounter sourceCounter;

  /**
   * Reads {@code port}, {@code maxReadBufferBytes} and {@code workerThreads}
   * from the context, falling back to the defaults for missing or
   * non-positive values.
   */
  @Override
  public void configure(Context context) {
    port = context.getInteger("port", DEFAULT_PORT);

    maxReadBufferBytes = context.getInteger("maxReadBufferBytes", DEFAULT_MAX_READ_BUFFER_BYTES);
    if (maxReadBufferBytes <= 0) {
      maxReadBufferBytes = DEFAULT_MAX_READ_BUFFER_BYTES;
    }

    workers = context.getInteger("workerThreads", DEFAULT_WORKERS);
    if (workers <= 0) {
      workers = DEFAULT_WORKERS;
    }

    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }
  }

  /** Constructs the Thrift server and blocks in serve() until stopped. */
  private class Startup extends Thread {

    public void run() {
      try {
        Scribe.Processor processor = new Scribe.Processor(new Receiver());
        TNonblockingServerTransport transport = new TNonblockingServerSocket(port);
        THsHaServer.Args args = new THsHaServer.Args(transport);
        args.minWorkerThreads(workers);
        args.maxWorkerThreads(workers);
        args.processor(processor);
        args.transportFactory(new TFramedTransport.Factory(maxReadBufferBytes));
        args.protocolFactory(new TBinaryProtocol.Factory(false, false));
        args.maxReadBufferBytes = maxReadBufferBytes;

        server = new THsHaServer(args);

        LOG.info("Starting Scribe Source on port " + port);

        server.serve();
      } catch (Exception e) {
        LOG.warn("Scribe failed", e);
      }
    }
  }

  /**
   * Launches the server thread and waits briefly for it to come up.
   *
   * @throws IllegalStateException if the server did not start serving.
   */
  @Override
  public void start() {
    Startup startupThread = new Startup();
    startupThread.start();

    // Give the server thread a moment to bind and begin serving.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException e) {
      // Restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }

    // If the Startup thread failed before constructing the server (e.g. the
    // port could not be bound), the field is still null; guard it so callers
    // get the intended IllegalStateException rather than an NPE.
    if (server == null || !server.isServing()) {
      throw new IllegalStateException("Failed initialization of ScribeSource");
    }
    sourceCounter.start();
    super.start();
  }

  @Override
  public void stop() {
    LOG.info("Scribe source stopping");
    if (server != null) {
      server.stop();
    }
    sourceCounter.stop();
    super.stop();
    LOG.info("Scribe source stopped. Metrics:{}", sourceCounter);
  }

  /** Thrift handler: converts each LogEntry batch into Flume events. */
  class Receiver implements Iface {

    /**
     * Handles one batch. Returns OK on success; TRY_LATER when the batch is
     * null or channel delivery fails, so the Scribe client retries later.
     */
    public ResultCode Log(List<LogEntry> list) throws TException {
      if (list != null) {
        sourceCounter.addToEventReceivedCount(list.size());

        try {
          List<Event> events = new ArrayList<Event>(list.size());

          for (LogEntry entry : list) {
            Map<String, String> headers = new HashMap<String, String>(1, 1);

            String category = entry.getCategory();
            if (category != null) {
              headers.put(SCRIBE_CATEGORY, category);
            }

            // NOTE(review): getBytes() uses the platform default charset;
            // consider an explicit charset — changing it would alter behavior.
            Event event = EventBuilder.withBody(entry.getMessage().getBytes(), headers);
            events.add(event);
          }
          if (events.size() > 0) {
            getChannelProcessor().processEventBatch(events);
          }
          sourceCounter.addToEventAcceptedCount(list.size());

          return ResultCode.OK;
        } catch (Exception e) {
          LOG.warn("Scribe source handling failure", e);
          sourceCounter.incrementEventReadOrChannelFail(e);
        }
      }

      return ResultCode.TRY_LATER;
    }
  }
}
| 9,635 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilTruststoreTypeWithDefaultTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runners.Parameterized.Parameters;
import java.util.Arrays;
import java.util.Collection;
/**
 * Parameterized test for truststore-type resolution with a fallback default:
 * the system property wins over the environment variable, and the supplied
 * default is used when neither is set.
 */
public class SSLUtilTruststoreTypeWithDefaultTest extends AbstractSSLUtilTest {

  @Parameters
  public static Collection<?> data() {
    return Arrays.asList(new Object[][]{
        // system property value, environment variable value, expected value
        { null, null, "default" },
        { "sysprop", null, "sysprop" },
        { null, "envvar", "envvar" },
        { "sysprop", "envvar", "sysprop" }
    });
  }

  public SSLUtilTruststoreTypeWithDefaultTest(String sysPropValue, String envVarValue,
                                              String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "javax.net.ssl.trustStoreType";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_TRUSTSTORE_TYPE";
  }

  @Test
  public void testTruststoreType() {
    SSLUtil.initGlobalSSLParameters();
    String truststoreType = SSLUtil.getGlobalTruststoreType("default");
    Assert.assertEquals(expectedValue, truststoreType);
  }
}
| 9,636 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilExcludeCipherSuitesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
/**
 * Parameterized check that the excluded cipher-suite list is resolved from the
 * {@code flume.ssl.exclude.cipherSuites} system property first, then the
 * {@code FLUME_SSL_EXCLUDE_CIPHERSUITES} environment variable.
 */
public class SSLUtilExcludeCipherSuitesTest extends AbstractSSLUtilListTest {

  public SSLUtilExcludeCipherSuitesTest(
      String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "flume.ssl.exclude.cipherSuites";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_EXCLUDE_CIPHERSUITES";
  }

  @Test
  public void testIncludeProtocols() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalExcludeCipherSuites());
  }
}
| 9,637 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilKeystorePasswordTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
/**
 * Parameterized check that the global keystore password is resolved from the
 * {@code javax.net.ssl.keyStorePassword} system property first, then the
 * {@code FLUME_SSL_KEYSTORE_PASSWORD} environment variable.
 */
public class SSLUtilKeystorePasswordTest extends AbstractSSLUtilTest {

  public SSLUtilKeystorePasswordTest(String sysPropValue, String envVarValue,
                                     String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "javax.net.ssl.keyStorePassword";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_KEYSTORE_PASSWORD";
  }

  @Test
  public void testKeystorePassword() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalKeystorePassword());
  }
}
| 9,638 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/AbstractSSLUtilListTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import java.util.Arrays;
import java.util.Collection;
import org.junit.runners.Parameterized.Parameters;
/**
 * Shared parameter table for list-valued SSL settings: comma-separated input
 * is expected to be normalized to a space-separated list, with the system
 * property taking precedence over the environment variable.
 */
public abstract class AbstractSSLUtilListTest extends AbstractSSLUtilTest {

  @Parameters
  public static Collection<?> data() {
    return Arrays.asList(new Object[][]{
        // system property value, environment variable value, expected value
        { null, null, null },
        { "sysprop", null, "sysprop" },
        { "sysprop,sysprop", null, "sysprop sysprop" },
        { null, "envvar", "envvar" },
        { null, "envvar,envvar", "envvar envvar" },
        { "sysprop", "envvar", "sysprop" },
        { "sysprop,sysprop", "envvar,envvar", "sysprop sysprop" }
    });
  }

  protected AbstractSSLUtilListTest(String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }
}
| 9,639 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilTruststorePasswordTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
/**
 * Parameterized check that the global truststore password is resolved from
 * the {@code javax.net.ssl.trustStorePassword} system property first, then
 * the {@code FLUME_SSL_TRUSTSTORE_PASSWORD} environment variable.
 */
public class SSLUtilTruststorePasswordTest extends AbstractSSLUtilTest {

  public SSLUtilTruststorePasswordTest(String sysPropValue, String envVarValue,
                                       String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "javax.net.ssl.trustStorePassword";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_TRUSTSTORE_PASSWORD";
  }

  @Test
  public void testTruststorePassword() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalTruststorePassword());
  }
}
| 9,640 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilKeystorePathTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
/**
 * Parameterized check that the global keystore path is resolved from the
 * {@code javax.net.ssl.keyStore} system property first, then the
 * {@code FLUME_SSL_KEYSTORE_PATH} environment variable.
 */
public class SSLUtilKeystorePathTest extends AbstractSSLUtilTest {

  public SSLUtilKeystorePathTest(String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "javax.net.ssl.keyStore";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_KEYSTORE_PATH";
  }

  @Test
  public void testKeystorePath() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalKeystorePath());
  }
}
| 9,641 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/util/SSLUtilIncludeCipherSuitesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
/**
 * Parameterized check that the included cipher-suite list is resolved from
 * the {@code flume.ssl.include.cipherSuites} system property first, then the
 * {@code FLUME_SSL_INCLUDE_CIPHERSUITES} environment variable.
 */
public class SSLUtilIncludeCipherSuitesTest extends AbstractSSLUtilListTest {

  public SSLUtilIncludeCipherSuitesTest(
      String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return "flume.ssl.include.cipherSuites";
  }

  @Override
  protected String getEnvVarName() {
    return "FLUME_SSL_INCLUDE_CIPHERSUITES";
  }

  @Test
  public void testIncludeProtocols() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalIncludeCipherSuites());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
public class SSLUtilExcludeProtocolsTest extends AbstractSSLUtilListTest {

  /** System property consulted first for the excluded protocols list. */
  private static final String SYS_PROP = "flume.ssl.exclude.protocols";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_EXCLUDE_PROTOCOLS";

  public SSLUtilExcludeProtocolsTest(
      String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /**
   * Verifies that the global exclude-protocols value is resolved with the
   * expected system-property-over-environment-variable precedence.
   */
  @Test
  public void testExcludeProtocols() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalExcludeProtocols());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
public class SSLUtilTruststoreTypeTest extends AbstractSSLUtilTest {

  /** System property consulted first by SSLUtil for the truststore type. */
  private static final String SYS_PROP = "javax.net.ssl.trustStoreType";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_TRUSTSTORE_TYPE";

  public SSLUtilTruststoreTypeTest(String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /**
   * Verifies truststore type resolution with no default supplied: the
   * expected value is null when neither source is set.
   */
  @Test
  public void testTruststoreType() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalTruststoreType(null));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.After;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
/**
 * Base class for parameterized SSL configuration tests.
 *
 * <p>Each concrete subclass names one SSL setting via {@link #getSysPropName()}
 * and {@link #getEnvVarName()}. Before every test this class sets (or clears)
 * both the system property and the environment variable, so subclasses can
 * assert the "system property wins over environment variable" precedence.
 *
 * <p>NOTE: environment variables are injected via reflection into the JDK's
 * cached environment maps ({@code java.lang.ProcessEnvironment}); this relies
 * on JDK internals and may break on newer JVMs.
 */
@RunWith(Parameterized.class)
public abstract class AbstractSSLUtilTest {

  @Parameters
  public static Collection<?> data() {
    return Arrays.asList(new Object[][]{
        // system property value, environment variable value, expected value
        { null, null, null },
        { "sysprop", null, "sysprop" },
        { null, "envvar", "envvar" },
        { "sysprop", "envvar", "sysprop" }
    });
  }

  // Value for the system property; null means "not set".
  protected String sysPropValue;
  // Value for the environment variable; null means "not set".
  protected String envVarValue;
  // Value the SSLUtil getter under test is expected to return.
  protected String expectedValue;

  protected AbstractSSLUtilTest(String sysPropValue, String envVarValue, String expectedValue) {
    this.sysPropValue = sysPropValue;
    this.envVarValue = envVarValue;
    this.expectedValue = expectedValue;
  }

  /** Name of the system property under test. */
  protected abstract String getSysPropName();

  /** Name of the environment variable under test. */
  protected abstract String getEnvVarName();

  @Before
  public void setUp() {
    setSysProp(getSysPropName(), sysPropValue);
    setEnvVar(getEnvVarName(), envVarValue);
  }

  @After
  public void tearDown() {
    // Clear both sources so parameterized runs do not leak state into each other.
    setSysProp(getSysPropName(), null);
    setEnvVar(getEnvVarName(), null);
  }

  /** Sets the system property when value is non-null, otherwise clears it. */
  private static void setSysProp(String name, String value) {
    if (value != null) {
      System.setProperty(name, value);
    } else {
      System.clearProperty(name);
    }
  }

  /** Injects (or removes, if value is null) an environment variable. */
  private static void setEnvVar(String name, String value) {
    try {
      injectEnvironmentVariable(name, value);
    } catch (ReflectiveOperationException e) {
      throw new AssertionError("Test setup failed.", e);
    }
  }

  // based on https://dzone.com/articles/how-to-change-environment-variables-in-java
  private static void injectEnvironmentVariable(String key, String value)
      throws ReflectiveOperationException {
    Class<?> processEnvironment = Class.forName("java.lang.ProcessEnvironment");
    // Mutate the unmodifiable view handed out by System.getenv().
    Field unmodifiableMapField = getAccessibleField(processEnvironment,
        "theUnmodifiableEnvironment");
    Object unmodifiableMap = unmodifiableMapField.get(null);
    injectIntoUnmodifiableMap(key, value, unmodifiableMap);
    // Also mutate the backing environment map used for child-process creation.
    Field mapField = getAccessibleField(processEnvironment, "theEnvironment");
    @SuppressWarnings("unchecked") // JDK-internal map; String keys/values via erasure
    Map<String, String> map = (Map<String, String>) mapField.get(null);
    if (value != null) {
      map.put(key, value);
    } else {
      map.remove(key);
    }
  }

  /** Returns the named declared field with accessibility forced on. */
  private static Field getAccessibleField(Class<?> clazz, String fieldName)
      throws NoSuchFieldException {
    Field field = clazz.getDeclaredField(fieldName);
    field.setAccessible(true);
    return field;
  }

  /**
   * Collections$UnmodifiableMap wraps the real map in its field "m";
   * reach through the wrapper and mutate that backing map directly.
   */
  private static void injectIntoUnmodifiableMap(String key, String value, Object map)
      throws ReflectiveOperationException {
    Class<?> unmodifiableMap = Class.forName("java.util.Collections$UnmodifiableMap");
    Field field = getAccessibleField(unmodifiableMap, "m");
    @SuppressWarnings("unchecked") // backing map holds String keys/values via erasure
    Map<String, String> backing = (Map<String, String>) field.get(map);
    if (value != null) {
      backing.put(key, value);
    } else {
      backing.remove(key);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
public class SSLUtilKeystoreTypeTest extends AbstractSSLUtilTest {

  /** System property consulted first by SSLUtil for the keystore type. */
  private static final String SYS_PROP = "javax.net.ssl.keyStoreType";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_KEYSTORE_TYPE";

  public SSLUtilKeystoreTypeTest(String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /**
   * Verifies keystore type resolution with no default supplied: the
   * expected value is null when neither source is set.
   */
  @Test
  public void testKeystoreType() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalKeystoreType(null));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runners.Parameterized.Parameters;
import java.util.Arrays;
import java.util.Collection;
public class SSLUtilKeystoreTypeWithDefaultTest extends AbstractSSLUtilTest {

  /** System property consulted first by SSLUtil for the keystore type. */
  private static final String SYS_PROP = "javax.net.ssl.keyStoreType";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_KEYSTORE_TYPE";

  @Parameters
  public static Collection<?> data() {
    // Unlike the base-class data, the "neither source set" case must yield
    // the caller-supplied default rather than null.
    return Arrays.asList(new Object[][]{
        // system property value, environment variable value, expected value
        { null, null, "default" },
        { "sysprop", null, "sysprop" },
        { null, "envvar", "envvar" },
        { "sysprop", "envvar", "sysprop" }
    });
  }

  public SSLUtilKeystoreTypeWithDefaultTest(String sysPropValue, String envVarValue,
                                            String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /** The configured default must be returned when neither source is set. */
  @Test
  public void testKeystoreType() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalKeystoreType("default"));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
public class SSLUtilTruststorePathTest extends AbstractSSLUtilTest {

  /** System property consulted first by SSLUtil for the truststore path. */
  private static final String SYS_PROP = "javax.net.ssl.trustStore";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_TRUSTSTORE_PATH";

  public SSLUtilTruststorePathTest(String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /**
   * Verifies that the global truststore path is resolved with the expected
   * system-property-over-environment-variable precedence.
   */
  @Test
  public void testTruststorePath() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalTruststorePath());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.junit.Assert;
import org.junit.Test;
public class SSLUtilIncludeProtocolsTest extends AbstractSSLUtilListTest {

  /** System property consulted first for the included protocols list. */
  private static final String SYS_PROP = "flume.ssl.include.protocols";

  /** Environment variable consulted as a fallback. */
  private static final String ENV_VAR = "FLUME_SSL_INCLUDE_PROTOCOLS";

  public SSLUtilIncludeProtocolsTest(
      String sysPropValue, String envVarValue, String expectedValue) {
    super(sysPropValue, envVarValue, expectedValue);
  }

  @Override
  protected String getSysPropName() {
    return SYS_PROP;
  }

  @Override
  protected String getEnvVarName() {
    return ENV_VAR;
  }

  /**
   * Verifies that the global include-protocols value is resolved with the
   * expected system-property-over-environment-variable precedence.
   */
  @Test
  public void testIncludeProtocols() {
    SSLUtil.initGlobalSSLParameters();
    Assert.assertEquals(expectedValue, SSLUtil.getGlobalIncludeProtocols());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.ipc.Responder;
import org.apache.avro.ipc.Server;
import org.apache.avro.ipc.netty.NettyServer;
import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.source.avro.AvroFlumeEvent;
import org.apache.flume.source.avro.AvroSourceProtocol;
import org.apache.flume.source.avro.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.compression.JZlibDecoder;
import io.netty.handler.codec.compression.JZlibEncoder;
import io.netty.handler.codec.compression.ZlibEncoder;
import junit.framework.Assert;
/**
 * Helpers for Netty Avro RPC testing: start/stop in-process Avro servers,
 * build pre-configured {@link NettyAvroRpcClient}s, and provide
 * {@link AvroSourceProtocol} handler implementations with canned responses
 * (OK, FAILED, UNKNOWN, throwing).
 */
public class RpcTestUtils {
  private static final Logger logger = LoggerFactory.getLogger(RpcTestUtils.class);
  // Test servers bind to the loopback interface only.
  private static final String localhost = "localhost";
  /**
   * Helper method for testing simple (single) appends on handlers.
   * Starts a server around the handler, sends one event, and tears down.
   * @param handler the server-side handler under test
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  public static void handlerSimpleAppendTest(AvroSourceProtocol handler)
      throws FlumeException, EventDeliveryException {
    handlerSimpleAppendTest(handler, false, false, 0);
  }
  /**
   * Helper method for testing simple (single) appends on handlers, with
   * optional deflate compression on either side of the connection.
   * @param handler the server-side handler under test
   * @param enableServerCompression whether the server pipeline compresses
   * @param enableClientCompression whether the client is configured to compress
   * @param compressionLevel deflate level for the client (0 = no compression)
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  public static void handlerSimpleAppendTest(AvroSourceProtocol handler,
                                             boolean enableServerCompression,
                                             boolean enableClientCompression, int compressionLevel)
      throws FlumeException, EventDeliveryException {
    NettyAvroRpcClient client = null;
    Server server = startServer(handler, 0, enableServerCompression);
    try {
      Properties starterProp = new Properties();
      if (enableClientCompression) {
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE, "deflate");
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_LEVEL,
            "" + compressionLevel);
      } else {
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE, "none");
      }
      client = getStockLocalClient(server.getPort(), starterProp);
      boolean isActive = client.isActive();
      Assert.assertTrue("Client should be active", isActive);
      client.append(EventBuilder.withBody("wheee!!!", Charset.forName("UTF8")));
    } finally {
      // Always release the server port and the client's channel resources.
      stopServer(server);
      if (client != null) client.close();
    }
  }
  /**
   * Batch-append variant of {@link #handlerSimpleAppendTest(AvroSourceProtocol)}
   * with compression disabled on both sides.
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  public static void handlerBatchAppendTest(AvroSourceProtocol handler)
      throws FlumeException, EventDeliveryException {
    handlerBatchAppendTest(handler, false, false, 0);
  }
  /**
   * Helper method for testing batch appends on handlers. Sends exactly one
   * full batch (the client's configured batch size) of events.
   * @param handler the server-side handler under test
   * @param enableServerCompression whether the server pipeline compresses
   * @param enableClientCompression whether the client is configured to compress
   * @param compressionLevel deflate level for the client (0 = no compression)
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  public static void handlerBatchAppendTest(AvroSourceProtocol handler,
                                            boolean enableServerCompression,
                                            boolean enableClientCompression, int compressionLevel)
      throws FlumeException, EventDeliveryException {
    NettyAvroRpcClient client = null;
    Server server = startServer(handler, 0 , enableServerCompression);
    try {
      Properties starterProp = new Properties();
      if (enableClientCompression) {
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE, "deflate");
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_LEVEL,
            "" + compressionLevel);
      } else {
        starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE, "none");
      }
      client = getStockLocalClient(server.getPort(), starterProp);
      boolean isActive = client.isActive();
      Assert.assertTrue("Client should be active", isActive);
      // Fill exactly one batch so a single appendBatch() round trip occurs.
      int batchSize = client.getBatchSize();
      List<Event> events = new ArrayList<Event>();
      for (int i = 0; i < batchSize; i++) {
        events.add(EventBuilder.withBody("evt: " + i, Charset.forName("UTF8")));
      }
      client.appendBatch(events);
    } finally {
      stopServer(server);
      if (client != null) client.close();
    }
  }
  /**
   * Helper method for constructing a Netty RPC client that talks to localhost.
   */
  public static NettyAvroRpcClient getStockLocalClient(int port) {
    Properties props = new Properties();
    return getStockLocalClient(port, props);
  }
  /**
   * Configures and returns a client pointed at 127.0.0.1:port, layering the
   * host settings on top of the supplied starter properties.
   */
  public static NettyAvroRpcClient getStockLocalClient(int port, Properties starterProp) {
    starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
    starterProp.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
                            "127.0.0.1" + ":" + port);
    NettyAvroRpcClient client = new NettyAvroRpcClient();
    client.configure(starterProp);
    return client;
  }
  /**
   * Start a NettyServer, wait a moment for it to spin up, and return it.
   * Port 0 asks the OS for any free port; read it back via server.getPort().
   */
  public static Server startServer(AvroSourceProtocol handler, int port, boolean enableCompression) {
    Responder responder = new SpecificResponder(AvroSourceProtocol.class, handler);
    Server server = null;
    try {
      if (enableCompression) {
        server = new NettyServer(responder, new InetSocketAddress(localhost, port),
            (ch) -> {
              // Install zlib codecs at the head of the pipeline so all
              // traffic on this channel is deflate-compressed on the wire.
              ChannelPipeline pipeline = ch.pipeline();
              ZlibEncoder encoder = new JZlibEncoder(6);
              pipeline.addFirst("deflater", encoder);
              pipeline.addFirst("inflater", new JZlibDecoder());
            });
      } else {
        server = new NettyServer(responder, new InetSocketAddress(localhost, port));
      }
      server.start();
      logger.info("Server started on hostname: {}, port: {}",
          new Object[]{localhost, Integer.toString(server.getPort())});
      // Give the server a moment to finish binding before clients connect.
      Thread.sleep(300L);
    } catch (InterruptedException ex) {
      logger.error("Thread interrupted. Exception follows.", ex);
      Thread.currentThread().interrupt();
    }
    return server;
  }
  /** Starts a server without compression on an OS-assigned port. */
  public static Server startServer(AvroSourceProtocol handler) {
    return startServer(handler, 0, false);
  }
  /** Starts a server without compression on the given port. */
  public static Server startServer(AvroSourceProtocol handler, int port) {
    return startServer(handler, port, false);
  }
  /**
   * Request that the specified Server stop, and attempt to wait for it to exit.
   * @param server A running NettyServer
   */
  public static void stopServer(Server server) {
    try {
      int port = server.getPort();
      server.close();
      server.join();
      logger.info("Server stopped on port: {}", port);
    } catch (InterruptedException ex) {
      logger.error("Thread interrupted. Exception follows.", ex);
      Thread.currentThread().interrupt();
    }
  }
  /**
   * Handler that counts appends and can be toggled between OK and FAILED
   * responses; used by the load-balancing client tests.
   */
  public static class LoadBalancedAvroHandler implements AvroSourceProtocol {
    // Number of single events accepted so far.
    private int appendCount = 0;
    // Number of batches accepted so far.
    private int appendBatchCount = 0;
    // When true, all requests are rejected with Status.FAILED.
    private boolean failed = false;
    public int getAppendCount() {
      return appendCount;
    }
    public int getAppendBatchCount() {
      return appendBatchCount;
    }
    public boolean isFailed() {
      return failed;
    }
    public void setFailed() {
      this.failed = true;
    }
    public void setOK() {
      this.failed = false;
    }
    @Override
    public Status append(AvroFlumeEvent event) {
      if (failed) {
        logger.debug("Event rejected");
        return Status.FAILED;
      }
      logger.debug("LB: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      appendCount++;
      return Status.OK;
    }
    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      if (failed) {
        logger.debug("Event batch rejected");
        return Status.FAILED;
      }
      logger.debug("LB: Received {} events from appendBatch()",
          events.size());
      appendBatchCount++;
      return Status.OK;
    }
  }
  /**
   * A service that logs receipt of the request and returns OK
   */
  public static class OKAvroHandler implements AvroSourceProtocol {
    @Override
    public Status append(AvroFlumeEvent event) {
      logger.info("OK: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      return Status.OK;
    }
    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      logger.info("OK: Received {} events from appendBatch()", events.size());
      return Status.OK;
    }
  }
  /**
   * A service that logs receipt of the request and returns Failed
   */
  public static class FailedAvroHandler implements AvroSourceProtocol {
    @Override
    public Status append(AvroFlumeEvent event) {
      logger.info("Failed: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      return Status.FAILED;
    }
    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      logger.info("Failed: Received {} events from appendBatch()", events.size());
      return Status.FAILED;
    }
  }
  /**
   * A service that logs receipt of the request and returns Unknown
   */
  public static class UnknownAvroHandler implements AvroSourceProtocol {
    @Override
    public Status append(AvroFlumeEvent event) {
      logger.info("Unknown: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      return Status.UNKNOWN;
    }
    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      logger.info("Unknown: Received {} events from appendBatch()", events.size());
      return Status.UNKNOWN;
    }
  }
  /**
   * A service that logs receipt of the request and then throws an exception
   * (both in the callback-style and synchronous protocol methods).
   */
  public static class ThrowingAvroHandler implements AvroSourceProtocol.Callback {
    @Override
    public void append(AvroFlumeEvent event, org.apache.avro.ipc.Callback<Status> callback)
        throws java.io.IOException {
      logger.info("Throwing: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      throw new java.io.IOException("Handler smash!");
    }
    @Override
    public Status append(AvroFlumeEvent event) {
      logger.info("Throwing unavailable: Received event from append(): {}",
          new String(event.getBody().array(), Charset.forName("UTF8")));
      return null;
    }
    @Override
    public void appendBatch(List<AvroFlumeEvent> events, org.apache.avro.ipc.Callback<Status> callback)
        throws java.io.IOException {
      logger.info("Throwing: Received {} events from appendBatch()", events.size());
      throw new java.io.IOException("Handler smash!");
    }
    @Override
    public Status appendBatch(List<AvroFlumeEvent> events) {
      logger.info("Throwing unavailable: Received {} events from appendBatch()", events.size());
      return null;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.ipc.Server;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcTestUtils.FailedAvroHandler;
import org.apache.flume.api.RpcTestUtils.OKAvroHandler;
import org.apache.flume.api.RpcTestUtils.ThrowingAvroHandler;
import org.apache.flume.api.RpcTestUtils.UnknownAvroHandler;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tests for {@link NettyAvroRpcClient} against live in-process Avro servers
 * started via {@link RpcTestUtils}.
 */
public class TestNettyAvroRpcClient {
private static final Logger logger = LoggerFactory
.getLogger(TestNettyAvroRpcClient.class);
private static final String localhost = "127.0.0.1";
/**
* Simple request
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerSimple() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerSimpleAppendTest(new OKAvroHandler());
}
/**
* Simple request with compression on the server and client with compression level 6
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerSimpleCompressionLevel6() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerSimpleAppendTest(new OKAvroHandler(), true, true, 6);
}
/**
* Simple request with compression on the server and client with compression level 0
*
* Compression level 0 = no compression
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerSimpleCompressionLevel0() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerSimpleAppendTest(new OKAvroHandler(), true, true, 0);
}
/**
* Simple request with compression on the client only
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test(expected = org.apache.flume.EventDeliveryException.class)
public void testOKServerSimpleCompressionClientOnly() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerSimpleAppendTest(new OKAvroHandler(), false, true, 6);
}
/**
* Simple request with compression on the server only
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test(expected = org.apache.flume.EventDeliveryException.class)
public void testOKServerSimpleCompressionServerOnly() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerSimpleAppendTest(new OKAvroHandler(), true, false, 6);
}
/**
* Simple batch request
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerBatch() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerBatchAppendTest(new OKAvroHandler());
}
/**
* Simple batch request with compression deflate level 0
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerBatchCompressionLevel0() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerBatchAppendTest(new OKAvroHandler(), true, true, 0);
}
/**
* Simple batch request with compression deflate level 6
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testOKServerBatchCompressionLevel6() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerBatchAppendTest(new OKAvroHandler(), true, true, 6);
}
/**
* Simple batch request where the server only is using compression
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test(expected = org.apache.flume.EventDeliveryException.class)
public void testOKServerBatchCompressionServerOnly() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerBatchAppendTest(new OKAvroHandler(), true, false, 6);
}
/**
* Simple batch request where the client only is using compression
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test(expected = org.apache.flume.EventDeliveryException.class)
public void testOKServerBatchCompressionClientOnly() throws FlumeException,
EventDeliveryException {
RpcTestUtils.handlerBatchAppendTest(new OKAvroHandler(), false, true, 6);
}
/**
* Try to connect to a closed port.
* Note: this test tries to connect to port 1 on localhost.
* @throws FlumeException
*/
@Test(expected = FlumeException.class)
public void testUnableToConnect() throws FlumeException {
@SuppressWarnings("unused")
NettyAvroRpcClient client = new NettyAvroRpcClient();
Properties props = new Properties();
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "localhost");
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "localhost",
localhost + ":" + 1);
client.configure(props);
}
/**
* Send too many events at once. Should handle this case gracefully.
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testBatchOverrun() throws FlumeException, EventDeliveryException {
int batchSize = 10;
int moreThanBatchSize = batchSize + 1;
NettyAvroRpcClient client = null;
Server server = RpcTestUtils.startServer(new OKAvroHandler());
Properties props = new Properties();
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "localhost");
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "localhost",
localhost + ":" + server.getPort());
props.setProperty(RpcClientConfigurationConstants.CONFIG_BATCH_SIZE, "" + batchSize);
try {
client = new NettyAvroRpcClient();
client.configure(props);
// send one more than the batch size
List<Event> events = new ArrayList<Event>();
for (int i = 0; i < moreThanBatchSize; i++) {
events.add(EventBuilder.withBody("evt: " + i, Charset.forName("UTF8")));
}
client.appendBatch(events);
} finally {
RpcTestUtils.stopServer(server);
if (client != null) client.close();
}
}
  /**
   * First connect the client, then shut down the server, then send a request.
   * The append must fail with EventDeliveryException and leave the client
   * inactive.
   * @throws FlumeException
   * @throws EventDeliveryException expected — server is gone when append runs
   * @throws InterruptedException
   */
  @Test(expected = EventDeliveryException.class)
  public void testServerDisconnect() throws FlumeException,
      EventDeliveryException, InterruptedException {
    NettyAvroRpcClient client = null;
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    try {
      client = RpcTestUtils.getStockLocalClient(server.getPort());
      server.close();
      Thread.sleep(1000L); // wait a second for the close to occur
      try {
        server.join();
      } catch (InterruptedException ex) {
        logger.warn("Thread interrupted during join()", ex);
        Thread.currentThread().interrupt();
      }
      try {
        client.append(EventBuilder.withBody("hello", Charset.forName("UTF8")));
      } finally {
        // Even though append throws, the client must report itself inactive.
        Assert.assertFalse("Client should not be active", client.isActive());
      }
    } finally {
      // stopServer on an already-closed server is safe; always free the client.
      RpcTestUtils.stopServer(server);
      if (client != null) client.close();
    }
  }
/**
* First connect the client, then close the client, then send a request.
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test(expected = EventDeliveryException.class)
public void testClientClosedRequest() throws FlumeException,
EventDeliveryException {
NettyAvroRpcClient client = null;
Server server = RpcTestUtils.startServer(new OKAvroHandler());
try {
client = RpcTestUtils.getStockLocalClient(server.getPort());
client.close();
Assert.assertFalse("Client should not be active", client.isActive());
System.out.println("Yaya! I am not active after client close!");
client.append(EventBuilder.withBody("hello", Charset.forName("UTF8")));
} finally {
RpcTestUtils.stopServer(server);
if (client != null) client.close();
}
}
  /**
   * Send an event to an online server that returns FAILED; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testFailedServerSimple() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerSimpleAppendTest(new FailedAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Failed: I should never have gotten here!");
  }
  /**
   * Send an event to an online server that returns UNKNOWN; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testUnknownServerSimple() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerSimpleAppendTest(new UnknownAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Unknown: I should never have gotten here!");
  }
  /**
   * Send an event to an online server that throws an exception; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testThrowingServerSimple() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerSimpleAppendTest(new ThrowingAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Throwing: I should never have gotten here!");
  }
  /**
   * Send a batch of events to a server that returns FAILED; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testFailedServerBatch() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerBatchAppendTest(new FailedAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Failed: I should never have gotten here!");
  }
  /**
   * Send a batch of events to a server that returns UNKNOWN; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testUnknownServerBatch() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerBatchAppendTest(new UnknownAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Unknown: I should never have gotten here!");
  }
  /**
   * Send a batch of events to a server that always throws exceptions; expects
   * EventDeliveryException.
   */
  @Test(expected = EventDeliveryException.class)
  public void testThrowingServerBatch() throws FlumeException,
      EventDeliveryException {
    RpcTestUtils.handlerBatchAppendTest(new ThrowingAvroHandler());
    // Reached only if the expected exception was not thrown.
    logger.error("Throwing: I should never have gotten here!");
  }
/**
* configure the NettyAvroRpcClient with a non-default
* NioClientSocketChannelFactory number of io worker threads
*
* @throws FlumeException
* @throws EventDeliveryException
*/
@Test
public void testAppendWithMaxIOWorkers() throws FlumeException, EventDeliveryException {
NettyAvroRpcClient client = null;
Server server = RpcTestUtils.startServer(new OKAvroHandler());
Properties props = new Properties();
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "localhost");
props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "localhost", localhost
+ ":" + server.getPort());
props.setProperty(RpcClientConfigurationConstants.MAX_IO_WORKERS, Integer.toString(2));
try {
client = new NettyAvroRpcClient();
client.configure(props);
for (int i = 0; i < 5; i++) {
client.append(EventBuilder.withBody("evt:" + i, Charset.forName("UTF8")));
}
} finally {
RpcTestUtils.stopServer(server);
if (client != null) {
client.close();
}
}
}
  /**
   * Simple request with compression on the server and client with compression
   * level 0.
   *
   * configure the NettyAvroRpcClient with a non-default
   * NioClientSocketChannelFactory number of io worker threads
   *
   * Compression level 0 = no compression
   *
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  @Test
  public void testAppendWithMaxIOWorkersSimpleCompressionLevel0() throws FlumeException,
      EventDeliveryException {
    NettyAvroRpcClient client = null;
    // Server started with deflate enabled at compression level 0.
    Server server = RpcTestUtils.startServer(new OKAvroHandler(), 0, true);
    Properties props = new Properties();
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "localhost");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "localhost", localhost
        + ":" + server.getPort());
    props.setProperty(RpcClientConfigurationConstants.MAX_IO_WORKERS, Integer.toString(2));
    props.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE, "deflate");
    // Level 0 means the deflate codec is negotiated but performs no compression.
    props.setProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_LEVEL, "" + 0);
    try {
      client = new NettyAvroRpcClient();
      client.configure(props);
      for (int i = 0; i < 5; i++) {
        client.append(EventBuilder.withBody("evt:" + i, Charset.forName("UTF8")));
      }
    } finally {
      RpcTestUtils.stopServer(server);
      if (client != null) {
        client.close();
      }
    }
  }
}
// file: Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/api/TestRpcClientFactory.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.ipc.Server;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcTestUtils.OKAvroHandler;
import org.junit.Test;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
/**
* Very light testing on the factory. The heavy testing is done on the test
* dedicated to the implementation.
*/
/**
 * Very light testing on the factory. The heavy testing is done on the test
 * dedicated to the implementation.
 */
public class TestRpcClientFactory {

  private static final String localhost = "localhost";

  /** Builds {@code count} events with bodies "evt: 0" .. "evt: count-1". */
  private static List<Event> buildEvents(int count) {
    List<Event> events = new ArrayList<Event>(count);
    for (int seq = 0; seq < count; seq++) {
      events.add(EventBuilder.withBody("evt: " + seq, Charset.forName("UTF8")));
    }
    return events;
  }

  /** Stops the server, then closes the client if one was created. */
  private static void cleanUp(Server server, RpcClient client) {
    RpcTestUtils.stopServer(server);
    if (client != null) {
      client.close();
    }
  }

  @Test
  public void testTwoParamSimpleAppend() throws FlumeException,
      EventDeliveryException {
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      client = RpcClientFactory.getDefaultInstance(localhost, server.getPort());
      client.append(EventBuilder.withBody("wheee!!!", Charset.forName("UTF8")));
    } finally {
      cleanUp(server, client);
    }
  }

  // testing deprecated API
  @Test
  public void testTwoParamDeprecatedAppend() throws FlumeException,
      EventDeliveryException {
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      client = RpcClientFactory.getInstance(localhost, server.getPort());
      client.append(EventBuilder.withBody("wheee!!!", Charset.forName("UTF8")));
    } finally {
      cleanUp(server, client);
    }
  }

  // testing deprecated API
  @Test
  public void testThreeParamDeprecatedAppend() throws FlumeException,
      EventDeliveryException {
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      client = RpcClientFactory.getInstance(localhost, server.getPort(), 3);
      Assert.assertEquals("Batch size was specified", 3, client.getBatchSize());
      client.append(EventBuilder.withBody("wheee!!!", Charset.forName("UTF8")));
    } finally {
      cleanUp(server, client);
    }
  }

  @Test
  public void testThreeParamBatchAppend() throws FlumeException,
      EventDeliveryException {
    int batchSize = 7;
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      client = RpcClientFactory.getDefaultInstance(localhost, server.getPort(),
          batchSize);
      client.appendBatch(buildEvents(batchSize));
    } finally {
      cleanUp(server, client);
    }
  }

  @Test
  public void testPropertiesBatchAppend() throws FlumeException,
      EventDeliveryException {
    int batchSize = 7;
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      Properties p = new Properties();
      p.put("hosts", "host1");
      p.put("hosts.host1", localhost + ":" + String.valueOf(server.getPort()));
      p.put("batch-size", String.valueOf(batchSize));
      client = RpcClientFactory.getInstance(p);
      client.appendBatch(buildEvents(batchSize));
    } finally {
      cleanUp(server, client);
    }
  }

  // we are supposed to handle this gracefully
  @Test
  public void testTwoParamBatchAppendOverflow() throws FlumeException,
      EventDeliveryException {
    Server server = RpcTestUtils.startServer(new OKAvroHandler());
    RpcClient client = null;
    try {
      client = RpcClientFactory.getDefaultInstance(localhost, server.getPort());
      // One more than the default batch size must still be accepted.
      client.appendBatch(buildEvents(client.getBatchSize() + 1));
    } finally {
      cleanUp(server, client);
    }
  }
}
// file: Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/api/ThriftTestingSource.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.thrift.Status;
import org.apache.flume.thrift.ThriftFlumeEvent;
import org.apache.flume.thrift.ThriftSourceProtocol;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.server.THsHaServer;
import org.apache.thrift.server.TServer;
import org.apache.thrift.transport.TSSLTransportFactory;
import org.apache.thrift.transport.TNonblockingServerSocket;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TNonblockingServerTransport;
import org.apache.thrift.transport.TServerTransport;
import org.apache.thrift.transport.layered.TFastFramedTransport;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
 * In-process Thrift server used by the Flume SDK tests. Received events are
 * queued in {@link #flumeEvents} and counters track how appends arrived.
 * Server behavior (OK, FAIL, ERROR, SLOW, TIMEOUT, ALTERNATE) is selected by
 * the handler name passed to the constructor.
 */
public class ThriftTestingSource {
  /** Events received by the handler, in arrival order. */
  public final Queue<Event> flumeEvents = new ConcurrentLinkedQueue<Event>();
  private final TServer server;
  /** Number of appendBatch() calls received. */
  public int batchCount = 0;
  /** Number of single append() calls received. */
  public int individualCount = 0;
  /** Number of batches that arrived with fewer than 10 events. */
  public int incompleteBatches = 0;
  // Optional per-call delay (ms) applied by the ALTERNATE handler; null = none.
  private AtomicLong delay = null;

  public void setDelay(AtomicLong delay) {
    this.delay = delay;
  }

  /** Accepts every event, records it, and returns OK. */
  private class ThriftOKHandler implements ThriftSourceProtocol.Iface {
    public ThriftOKHandler() {
    }

    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      flumeEvents.add(EventBuilder.withBody(event.getBody(), event.getHeaders()));
      individualCount++;
      return Status.OK;
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
      batchCount++;
      if (events.size() < 10) {
        incompleteBatches++;
      }
      for (ThriftFlumeEvent event : events) {
        flumeEvents.add(EventBuilder.withBody(event.getBody(), event.getHeaders()));
      }
      return Status.OK;
    }
  }

  /** Rejects everything with Status.FAILED. */
  private class ThriftFailHandler implements ThriftSourceProtocol.Iface {
    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      return Status.FAILED;
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws
        TException {
      return Status.FAILED;
    }
  }

  /** Throws on every call. */
  private class ThriftErrorHandler implements ThriftSourceProtocol.Iface {
    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      throw new FlumeException("Forced Error");
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
      throw new FlumeException("Forced Error");
    }
  }

  /** Accepts, but only after a delay just under the tests' 2s client timeout. */
  private class ThriftSlowHandler extends ThriftOKHandler {
    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      try {
        TimeUnit.MILLISECONDS.sleep(1550);
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.append(event);
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
      try {
        TimeUnit.MILLISECONDS.sleep(1550);
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.appendBatch(events);
    }
  }

  /** Sleeps well past the client request timeout before answering. */
  private class ThriftTimeoutHandler extends ThriftOKHandler {
    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      try {
        TimeUnit.MILLISECONDS.sleep(5000);
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.append(event);
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
      try {
        TimeUnit.MILLISECONDS.sleep(5000);
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.appendBatch(events);
    }
  }

  /** Accepts after an externally controlled delay (see {@link #setDelay}). */
  private class ThriftAlternateHandler extends ThriftOKHandler {
    @Override
    public Status append(ThriftFlumeEvent event) throws TException {
      try {
        if (delay != null) {
          TimeUnit.MILLISECONDS.sleep(delay.get());
        }
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.append(event);
    }

    @Override
    public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
      try {
        if (delay != null) {
          TimeUnit.MILLISECONDS.sleep(delay.get());
        }
      } catch (InterruptedException e) {
        throw new FlumeException("Error", e);
      }
      return super.appendBatch(events);
    }
  }

  /**
   * Maps a {@link HandlerType} name to a handler instance.
   *
   * @param handlerName a HandlerType name
   * @return the matching handler, or null for an unknown name
   */
  private ThriftSourceProtocol.Iface getHandler(String handlerName) {
    ThriftSourceProtocol.Iface handler = null;
    if (handlerName.equals(HandlerType.OK.name())) {
      handler = new ThriftOKHandler();
    } else if (handlerName.equals(HandlerType.FAIL.name())) {
      handler = new ThriftFailHandler();
    } else if (handlerName.equals(HandlerType.ERROR.name())) {
      handler = new ThriftErrorHandler();
    } else if (handlerName.equals(HandlerType.SLOW.name())) {
      handler = new ThriftSlowHandler();
    } else if (handlerName.equals(HandlerType.TIMEOUT.name())) {
      handler = new ThriftTimeoutHandler();
    } else if (handlerName.equals(HandlerType.ALTERNATE.name())) {
      handler = new ThriftAlternateHandler();
    }
    return handler;
  }

  /**
   * Starts a non-blocking (THsHaServer) Thrift server bound to all interfaces.
   *
   * @param handlerName a {@link HandlerType} name selecting server behavior
   * @param port TCP port to bind
   * @param protocol ThriftRpcClient.BINARY_PROTOCOL for binary, anything else
   *        (including null) selects the compact protocol
   */
  public ThriftTestingSource(String handlerName, int port, String protocol) throws Exception {
    TNonblockingServerTransport serverTransport =
        new TNonblockingServerSocket(new InetSocketAddress("0.0.0.0", port));
    ThriftSourceProtocol.Iface handler = getHandler(handlerName);
    TProtocolFactory transportProtocolFactory = null;
    // FIX: compare String contents with equals(); the previous == comparison
    // only worked when callers passed the interned constant itself.
    if (protocol != null && protocol.equals(ThriftRpcClient.BINARY_PROTOCOL)) {
      transportProtocolFactory = new TBinaryProtocol.Factory();
    } else {
      transportProtocolFactory = new TCompactProtocol.Factory();
    }
    server = new THsHaServer(new THsHaServer.Args(serverTransport).processor(
        new ThriftSourceProtocol.Processor<ThriftSourceProtocol.Iface>(handler)).protocolFactory(
        transportProtocolFactory));
    // Serve on a background thread so the constructor returns immediately.
    Executors.newSingleThreadExecutor().submit(new Runnable() {
      @Override
      public void run() {
        server.serve();
      }
    });
  }

  /**
   * Starts an SSL-enabled TThreadPoolServer (created reflectively so the
   * dependency stays optional) bound to all interfaces.
   *
   * @param handlerName a {@link HandlerType} name selecting server behavior
   * @param port TCP port to bind
   * @param protocol ThriftRpcClient.BINARY_PROTOCOL for binary, else compact
   * @param keystore path to the server keystore
   * @param keystorePassword keystore password
   * @param keyManagerType key manager algorithm
   * @param keystoreType keystore type (e.g. JKS)
   */
  public ThriftTestingSource(String handlerName, int port,
                             String protocol, String keystore,
                             String keystorePassword, String keyManagerType,
                             String keystoreType) throws Exception {
    TSSLTransportFactory.TSSLTransportParameters params =
        new TSSLTransportFactory.TSSLTransportParameters();
    params.setKeyStore(keystore, keystorePassword, keyManagerType, keystoreType);
    TServerSocket serverTransport = TSSLTransportFactory.getServerSocket(
        port, 10000, InetAddress.getByName("0.0.0.0"), params);
    ThriftSourceProtocol.Iface handler = getHandler(handlerName);
    Class<?> serverClass = Class.forName("org.apache.thrift" +
        ".server.TThreadPoolServer");
    Class<?> argsClass = Class.forName("org.apache.thrift.server" +
        ".TThreadPoolServer$Args");
    TServer.AbstractServerArgs args = (TServer.AbstractServerArgs) argsClass
        .getConstructor(TServerTransport.class)
        .newInstance(serverTransport);
    Method m = argsClass.getDeclaredMethod("maxWorkerThreads", int.class);
    m.invoke(args, Integer.MAX_VALUE);
    TProtocolFactory transportProtocolFactory = null;
    // FIX: equals() instead of reference comparison (see other constructor).
    if (protocol != null && protocol.equals(ThriftRpcClient.BINARY_PROTOCOL)) {
      transportProtocolFactory = new TBinaryProtocol.Factory();
    } else {
      transportProtocolFactory = new TCompactProtocol.Factory();
    }
    args.protocolFactory(transportProtocolFactory);
    args.inputTransportFactory(new TFastFramedTransport.Factory());
    args.outputTransportFactory(new TFastFramedTransport.Factory());
    args.processor(new ThriftSourceProtocol.Processor<ThriftSourceProtocol.Iface>(handler));
    server = (TServer) serverClass.getConstructor(argsClass).newInstance(args);
    // Serve on a background thread so the constructor returns immediately.
    Executors.newSingleThreadExecutor().submit(new Runnable() {
      @Override
      public void run() {
        server.serve();
      }
    });
  }

  /** Server behaviors selectable by name. */
  public enum HandlerType {
    OK,
    FAIL,
    ERROR,
    SLOW,
    TIMEOUT,
    ALTERNATE;
  }

  public void stop() {
    server.stop();
  }
}
// file: Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/api/TestThriftRpcClient.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import junit.framework.Assert;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.event.EventBuilder;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
/**
 * Tests for the Thrift RPC client against an in-process ThriftTestingSource.
 */
public class TestThriftRpcClient {
  private static final String SEQ = "sequence";
  private final Properties props = new Properties();
  private ThriftRpcClient client;
  private ThriftTestingSource src;
  private int port;

  @Before
  public void setUp() throws Exception {
    props.setProperty("hosts", "h1");
    // Reserve a free ephemeral port for the testing source.
    try (ServerSocket socket = new ServerSocket(0)) {
      port = socket.getLocalPort();
    }
    props.setProperty(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE, "thrift");
    props.setProperty("hosts.h1", "0.0.0.0:" + String.valueOf(port));
    props.setProperty(RpcClientConfigurationConstants.CONFIG_BATCH_SIZE, "10");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_REQUEST_TIMEOUT, "2000");
    props.setProperty(ThriftRpcClient.CONFIG_PROTOCOL, ThriftRpcClient.COMPACT_PROTOCOL);
  }

  @After
  public void tearDown() throws Exception {
    // FIX: guard against null so a test that failed before creating the
    // source does not have its real failure masked by an NPE in tearDown.
    if (src != null) {
      src.stop();
    }
  }

  /**
   * Insert events 0..count-1, one append() call per event.
   *
   * @param client client to append through
   * @param count number of events to send
   * @throws Exception on delivery failure
   */
  private static void insertEvents(RpcClient client, int count) throws Exception {
    for (int i = 0; i < count; i++) {
      Map<String, String> header = new HashMap<String, String>();
      header.put(SEQ, String.valueOf(i));
      client.append(EventBuilder.withBody(String.valueOf(i).getBytes(), header));
    }
  }

  /**
   * Append events as a batch with seq starting at start and ending at limit
   * (inclusive).
   *
   * @param client client to append through
   * @param start first sequence number
   * @param limit last sequence number, inclusive
   * @throws Exception on delivery failure
   */
  private static void insertAsBatch(RpcClient client, int start,
                                    int limit) throws Exception {
    List<Event> events = new ArrayList<Event>();
    for (int i = start; i <= limit; i++) {
      Map<String, String> header = new HashMap<String, String>();
      header.put(SEQ, String.valueOf(i));
      events.add(EventBuilder.withBody(String.valueOf(i).getBytes(), header));
    }
    client.appendBatch(events);
  }

  @Test
  public void testOK() throws Exception {
    src = new ThriftTestingSource(ThriftTestingSource.HandlerType.OK.name(),
        port, ThriftRpcClient.COMPACT_PROTOCOL);
    client = (ThriftRpcClient) RpcClientFactory.getInstance(props);
    insertEvents(client, 10); //10 events
    insertAsBatch(client, 10, 25); //16 events
    insertAsBatch(client, 26, 37); //12 events
    int count = 0;
    Assert.assertEquals(38, src.flumeEvents.size());
    for (Event e : src.flumeEvents) {
      Assert.assertEquals(new String(e.getBody()), String.valueOf(count++));
    }
    // The 16- and 12-event batches are split at batch size 10, yielding
    // 4 batches, two of which (6 and 2 events) are incomplete.
    Assert.assertEquals(10, src.individualCount);
    Assert.assertEquals(4, src.batchCount);
    Assert.assertEquals(2, src.incompleteBatches);
  }

  @Test
  public void testSlow() throws Exception {
    src = new ThriftTestingSource(ThriftTestingSource.HandlerType.SLOW.name(),
        port, ThriftRpcClient.COMPACT_PROTOCOL);
    client = (ThriftRpcClient) RpcClientFactory.getInstance(props);
    insertEvents(client, 2); //2 events
    insertAsBatch(client, 2, 25); //24 events (3 batches)
    insertAsBatch(client, 26, 37); //12 events (2 batches)
    int count = 0;
    Assert.assertEquals(38, src.flumeEvents.size());
    for (Event e : src.flumeEvents) {
      Assert.assertEquals(new String(e.getBody()), String.valueOf(count++));
    }
    Assert.assertEquals(2, src.individualCount);
    Assert.assertEquals(5, src.batchCount);
    Assert.assertEquals(2, src.incompleteBatches);
  }

  @Test(expected = EventDeliveryException.class)
  public void testFail() throws Exception {
    src = new ThriftTestingSource(ThriftTestingSource.HandlerType.FAIL.name(),
        port, ThriftRpcClient.COMPACT_PROTOCOL);
    client = (ThriftRpcClient) RpcClientFactory.getInstance(props);
    insertEvents(client, 2); //2 events
    Assert.fail("Expected EventDeliveryException to be thrown.");
  }

  @Test
  public void testError() throws Throwable {
    try {
      src = new ThriftTestingSource(ThriftTestingSource.HandlerType.ERROR.name(), port,
          ThriftRpcClient.COMPACT_PROTOCOL);
      client = (ThriftRpcClient) RpcClientFactory.getThriftInstance("0.0.0.0", port);
      insertEvents(client, 2); //2 events
    } catch (EventDeliveryException ex) {
      Assert.assertEquals("Failed to send event. ", ex.getMessage());
    }
  }

  @Test (expected = TimeoutException.class)
  public void testTimeout() throws Throwable {
    try {
      src = new ThriftTestingSource(ThriftTestingSource.HandlerType.TIMEOUT.name(), port,
          ThriftRpcClient.COMPACT_PROTOCOL);
      client = (ThriftRpcClient) RpcClientFactory.getThriftInstance(props);
      insertEvents(client, 2); //2 events
    } catch (EventDeliveryException ex) {
      // Unwrap so the expected TimeoutException cause propagates.
      throw ex.getCause();
    }
  }

  @Test
  public void testMultipleThreads() throws Throwable {
    src = new ThriftTestingSource(ThriftTestingSource.HandlerType.OK.name(), port,
        ThriftRpcClient.COMPACT_PROTOCOL);
    client = (ThriftRpcClient) RpcClientFactory.getThriftInstance("0.0.0.0", port, 10);
    int threadCount = 100;
    ExecutorService submissionSvc = Executors.newFixedThreadPool(threadCount);
    ArrayList<Future<?>> futures = new ArrayList<Future<?>>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      futures.add(submissionSvc.submit(new Runnable() {
        @Override
        public void run() {
          try {
            insertAsBatch(client, 0, 9);
          } catch (Exception e) {
            // Failures surface through the per-thread stack trace; the
            // event-count verification below catches lost batches.
            e.printStackTrace();
          }
        }
      }));
    }
    for (int i = 0; i < threadCount; i++) {
      futures.get(i).get();
    }
    // FIX: release the pool's threads once all submissions completed.
    submissionSvc.shutdown();
    ArrayList<String> events = new ArrayList<String>();
    for (Event e : src.flumeEvents) {
      events.add(new String(e.getBody()));
    }
    int count = 0;
    Collections.sort(events);
    // Every sequence number 0..9 must appear exactly threadCount times.
    for (int i = 0; i < events.size();) {
      for (int j = 0; j < threadCount; j++) {
        Assert.assertEquals(String.valueOf(count), events.get(i++));
      }
      count++;
    }
  }
}
// file: Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/api/TestLoadBalancingRpcClient.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import junit.framework.Assert;
import org.apache.avro.ipc.Server;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcTestUtils.LoadBalancedAvroHandler;
import org.apache.flume.api.RpcTestUtils.OKAvroHandler;
import org.apache.flume.event.EventBuilder;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
public class TestLoadBalancingRpcClient {
private static final Logger LOGGER = LoggerFactory.getLogger(TestLoadBalancingRpcClient.class);
@Test(expected = FlumeException.class)
public void testCreatingLbClientSingleHost() {
Server server1 = null;
RpcClient c = null;
try {
server1 = RpcTestUtils.startServer(new OKAvroHandler());
Properties p = new Properties();
p.put("host1", "127.0.0.1:" + server1.getPort());
p.put("hosts", "host1");
p.put("client.type", "default_loadbalance");
RpcClientFactory.getInstance(p);
} finally {
if (server1 != null) server1.close();
if (c != null) c.close();
}
}
@Test
public void testTwoHostFailover() throws Exception {
Server s1 = null;
Server s2 = null;
RpcClient c = null;
try {
LoadBalancedAvroHandler h1 = new LoadBalancedAvroHandler();
LoadBalancedAvroHandler h2 = new LoadBalancedAvroHandler();
s1 = RpcTestUtils.startServer(h1);
s2 = RpcTestUtils.startServer(h2);
Properties p = new Properties();
p.put("hosts", "h1 h2");
p.put("client.type", "default_loadbalance");
p.put("hosts.h1", "127.0.0.1:" + s1.getPort());
p.put("hosts.h2", "127.0.0.1:" + s2.getPort());
c = RpcClientFactory.getInstance(p);
Assert.assertTrue(c instanceof LoadBalancingRpcClient);
for (int i = 0; i < 100; i++) {
if (i == 20) {
h2.setFailed();
} else if (i == 40) {
h2.setOK();
}
c.append(getEvent(i));
}
Assert.assertEquals(60, h1.getAppendCount());
Assert.assertEquals(40, h2.getAppendCount());
} finally {
if (s1 != null) s1.close();
if (s2 != null) s2.close();
if (c != null) c.close();
}
}
// This will fail without FLUME-1823
@Test(expected = EventDeliveryException.class)
public void testTwoHostFailoverThrowAfterClose() throws Exception {
Server s1 = null;
Server s2 = null;
RpcClient c = null;
try {
LoadBalancedAvroHandler h1 = new LoadBalancedAvroHandler();
LoadBalancedAvroHandler h2 = new LoadBalancedAvroHandler();
s1 = RpcTestUtils.startServer(h1);
s2 = RpcTestUtils.startServer(h2);
Properties p = new Properties();
p.put("hosts", "h1 h2");
p.put("client.type", "default_loadbalance");
p.put("hosts.h1", "127.0.0.1:" + s1.getPort());
p.put("hosts.h2", "127.0.0.1:" + s2.getPort());
c = RpcClientFactory.getInstance(p);
Assert.assertTrue(c instanceof LoadBalancingRpcClient);
for (int i = 0; i < 100; i++) {
if (i == 20) {
h2.setFailed();
} else if (i == 40) {
h2.setOK();
}
c.append(getEvent(i));
}
Assert.assertEquals(60, h1.getAppendCount());
Assert.assertEquals(40, h2.getAppendCount());
if (c != null) c.close();
c.append(getEvent(3));
Assert.fail();
} finally {
if (s1 != null) s1.close();
if (s2 != null) s2.close();
}
}
/**
* Ensure that we can tolerate a host that is completely down.
*
* @throws Exception
*/
@Test
public void testTwoHostsOneDead() throws Exception {
LOGGER.info("Running testTwoHostsOneDead...");
Server s1 = null;
RpcClient c1 = null;
RpcClient c2 = null;
try {
LoadBalancedAvroHandler h1 = new LoadBalancedAvroHandler();
s1 = RpcTestUtils.startServer(h1);
// do not create a 2nd server (assume it's "down")
Properties p = new Properties();
p.put("hosts", "h1 h2");
p.put("client.type", "default_loadbalance");
p.put("hosts.h1", "127.0.0.1:" + 0); // port 0 should always be closed
p.put("hosts.h2", "127.0.0.1:" + s1.getPort());
// test batch API
c1 = RpcClientFactory.getInstance(p);
Assert.assertTrue(c1 instanceof LoadBalancingRpcClient);
for (int i = 0; i < 10; i++) {
c1.appendBatch(getBatchedEvent(i));
}
Assert.assertEquals(10, h1.getAppendBatchCount());
// test non-batch API
c2 = RpcClientFactory.getInstance(p);
Assert.assertTrue(c2 instanceof LoadBalancingRpcClient);
for (int i = 0; i < 10; i++) {
c2.append(getEvent(i));
}
Assert.assertEquals(10, h1.getAppendCount());
} finally {
if (s1 != null) s1.close();
if (c1 != null) c1.close();
if (c2 != null) c2.close();
}
}
/**
 * Batch appends against two hosts where the second host fails for the middle
 * stretch of the run: its 20 missed batches must be absorbed by the first
 * host, and balancing must resume once it recovers.
 */
@Test
public void testTwoHostFailoverBatch() throws Exception {
  Server serverOne = null;
  Server serverTwo = null;
  RpcClient client = null;
  try {
    LoadBalancedAvroHandler handlerOne = new LoadBalancedAvroHandler();
    LoadBalancedAvroHandler handlerTwo = new LoadBalancedAvroHandler();
    serverOne = RpcTestUtils.startServer(handlerOne);
    serverTwo = RpcTestUtils.startServer(handlerTwo);
    Properties props = new Properties();
    props.put("hosts", "h1 h2");
    props.put("client.type", "default_loadbalance");
    props.put("hosts.h1", "127.0.0.1:" + serverOne.getPort());
    props.put("hosts.h2", "127.0.0.1:" + serverTwo.getPort());
    client = RpcClientFactory.getInstance(props);
    Assert.assertTrue(client instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 100; i++) {
      // take host 2 down between batch 20 and batch 40
      if (i == 20) {
        handlerTwo.setFailed();
      } else if (i == 40) {
        handlerTwo.setOK();
      }
      client.appendBatch(getBatchedEvent(i));
    }
    // host 1 got its even share (50) plus the 10 batches host 2 missed,
    // per the 60/40 split asserted by the original test
    Assert.assertEquals(60, handlerOne.getAppendBatchCount());
    Assert.assertEquals(40, handlerTwo.getAppendBatchCount());
  } finally {
    if (serverOne != null) {
      serverOne.close();
    }
    if (serverTwo != null) {
      serverTwo.close();
    }
    if (client != null) {
      client.close();
    }
  }
}
/**
 * 100 single appends against two healthy hosts must split exactly evenly
 * (50/50) with the default load-balancing configuration.
 */
@Test
public void testLbDefaultClientTwoHosts() throws Exception {
  Server serverOne = null;
  Server serverTwo = null;
  RpcClient client = null;
  try {
    LoadBalancedAvroHandler handlerOne = new LoadBalancedAvroHandler();
    LoadBalancedAvroHandler handlerTwo = new LoadBalancedAvroHandler();
    serverOne = RpcTestUtils.startServer(handlerOne);
    serverTwo = RpcTestUtils.startServer(handlerTwo);
    Properties props = new Properties();
    props.put("hosts", "h1 h2");
    props.put("client.type", "default_loadbalance");
    props.put("hosts.h1", "127.0.0.1:" + serverOne.getPort());
    props.put("hosts.h2", "127.0.0.1:" + serverTwo.getPort());
    client = RpcClientFactory.getInstance(props);
    Assert.assertTrue(client instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 100; i++) {
      client.append(getEvent(i));
    }
    Assert.assertEquals(50, handlerOne.getAppendCount());
    Assert.assertEquals(50, handlerTwo.getAppendCount());
  } finally {
    if (serverOne != null) {
      serverOne.close();
    }
    if (serverTwo != null) {
      serverTwo.close();
    }
    if (client != null) {
      client.close();
    }
  }
}
/**
 * Batch-API twin of {@code testLbDefaultClientTwoHosts}: 100 batches against
 * two healthy hosts must split exactly evenly (50/50).
 */
@Test
public void testLbDefaultClientTwoHostsBatch() throws Exception {
  Server serverOne = null;
  Server serverTwo = null;
  RpcClient client = null;
  try {
    LoadBalancedAvroHandler handlerOne = new LoadBalancedAvroHandler();
    LoadBalancedAvroHandler handlerTwo = new LoadBalancedAvroHandler();
    serverOne = RpcTestUtils.startServer(handlerOne);
    serverTwo = RpcTestUtils.startServer(handlerTwo);
    Properties props = new Properties();
    props.put("hosts", "h1 h2");
    props.put("client.type", "default_loadbalance");
    props.put("hosts.h1", "127.0.0.1:" + serverOne.getPort());
    props.put("hosts.h2", "127.0.0.1:" + serverTwo.getPort());
    client = RpcClientFactory.getInstance(props);
    Assert.assertTrue(client instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 100; i++) {
      client.appendBatch(getBatchedEvent(i));
    }
    Assert.assertEquals(50, handlerOne.getAppendBatchCount());
    Assert.assertEquals(50, handlerTwo.getAppendBatchCount());
  } finally {
    if (serverOne != null) {
      serverOne.close();
    }
    if (serverTwo != null) {
      serverTwo.close();
    }
    if (client != null) {
      client.close();
    }
  }
}
/**
 * Send {@code NUM_EVENTS} single appends through ten hosts with the random
 * host selector and verify no events are lost and the load is spread out
 * (more than two distinct per-host counts).
 *
 * <p>Fix: the RPC client is now closed in {@code finally}; it was previously
 * leaked, unlike in the two-host tests above which close their clients.
 */
@Test
public void testLbClientTenHostRandomDistribution() throws Exception {
  final int NUM_HOSTS = 10;
  final int NUM_EVENTS = 1000;
  Server[] s = new Server[NUM_HOSTS];
  LoadBalancedAvroHandler[] h = new LoadBalancedAvroHandler[NUM_HOSTS];
  RpcClient c = null;
  try {
    Properties p = new Properties();
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < NUM_HOSTS; i++) {
      h[i] = new LoadBalancedAvroHandler();
      s[i] = RpcTestUtils.startServer(h[i]);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + s[i].getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "random");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < NUM_EVENTS; i++) {
      c.append(getEvent(i));
    }
    Set<Integer> counts = new HashSet<Integer>();
    int total = 0;
    for (LoadBalancedAvroHandler handler : h) {
      total += handler.getAppendCount();
      counts.add(handler.getAppendCount());
    }
    // random selection should yield at least three distinct per-host counts
    Assert.assertTrue("Very unusual distribution", counts.size() > 2);
    Assert.assertTrue("Missing events", total == NUM_EVENTS);
  } finally {
    if (c != null) {
      c.close(); // close the client before tearing down its servers
    }
    for (int i = 0; i < NUM_HOSTS; i++) {
      if (s[i] != null) {
        s[i].close();
      }
    }
  }
}
/**
 * Batch-API twin of {@code testLbClientTenHostRandomDistribution}: random
 * selector over ten hosts, no batches lost, more than two distinct per-host
 * batch counts.
 *
 * <p>Fix: the RPC client is now closed in {@code finally}; it was previously
 * leaked.
 */
@Test
public void testLbClientTenHostRandomDistributionBatch() throws Exception {
  final int NUM_HOSTS = 10;
  final int NUM_EVENTS = 1000;
  Server[] s = new Server[NUM_HOSTS];
  LoadBalancedAvroHandler[] h = new LoadBalancedAvroHandler[NUM_HOSTS];
  RpcClient c = null;
  try {
    Properties p = new Properties();
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < NUM_HOSTS; i++) {
      h[i] = new LoadBalancedAvroHandler();
      s[i] = RpcTestUtils.startServer(h[i]);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + s[i].getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "random");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < NUM_EVENTS; i++) {
      c.appendBatch(getBatchedEvent(i));
    }
    Set<Integer> counts = new HashSet<Integer>();
    int total = 0;
    for (LoadBalancedAvroHandler handler : h) {
      total += handler.getAppendBatchCount();
      counts.add(handler.getAppendBatchCount());
    }
    Assert.assertTrue("Very unusual distribution", counts.size() > 2);
    Assert.assertTrue("Missing events", total == NUM_EVENTS);
  } finally {
    if (c != null) {
      c.close(); // close the client before tearing down its servers
    }
    for (int i = 0; i < NUM_HOSTS; i++) {
      if (s[i] != null) {
        s[i].close();
      }
    }
  }
}
/**
 * Send {@code NUM_EVENTS} single appends through ten hosts with the
 * round_robin selector: every host must receive exactly the same count
 * (a single distinct value) and no events may be lost.
 *
 * <p>Fix: the RPC client is now closed in {@code finally}; it was previously
 * leaked.
 */
@Test
public void testLbClientTenHostRoundRobinDistribution() throws Exception {
  final int NUM_HOSTS = 10;
  final int NUM_EVENTS = 1000;
  Server[] s = new Server[NUM_HOSTS];
  LoadBalancedAvroHandler[] h = new LoadBalancedAvroHandler[NUM_HOSTS];
  RpcClient c = null;
  try {
    Properties p = new Properties();
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < NUM_HOSTS; i++) {
      h[i] = new LoadBalancedAvroHandler();
      s[i] = RpcTestUtils.startServer(h[i]);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + s[i].getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "round_robin");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < NUM_EVENTS; i++) {
      c.append(getEvent(i));
    }
    Set<Integer> counts = new HashSet<Integer>();
    int total = 0;
    for (LoadBalancedAvroHandler handler : h) {
      total += handler.getAppendCount();
      counts.add(handler.getAppendCount());
    }
    // strict round robin: every host sees an identical share
    Assert.assertTrue("Very unusual distribution", counts.size() == 1);
    Assert.assertTrue("Missing events", total == NUM_EVENTS);
  } finally {
    if (c != null) {
      c.close(); // close the client before tearing down its servers
    }
    for (int i = 0; i < NUM_HOSTS; i++) {
      if (s[i] != null) {
        s[i].close();
      }
    }
  }
}
/**
 * Batch-API twin of {@code testLbClientTenHostRoundRobinDistribution}:
 * round_robin over ten hosts gives every host an identical batch count.
 *
 * <p>Fix: the RPC client is now closed in {@code finally}; it was previously
 * leaked.
 */
@Test
public void testLbClientTenHostRoundRobinDistributionBatch() throws Exception {
  final int NUM_HOSTS = 10;
  final int NUM_EVENTS = 1000;
  Server[] s = new Server[NUM_HOSTS];
  LoadBalancedAvroHandler[] h = new LoadBalancedAvroHandler[NUM_HOSTS];
  RpcClient c = null;
  try {
    Properties p = new Properties();
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < NUM_HOSTS; i++) {
      h[i] = new LoadBalancedAvroHandler();
      s[i] = RpcTestUtils.startServer(h[i]);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + s[i].getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "round_robin");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < NUM_EVENTS; i++) {
      c.appendBatch(getBatchedEvent(i));
    }
    Set<Integer> counts = new HashSet<Integer>();
    int total = 0;
    for (LoadBalancedAvroHandler handler : h) {
      total += handler.getAppendBatchCount();
      counts.add(handler.getAppendBatchCount());
    }
    Assert.assertTrue("Very unusual distribution", counts.size() == 1);
    Assert.assertTrue("Missing events", total == NUM_EVENTS);
  } finally {
    if (c != null) {
      c.close(); // close the client before tearing down its servers
    }
    for (int i = 0; i < NUM_HOSTS; i++) {
      if (s[i] != null) {
        s[i].close();
      }
    }
  }
}
/**
 * Random selector with backoff: hosts 0 and 2 start failed, so host 1 takes
 * all traffic; then host 1 fails while host 0 is still backed off (no host
 * available, append must throw); after the backoff lapses host 0 takes over.
 *
 * <p>Fix: the client and all three servers are now released in
 * {@code finally}; previously the method had no cleanup at all.
 */
@Test
public void testRandomBackoff() throws Exception {
  Properties p = new Properties();
  List<LoadBalancedAvroHandler> hosts =
      new ArrayList<LoadBalancedAvroHandler>();
  List<Server> servers = new ArrayList<Server>();
  RpcClient c = null;
  try {
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < 3; i++) {
      LoadBalancedAvroHandler s = new LoadBalancedAvroHandler();
      hosts.add(s);
      Server srv = RpcTestUtils.startServer(s);
      servers.add(srv);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + srv.getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "random");
    p.put("backoff", "true");
    hosts.get(0).setFailed();
    hosts.get(2).setFailed();
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    // TODO: there is a remote possibility that s0 or s2
    // never get hit by the random assignment
    // and thus not backoffed, causing the test to fail
    for (int i = 0; i < 50; i++) {
      // a well behaved runner would always check the return.
      c.append(EventBuilder.withBody(("test" + String.valueOf(i)).getBytes()));
    }
    Assert.assertEquals(50, hosts.get(1).getAppendCount());
    Assert.assertEquals(0, hosts.get(0).getAppendCount());
    Assert.assertEquals(0, hosts.get(2).getAppendCount());
    hosts.get(0).setOK();
    hosts.get(1).setFailed(); // s0 should still be backed off
    try {
      c.append(EventBuilder.withBody("shouldfail".getBytes()));
      // nothing should be able to process right now
      Assert.fail("Expected EventDeliveryException");
    } catch (EventDeliveryException e) {
      // this is expected
    }
    Thread.sleep(2500); // wait for s0 to no longer be backed off
    for (int i = 0; i < 50; i++) {
      // a well behaved runner would always check the return.
      c.append(EventBuilder.withBody(("test" + String.valueOf(i)).getBytes()));
    }
    Assert.assertEquals(50, hosts.get(0).getAppendCount());
    Assert.assertEquals(50, hosts.get(1).getAppendCount());
    Assert.assertEquals(0, hosts.get(2).getAppendCount());
  } finally {
    if (c != null) {
      c.close();
    }
    for (Server server : servers) {
      server.close();
    }
  }
}
/**
 * Round robin with backoff: a host that fails is removed from the rotation
 * and, per the original comment, stays out of the iterators even after it is
 * set OK again until its backoff period lapses — hence the 4/1/4 split.
 *
 * <p>Fix: the client and all three servers are now released in
 * {@code finally}; previously the method had no cleanup at all.
 */
@Test
public void testRoundRobinBackoffInitialFailure() throws EventDeliveryException {
  Properties p = new Properties();
  List<LoadBalancedAvroHandler> hosts =
      new ArrayList<LoadBalancedAvroHandler>();
  List<Server> servers = new ArrayList<Server>();
  RpcClient c = null;
  try {
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < 3; i++) {
      LoadBalancedAvroHandler s = new LoadBalancedAvroHandler();
      hosts.add(s);
      Server srv = RpcTestUtils.startServer(s);
      servers.add(srv);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + srv.getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "round_robin");
    p.put("backoff", "true");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    hosts.get(1).setFailed();
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    hosts.get(1).setOK();
    //This time the iterators will never have "1".
    //So clients get in the order: 1 - 3 - 1
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(1 + 2 + 1, hosts.get(0).getAppendCount());
    Assert.assertEquals(1, hosts.get(1).getAppendCount());
    Assert.assertEquals(1 + 1 + 2, hosts.get(2).getAppendCount());
  } finally {
    if (c != null) {
      c.close();
    }
    for (Server server : servers) {
      server.close();
    }
  }
}
/**
 * Round robin with backoff: host 1 starts failed and each re-failure extends
 * its backoff, so it receives nothing until the final wait; afterwards all
 * three hosts share the final 60 events evenly.
 *
 * <p>Fix: the client and all three servers are now released in
 * {@code finally}; previously the method had no cleanup at all.
 */
@Test
public void testRoundRobinBackoffIncreasingBackoffs() throws Exception {
  Properties p = new Properties();
  List<LoadBalancedAvroHandler> hosts =
      new ArrayList<LoadBalancedAvroHandler>();
  List<Server> servers = new ArrayList<Server>();
  RpcClient c = null;
  try {
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < 3; i++) {
      LoadBalancedAvroHandler s = new LoadBalancedAvroHandler();
      hosts.add(s);
      if (i == 1) {
        s.setFailed();
      }
      Server srv = RpcTestUtils.startServer(s);
      servers.add(srv);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + srv.getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "round_robin");
    p.put("backoff", "true");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(0, hosts.get(1).getAppendCount());
    Thread.sleep(2100);
    // this should let the sink come out of backoff and get backed off for a longer time
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(0, hosts.get(1).getAppendCount());
    hosts.get(1).setOK();
    Thread.sleep(2100);
    // this time it shouldn't come out of backoff yet as the timeout isn't over
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(0, hosts.get(1).getAppendCount());
    // after this s2 should be receiving events again
    Thread.sleep(2500);
    int numEvents = 60;
    for (int i = 0; i < numEvents; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(2 + 2 + 1 + (numEvents / 3), hosts.get(0).getAppendCount());
    Assert.assertEquals((numEvents / 3), hosts.get(1).getAppendCount());
    Assert.assertEquals(1 + 1 + 2 + (numEvents / 3), hosts.get(2).getAppendCount());
  } finally {
    if (c != null) {
      c.close();
    }
    for (Server server : servers) {
      server.close();
    }
  }
}
/**
 * Round robin with backoff: host 1 starts failed, recovers, and after the
 * backoff window expires it rejoins the rotation so the final 60 events are
 * shared evenly.
 *
 * <p>Fix: the client and all three servers are now released in
 * {@code finally}; previously the method had no cleanup at all.
 */
@Test
public void testRoundRobinBackoffFailureRecovery()
    throws EventDeliveryException, InterruptedException {
  Properties p = new Properties();
  List<LoadBalancedAvroHandler> hosts =
      new ArrayList<LoadBalancedAvroHandler>();
  List<Server> servers = new ArrayList<Server>();
  RpcClient c = null;
  try {
    StringBuilder hostList = new StringBuilder("");
    for (int i = 0; i < 3; i++) {
      LoadBalancedAvroHandler s = new LoadBalancedAvroHandler();
      hosts.add(s);
      if (i == 1) {
        s.setFailed();
      }
      Server srv = RpcTestUtils.startServer(s);
      servers.add(srv);
      String name = "h" + i;
      p.put("hosts." + name, "127.0.0.1:" + srv.getPort());
      hostList.append(name).append(" ");
    }
    p.put("hosts", hostList.toString().trim());
    p.put("client.type", "default_loadbalance");
    p.put("host-selector", "round_robin");
    p.put("backoff", "true");
    c = RpcClientFactory.getInstance(p);
    Assert.assertTrue(c instanceof LoadBalancingRpcClient);
    for (int i = 0; i < 3; i++) {
      c.append(EventBuilder.withBody("recovery test".getBytes()));
    }
    hosts.get(1).setOK();
    Thread.sleep(3000);
    int numEvents = 60;
    for (int i = 0; i < numEvents; i++) {
      c.append(EventBuilder.withBody("testing".getBytes()));
    }
    Assert.assertEquals(2 + (numEvents / 3), hosts.get(0).getAppendCount());
    Assert.assertEquals(0 + (numEvents / 3), hosts.get(1).getAppendCount());
    Assert.assertEquals(1 + (numEvents / 3), hosts.get(2).getAppendCount());
  } finally {
    if (c != null) {
      c.close();
    }
    for (Server server : servers) {
      server.close();
    }
  }
}
/**
 * Wraps the event for the given sequence number in a one-element batch.
 */
private List<Event> getBatchedEvent(int index) {
  List<Event> batch = new ArrayList<Event>(1);
  batch.add(getEvent(index));
  return batch;
}
/**
 * Builds a simple event whose body carries the given sequence number.
 */
private Event getEvent(int index) {
  String body = "event: " + index;
  return EventBuilder.withBody(body.getBytes());
}
}
| 9,655 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/api/TestFailoverRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.avro.ipc.Server;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcTestUtils.OKAvroHandler;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import org.junit.Test;
public class TestFailoverRpcClient {

  /**
   * Builds a batch of {@code count} events with bodies "evt: 0" ..
   * "evt: count-1", encoded as UTF-8. Extracted to replace six identical
   * copies of the same loop in the tests below.
   */
  private List<Event> makeEvents(int count) {
    List<Event> events = new ArrayList<Event>(count);
    for (int i = 0; i < count; i++) {
      events.add(EventBuilder.withBody("evt: " + i, Charset.forName("UTF8")));
    }
    return events;
  }

  /**
   * Test a bunch of servers closing the one we are writing to and bringing
   * another one back online.
   *
   * @throws FlumeException
   * @throws EventDeliveryException
   * @throws InterruptedException
   */
  @Test
  public void testFailover()
      throws FlumeException, EventDeliveryException, InterruptedException {
    FailoverRpcClient client = null;
    Server server1 = RpcTestUtils.startServer(new OKAvroHandler());
    Server server2 = RpcTestUtils.startServer(new OKAvroHandler());
    Server server3 = RpcTestUtils.startServer(new OKAvroHandler());
    Properties props = new Properties();
    int s1Port = server1.getPort();
    int s2Port = server2.getPort();
    int s3Port = server3.getPort();
    props.put("client.type", "default_failover");
    props.put("hosts", "host1 host2 host3");
    props.put("hosts.host1", "127.0.0.1:" + String.valueOf(s1Port));
    props.put("hosts.host2", "127.0.0.1:" + String.valueOf(s2Port));
    props.put("hosts.host3", "127.0.0.1:" + String.valueOf(s3Port));
    client = (FailoverRpcClient) RpcClientFactory.getInstance(props);
    // All servers up: the first configured host takes the traffic.
    client.appendBatch(makeEvents(50));
    Assert.assertEquals(new InetSocketAddress("127.0.0.1", server1.getPort()),
        client.getLastConnectedServerAddress());
    server1.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    client.appendBatch(makeEvents(50));
    Assert.assertEquals(new InetSocketAddress("localhost", server2.getPort()),
        client.getLastConnectedServerAddress());
    server2.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    client.append(EventBuilder.withBody("Had a sandwich?",
        Charset.forName("UTF8")));
    Assert.assertEquals(new InetSocketAddress("localhost", server3.getPort()),
        client.getLastConnectedServerAddress());
    // Bring server 2 back.
    Server server4 = RpcTestUtils.startServer(new OKAvroHandler(), s2Port);
    server3.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    client.appendBatch(makeEvents(50));
    Assert.assertEquals(new InetSocketAddress("localhost", s2Port),
        client.getLastConnectedServerAddress());
    Server server5 = RpcTestUtils.startServer(new OKAvroHandler(), s1Port);
    // Make sure we are still talking to server 4
    client.append(EventBuilder.withBody("Had a mango?",
        Charset.forName("UTF8")));
    Assert.assertEquals(new InetSocketAddress("localhost", s2Port),
        client.getLastConnectedServerAddress());
    server4.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    client.appendBatch(makeEvents(50));
    Assert.assertEquals(new InetSocketAddress("localhost", s1Port),
        client.getLastConnectedServerAddress());
    server5.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    Server server6 = RpcTestUtils.startServer(new OKAvroHandler(), s1Port);
    client.append(EventBuilder.withBody("Had a whole watermelon?",
        Charset.forName("UTF8")));
    Assert.assertEquals(new InetSocketAddress("localhost", s1Port),
        client.getLastConnectedServerAddress());
    server6.close();
    Thread.sleep(1000L); // wait a second for the close to occur
    Server server7 = RpcTestUtils.startServer(new OKAvroHandler(), s3Port);
    client.appendBatch(makeEvents(50));
    Assert.assertEquals(new InetSocketAddress("localhost", s3Port),
        client.getLastConnectedServerAddress());
    server7.close();
  }

  /**
   * Try writing to some servers and then kill them all.
   *
   * @throws FlumeException
   * @throws EventDeliveryException
   */
  @Test(
      expected = EventDeliveryException.class)
  public void testFailedServers() throws FlumeException, EventDeliveryException {
    FailoverRpcClient client = null;
    Server server1 = RpcTestUtils.startServer(new OKAvroHandler());
    Server server2 = RpcTestUtils.startServer(new OKAvroHandler());
    Server server3 = RpcTestUtils.startServer(new OKAvroHandler());
    Properties props = new Properties();
    props.put("client.type", "default_failover");
    props.put("hosts", "host1 host2 host3");
    props.put("hosts.host1", "localhost:" + String.valueOf(server1.getPort()));
    props.put("hosts.host2", "localhost:" + String.valueOf(server2.getPort()));
    // NOTE(review): the leading space before "localhost" is preserved from the
    // original test — presumably it exercises tolerance of whitespace in the
    // host list; confirm it is intentional before changing it.
    props.put("hosts.host3", " localhost:" + String.valueOf(server3.getPort()));
    client = (FailoverRpcClient) RpcClientFactory.getInstance(props);
    client.appendBatch(makeEvents(50));
    server1.close();
    server2.close();
    server3.close();
    // With every server down, this append must raise EventDeliveryException.
    client.appendBatch(makeEvents(50));
  }
}
| 9,656 |
0 | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/test/java/org/apache/flume/event/TestEventBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.event;
import java.util.HashMap;
import java.util.Map;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
import org.junit.Assert;
import org.junit.Test;
public class TestEventBuilder {
@Test
public void testBody() {
Event e1 = EventBuilder.withBody("e1".getBytes());
Assert.assertNotNull(e1);
Assert.assertArrayEquals("body is correct", "e1".getBytes(), e1.getBody());
Event e2 = EventBuilder.withBody(Long.valueOf(2).toString().getBytes());
Assert.assertNotNull(e2);
Assert.assertArrayEquals("body is correct", Long.valueOf(2L).toString()
.getBytes(), e2.getBody());
}
@Test
public void testHeaders() {
Map<String, String> headers = new HashMap<String, String>();
headers.put("one", "1");
headers.put("two", "2");
Event e1 = EventBuilder.withBody("e1".getBytes(), headers);
Assert.assertNotNull(e1);
Assert.assertArrayEquals("e1 has the proper body", "e1".getBytes(),
e1.getBody());
Assert.assertEquals("e1 has the proper headers", 2, e1.getHeaders().size());
Assert.assertEquals("e1 has a one key", "1", e1.getHeaders().get("one"));
}
@Test (expected = FlumeException.class)
public void testJsonEventUnsupportedEncoding() {
JSONEvent jsonEvent = new JSONEvent();
jsonEvent.setCharset("dummy");
jsonEvent.setBody("This is json event".getBytes());
jsonEvent.getBody();
}
} | 9,657 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/EventDeliveryException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume;
/**
 * An event delivery exception is raised whenever an {@link Event} fails to
 * reach at least one of its intended (next-hop) destinations.
 */
public class EventDeliveryException extends Exception {

  private static final long serialVersionUID = 1102327497549834945L;

  /** Creates an exception with neither message nor cause. */
  public EventDeliveryException() {
    super();
  }

  /**
   * Creates an exception with a detail message.
   *
   * @param message description of the delivery failure
   */
  public EventDeliveryException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a detail message and an underlying cause.
   *
   * @param message description of the delivery failure
   * @param t the underlying cause
   */
  public EventDeliveryException(String message, Throwable t) {
    super(message, t);
  }

  /**
   * Creates an exception wrapping an underlying cause.
   *
   * @param t the underlying cause
   */
  public EventDeliveryException(Throwable t) {
    super(t);
  }
}
| 9,658 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/Event.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume;
import java.util.Map;
/**
 * Basic representation of a data object in Flume.
 * Provides access to data as it flows through the system.
 */
public interface Event {

  /**
   * Returns the event's headers: a map of name-value pairs that accompany
   * the body.
   */
  Map<String, String> getHeaders();

  /**
   * Set the event headers
   * @param headers Map of headers to replace the current headers.
   */
  void setHeaders(Map<String, String> headers);

  /**
   * Returns the raw byte array of the data contained in this event.
   */
  byte[] getBody();

  /**
   * Sets the raw byte array of the data contained in this event.
   * @param body The data.
   */
  void setBody(byte[] body);
}
| 9,659 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/FlumeException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume;
/**
 * Base class of all flume exceptions.
 */
public class FlumeException extends RuntimeException {

  private static final long serialVersionUID = 1L;

  /**
   * Creates an exception with a detail message.
   *
   * @param msg description of the failure
   */
  public FlumeException(String msg) {
    super(msg);
  }

  /**
   * Creates an exception with a detail message and an underlying cause.
   *
   * @param msg description of the failure
   * @param th the underlying cause
   */
  public FlumeException(String msg, Throwable th) {
    super(msg, th);
  }

  /**
   * Creates an exception wrapping an underlying cause.
   *
   * @param th the underlying cause
   */
  public FlumeException(Throwable th) {
    super(th);
  }
}
| 9,660 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/util/SpecificOrderIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
/**
 * A utility class that iterates over the given ordered list of items via
 * the specified order array. The entries of the order array indicate the
 * index within the ordered list of items that needs to be picked over the
 * course of iteration.
 */
public class SpecificOrderIterator<T> implements Iterator<T> {

  // indices into the item list, visited front to back
  private final int[] order;

  // the backing list; never modified by this iterator
  private final List<T> items;

  // position of the next entry of the order array to consume
  private int index = 0;

  public SpecificOrderIterator(int[] orderArray, List<T> itemList) {
    order = orderArray;
    items = itemList;
  }

  @Override
  public boolean hasNext() {
    return index < order.length;
  }

  @Override
  public T next() {
    // Honor the Iterator contract: signal exhaustion with
    // NoSuchElementException rather than leaking an
    // ArrayIndexOutOfBoundsException from the order array.
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return items.get(order[index++]);
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
}
| 9,661 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/util/RoundRobinOrderSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.util;
import java.util.Iterator;
import java.util.List;
/**
 * An implementation of OrderSelector which returns objects in round robin order.
 * Also supports backoff.
 */
public class RoundRobinOrderSelector<T> extends OrderSelector<T> {

  // index into the active list where the next iteration should begin
  private int nextHead = 0;

  public RoundRobinOrderSelector(boolean shouldBackOff) {
    super(shouldBackOff);
  }

  @Override
  public Iterator<T> createIterator() {
    List<Integer> active = getIndexList();
    int size = active.size();
    // the active set may have shrunk since the last call, so clamp the head
    if (nextHead >= size) {
      nextHead = 0;
    }
    int start = nextHead;
    nextHead++;
    if (nextHead == size) {
      nextHead = 0;
    }
    // rotate the active indices so that each call starts one slot later
    int[] visitOrder = new int[size];
    for (int offset = 0; offset < size; offset++) {
      visitOrder[offset] = active.get((start + offset) % size);
    }
    return new SpecificOrderIterator<T>(visitOrder, getObjects());
  }
}
| 9,662 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/util/SSLUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helper for managing Flume's global (JVM-wide) SSL configuration.
 * <p>
 * Each SSL parameter can be supplied either through a JVM system property or
 * through an environment variable; {@link #initGlobalSSLParameters()} copies
 * environment variable values into the corresponding system properties, with
 * an already-set system property taking precedence. The getters then expose
 * the resulting system property values.
 */
public class SSLUtil {

  private static final Logger LOGGER = LoggerFactory.getLogger(SSLUtil.class);

  // Standard JSSE system properties plus Flume-specific protocol/cipher filters.
  private static final String SYS_PROP_KEYSTORE_PATH = "javax.net.ssl.keyStore";
  private static final String SYS_PROP_KEYSTORE_PASSWORD = "javax.net.ssl.keyStorePassword";
  private static final String SYS_PROP_KEYSTORE_TYPE = "javax.net.ssl.keyStoreType";
  private static final String SYS_PROP_TRUSTSTORE_PATH = "javax.net.ssl.trustStore";
  private static final String SYS_PROP_TRUSTSTORE_PASSWORD = "javax.net.ssl.trustStorePassword";
  private static final String SYS_PROP_TRUSTSTORE_TYPE = "javax.net.ssl.trustStoreType";
  private static final String SYS_PROP_INCLUDE_PROTOCOLS = "flume.ssl.include.protocols";
  private static final String SYS_PROP_EXCLUDE_PROTOCOLS = "flume.ssl.exclude.protocols";
  private static final String SYS_PROP_INCLUDE_CIPHERSUITES = "flume.ssl.include.cipherSuites";
  private static final String SYS_PROP_EXCLUDE_CIPHERSUITES = "flume.ssl.exclude.cipherSuites";

  // Environment variable counterparts of the system properties above.
  private static final String ENV_VAR_KEYSTORE_PATH = "FLUME_SSL_KEYSTORE_PATH";
  private static final String ENV_VAR_KEYSTORE_PASSWORD = "FLUME_SSL_KEYSTORE_PASSWORD";
  private static final String ENV_VAR_KEYSTORE_TYPE = "FLUME_SSL_KEYSTORE_TYPE";
  private static final String ENV_VAR_TRUSTSTORE_PATH = "FLUME_SSL_TRUSTSTORE_PATH";
  private static final String ENV_VAR_TRUSTSTORE_PASSWORD = "FLUME_SSL_TRUSTSTORE_PASSWORD";
  private static final String ENV_VAR_TRUSTSTORE_TYPE = "FLUME_SSL_TRUSTSTORE_TYPE";
  private static final String ENV_VAR_INCLUDE_PROTOCOLS = "FLUME_SSL_INCLUDE_PROTOCOLS";
  private static final String ENV_VAR_EXCLUDE_PROTOCOLS = "FLUME_SSL_EXCLUDE_PROTOCOLS";
  private static final String ENV_VAR_INCLUDE_CIPHERSUITES = "FLUME_SSL_INCLUDE_CIPHERSUITES";
  private static final String ENV_VAR_EXCLUDE_CIPHERSUITES = "FLUME_SSL_EXCLUDE_CIPHERSUITES";

  // Human-readable names used only in log messages.
  private static final String DESCR_KEYSTORE_PATH = "keystore path";
  private static final String DESCR_KEYSTORE_PASSWORD = "keystore password";
  private static final String DESCR_KEYSTORE_TYPE = "keystore type";
  private static final String DESCR_TRUSTSTORE_PATH = "truststore path";
  private static final String DESCR_TRUSTSTORE_PASSWORD = "truststore password";
  private static final String DESCR_TRUSTSTORE_TYPE = "truststore type";
  private static final String DESCR_INCLUDE_PROTOCOLS = "include protocols";
  private static final String DESCR_EXCLUDE_PROTOCOLS = "exclude protocols";
  private static final String DESCR_INCLUDE_CIPHERSUITES = "include cipher suites";
  private static final String DESCR_EXCLUDE_CIPHERSUITES = "exclude cipher suites";

  private SSLUtil() {
    // Static utility class; not meant to be instantiated.
  }

  /**
   * Initializes every global SSL system property from its environment
   * variable counterpart, unless the system property is already set
   * (explicit system properties win).
   */
  public static void initGlobalSSLParameters() {
    initSysPropFromEnvVar(
        SYS_PROP_KEYSTORE_PATH, ENV_VAR_KEYSTORE_PATH, DESCR_KEYSTORE_PATH);
    initSysPropFromEnvVar(
        SYS_PROP_KEYSTORE_PASSWORD, ENV_VAR_KEYSTORE_PASSWORD, DESCR_KEYSTORE_PASSWORD);
    initSysPropFromEnvVar(
        SYS_PROP_KEYSTORE_TYPE, ENV_VAR_KEYSTORE_TYPE, DESCR_KEYSTORE_TYPE);
    initSysPropFromEnvVar(
        SYS_PROP_TRUSTSTORE_PATH, ENV_VAR_TRUSTSTORE_PATH, DESCR_TRUSTSTORE_PATH);
    initSysPropFromEnvVar(
        SYS_PROP_TRUSTSTORE_PASSWORD, ENV_VAR_TRUSTSTORE_PASSWORD, DESCR_TRUSTSTORE_PASSWORD);
    initSysPropFromEnvVar(
        SYS_PROP_TRUSTSTORE_TYPE, ENV_VAR_TRUSTSTORE_TYPE, DESCR_TRUSTSTORE_TYPE);
    initSysPropFromEnvVar(
        SYS_PROP_INCLUDE_PROTOCOLS, ENV_VAR_INCLUDE_PROTOCOLS, DESCR_INCLUDE_PROTOCOLS);
    initSysPropFromEnvVar(
        SYS_PROP_EXCLUDE_PROTOCOLS, ENV_VAR_EXCLUDE_PROTOCOLS, DESCR_EXCLUDE_PROTOCOLS);
    initSysPropFromEnvVar(
        SYS_PROP_INCLUDE_CIPHERSUITES, ENV_VAR_INCLUDE_CIPHERSUITES, DESCR_INCLUDE_CIPHERSUITES);
    initSysPropFromEnvVar(
        SYS_PROP_EXCLUDE_CIPHERSUITES, ENV_VAR_EXCLUDE_CIPHERSUITES, DESCR_EXCLUDE_CIPHERSUITES);
  }

  /**
   * Copies one environment variable into the given system property if the
   * property has not been set explicitly.
   *
   * @param sysPropName target system property
   * @param envVarName  source environment variable
   * @param description human-readable parameter name used in log messages
   */
  private static void initSysPropFromEnvVar(String sysPropName, String envVarName,
                                            String description) {
    if (System.getProperty(sysPropName) != null) {
      LOGGER.debug("Global SSL " + description + " has been initialized from system property.");
    } else {
      String envVarValue = System.getenv(envVarName);
      if (envVarValue != null) {
        System.setProperty(sysPropName, envVarValue);
        LOGGER.debug("Global SSL " + description +
            " has been initialized from environment variable.");
      } else {
        LOGGER.debug("No global SSL " + description + " specified.");
      }
    }
  }

  /** @return the global keystore path, or null if not configured */
  public static String getGlobalKeystorePath() {
    return System.getProperty(SYS_PROP_KEYSTORE_PATH);
  }

  /** @return the global keystore password, or null if not configured */
  public static String getGlobalKeystorePassword() {
    return System.getProperty(SYS_PROP_KEYSTORE_PASSWORD);
  }

  /** @return the global keystore type, or {@code defaultValue} if not configured */
  public static String getGlobalKeystoreType(String defaultValue) {
    String sysPropValue = System.getProperty(SYS_PROP_KEYSTORE_TYPE);
    return sysPropValue != null ? sysPropValue : defaultValue;
  }

  /** @return the global truststore path, or null if not configured */
  public static String getGlobalTruststorePath() {
    return System.getProperty(SYS_PROP_TRUSTSTORE_PATH);
  }

  /** @return the global truststore password, or null if not configured */
  public static String getGlobalTruststorePassword() {
    return System.getProperty(SYS_PROP_TRUSTSTORE_PASSWORD);
  }

  /** @return the global truststore type, or {@code defaultValue} if not configured */
  public static String getGlobalTruststoreType(String defaultValue) {
    String sysPropValue = System.getProperty(SYS_PROP_TRUSTSTORE_TYPE);
    return sysPropValue != null ? sysPropValue : defaultValue;
  }

  /** @return space-separated list of protocols to exclude, or null if unset */
  public static String getGlobalExcludeProtocols() {
    return normalizeProperty(SYS_PROP_EXCLUDE_PROTOCOLS);
  }

  /** @return space-separated list of protocols to include, or null if unset */
  public static String getGlobalIncludeProtocols() {
    return normalizeProperty(SYS_PROP_INCLUDE_PROTOCOLS);
  }

  /** @return space-separated list of cipher suites to exclude, or null if unset */
  public static String getGlobalExcludeCipherSuites() {
    return normalizeProperty(SYS_PROP_EXCLUDE_CIPHERSUITES);
  }

  /** @return space-separated list of cipher suites to include, or null if unset */
  public static String getGlobalIncludeCipherSuites() {
    return normalizeProperty(SYS_PROP_INCLUDE_CIPHERSUITES);
  }

  /**
   * Reads a comma-separated list property and converts it to the
   * space-separated form expected downstream.
   *
   * @param name system property name
   * @return the normalized value, or null if the property is unset
   */
  private static String normalizeProperty(String name) {
    String property = System.getProperty(name);
    // ',' is a literal separator here, so plain replace() is the right call
    // (replaceAll would interpret its first argument as a regex).
    return property == null ? null : property.replace(',', ' ');
  }
}
| 9,663 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/util/RandomOrderSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.util;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
/**
 * An implementation of OrderSelector which returns objects in random order.
 * Also supports backoff.
 */
public class RandomOrderSelector<T> extends OrderSelector<T> {

  // Shared PRNG; access is serialized by synchronizing createIterator().
  private Random random = new Random(System.currentTimeMillis());

  public RandomOrderSelector(boolean shouldBackOff) {
    super(shouldBackOff);
  }

  /**
   * Builds an iterator over the currently active objects in uniformly
   * random order.
   * <p>
   * If every object is currently backed off, the active index list is
   * empty and an empty iteration order is returned instead of failing:
   * without the guard the shuffle below would call
   * {@code Random.nextInt(0)}, which throws IllegalArgumentException.
   *
   * @return iterator over the active objects in random order
   */
  @Override
  public synchronized Iterator<T> createIterator() {
    List<Integer> indexList = getIndexList();
    int size = indexList.size();
    int[] indexOrder = new int[size];
    if (size > 0) {
      // Shuffle by repeatedly removing a random element and filling the
      // result array from the back; the last remaining element goes first.
      while (indexList.size() != 1) {
        int pick = random.nextInt(indexList.size());
        indexOrder[indexList.size() - 1] = indexList.remove(pick);
      }
      indexOrder[0] = indexList.get(0);
    }
    return new SpecificOrderIterator<T>(indexOrder, getObjects());
  }
}
| 9,664 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/util/OrderSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.util;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * A basic implementation of an order selector that implements a simple
 * exponential backoff algorithm. Subclasses can use the same algorithm for
 * backoff by simply overriding <tt>createIterator</tt> method to order the
 * list of active sinks returned by <tt>getIndexList</tt> method. Classes
 * instantiating subclasses of this class are expected to call <tt>informFailure</tt>
 * method when an object passed to this class should be marked as failed and backed off.
 *
 * When implementing a different backoff algorithm, a subclass should
 * minimally override <tt>informFailure</tt> and <tt>getIndexList</tt> methods.
 *
 * @param <T> - The class on which ordering is to be done
 */
public abstract class OrderSelector<T> {

  // Cap on the exponent used for backoff growth; bounds (1 << sequentialFails)
  // and therefore the computed delay.
  private static final int EXP_BACKOFF_COUNTER_LIMIT = 16;
  // A failure within this window after the previous backoff expired is treated
  // as a continuation of the same outage (the counter keeps growing).
  private static final long CONSIDER_SEQUENTIAL_RANGE = TimeUnit.HOURS.toMillis(1);
  // Default upper bound on a single backoff period, in milliseconds.
  private static final long MAX_TIMEOUT = 30000L;

  // Insertion-ordered so getObjects()/getIndexList() preserve the order in
  // which objects were registered via setObjects().
  private final Map<T, FailureState> stateMap =
      new LinkedHashMap<T, FailureState>();
  private long maxTimeout = MAX_TIMEOUT;
  private final boolean shouldBackOff;

  protected OrderSelector(boolean shouldBackOff) {
    this.shouldBackOff = shouldBackOff;
  }

  /**
   * Set the list of objects which this class should return in order.
   * Each object starts with a clean (non-backed-off) failure state.
   *
   * @param objects objects to track
   */
  public void setObjects(List<T> objects) {
    //Order is the same as the original order.
    for (T sink : objects) {
      stateMap.put(sink, new FailureState());
    }
  }

  /**
   * Get the list of objects to be ordered. This list is in the same order
   * as originally passed in, not in the algorithmically reordered order.
   * @return - list of objects to be ordered.
   */
  public List<T> getObjects() {
    return new ArrayList<T>(stateMap.keySet());
  }

  /**
   *
   * @return - list of algorithmically ordered active sinks
   */
  public abstract Iterator<T> createIterator();

  /**
   * Inform this class of the failure of an object so it can be backed off.
   * Unknown objects (never registered via {@link #setObjects}) are ignored.
   *
   * @param failedObject the object that just failed
   */
  public void informFailure(T failedObject) {
    //If there is no backoff this method is a no-op.
    if (!shouldBackOff) {
      return;
    }
    FailureState state = stateMap.get(failedObject);
    if (state == null) {
      // Object was never registered; nothing to back off.
      return;
    }
    long now = System.currentTimeMillis();
    long delta = now - state.lastFail;

    /*
     * When do we increase the backoff period?
     * We basically calculate the time difference between the last failure
     * and the current one. If this failure happened within one hour of the
     * last backoff period getting over, then we increase the timeout,
     * since the object did not recover yet. Else we assume this is a fresh
     * failure and reset the count.
     */
    long lastBackoffLength = Math.min(maxTimeout, 1000 * (1 << state.sequentialFails));
    long allowableDiff = lastBackoffLength + CONSIDER_SEQUENTIAL_RANGE;
    if (allowableDiff > delta) {
      if (state.sequentialFails < EXP_BACKOFF_COUNTER_LIMIT) {
        state.sequentialFails++;
      }
    } else {
      state.sequentialFails = 1;
    }
    state.lastFail = now;
    //Depending on the number of sequential failures this component had, delay
    //its restore time. Each time it fails, delay the restore by 1000 ms,
    //until the maxTimeOut is reached.
    state.restoreTime = now + Math.min(maxTimeout, 1000 * (1 << state.sequentialFails));
  }

  /**
   *
   * @return - List of indices currently active objects
   */
  protected List<Integer> getIndexList() {
    long now = System.currentTimeMillis();
    List<Integer> indexList = new ArrayList<Integer>();
    int i = 0;
    // Iterate entries directly to avoid a redundant map lookup per key.
    for (Map.Entry<T, FailureState> entry : stateMap.entrySet()) {
      if (!shouldBackOff || entry.getValue().restoreTime < now) {
        indexList.add(i);
      }
      i++;
    }
    return indexList;
  }

  public boolean isShouldBackOff() {
    return shouldBackOff;
  }

  public void setMaxTimeOut(long timeout) {
    this.maxTimeout = timeout;
  }

  public long getMaxTimeOut() {
    return this.maxTimeout;
  }

  /** Per-object failure bookkeeping used by the backoff algorithm. */
  private static class FailureState {
    long lastFail = 0;       // timestamp of the most recent failure
    long restoreTime = 0;    // time at which the object becomes active again
    int sequentialFails = 0; // consecutive-failure counter driving the exponent
  }
}
| 9,665 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/thrift/Status.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.thrift;
import java.util.Map;
import java.util.HashMap;
import org.apache.thrift.TEnum;
/**
 * Delivery status codes for the Flume Thrift source protocol, as declared
 * in the Thrift IDL.
 */
public enum Status implements org.apache.thrift.TEnum {
  OK(0),
  FAILED(1),
  ERROR(2),
  UNKNOWN(3);

  /** Wire value of this constant, as defined in the Thrift IDL. */
  private final int value;

  Status(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  public static Status findByValue(int value) {
    for (Status status : values()) {
      if (status.value == value) {
        return status;
      }
    }
    return null;
  }
}
| 9,666 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/thrift/ThriftSourceProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-09-01")
public class ThriftSourceProtocol {
// Synchronous service contract for the Flume Thrift source: single-event and
// batch append calls, each returning a delivery Status.
public interface Iface {
public Status append(ThriftFlumeEvent event) throws org.apache.thrift.TException;
public Status appendBatch(List<ThriftFlumeEvent> events) throws org.apache.thrift.TException;
}
// Asynchronous variant of Iface: results are delivered through the supplied
// AsyncMethodCallback instead of a return value.
public interface AsyncIface {
public void append(ThriftFlumeEvent event, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void appendBatch(List<ThriftFlumeEvent> events, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
}
// Synchronous RPC client generated by the Thrift compiler. Each public call
// serializes its arguments over the underlying protocol and blocks until the
// matching result message is received.
public static class Client extends org.apache.thrift.TServiceClient implements Iface {
// Factory used by Thrift plumbing to mint Client instances over a given
// protocol (single- or dual-protocol form).
public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
public Factory() {}
public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
return new Client(prot);
}
public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
return new Client(iprot, oprot);
}
}
public Client(org.apache.thrift.protocol.TProtocol prot)
{
super(prot, prot);
}
public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
super(iprot, oprot);
}
// Blocking single-event append: send the request, then wait for the reply.
public Status append(ThriftFlumeEvent event) throws org.apache.thrift.TException
{
send_append(event);
return recv_append();
}
public void send_append(ThriftFlumeEvent event) throws org.apache.thrift.TException
{
append_args args = new append_args();
args.setEvent(event);
sendBase("append", args);
}
public Status recv_append() throws org.apache.thrift.TException
{
append_result result = new append_result();
receiveBase(result, "append");
if (result.isSetSuccess()) {
return result.success;
}
// A reply that carries no success field means the server produced no result.
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "append failed: unknown result");
}
// Blocking batch append: send the event list, then wait for the reply.
public Status appendBatch(List<ThriftFlumeEvent> events) throws org.apache.thrift.TException
{
send_appendBatch(events);
return recv_appendBatch();
}
public void send_appendBatch(List<ThriftFlumeEvent> events) throws org.apache.thrift.TException
{
appendBatch_args args = new appendBatch_args();
args.setEvents(events);
sendBase("appendBatch", args);
}
public Status recv_appendBatch() throws org.apache.thrift.TException
{
appendBatch_result result = new appendBatch_result();
receiveBase(result, "appendBatch");
if (result.isSetSuccess()) {
return result.success;
}
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "appendBatch failed: unknown result");
}
}
// Non-blocking client generated by the Thrift compiler: each method registers
// an async method call with the client manager and reports completion through
// the caller's callback.
public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
private org.apache.thrift.async.TAsyncClientManager clientManager;
private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
this.clientManager = clientManager;
this.protocolFactory = protocolFactory;
}
public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
return new AsyncClient(protocolFactory, clientManager, transport);
}
}
public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
super(protocolFactory, clientManager, transport);
}
public void append(ThriftFlumeEvent event, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
append_call method_call = new append_call(event, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
// Encapsulates one in-flight append RPC: argument serialization plus
// response decoding.
public static class append_call extends org.apache.thrift.async.TAsyncMethodCall {
private ThriftFlumeEvent event;
public append_call(ThriftFlumeEvent event, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
this.event = event;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("append", org.apache.thrift.protocol.TMessageType.CALL, 0));
append_args args = new append_args();
args.setEvent(event);
args.write(prot);
prot.writeMessageEnd();
}
public Status getResult() throws org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
// Replay the buffered response frame through a synchronous Client to
// reuse its result-decoding logic.
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_append();
}
}
public void appendBatch(List<ThriftFlumeEvent> events, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
appendBatch_call method_call = new appendBatch_call(events, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
// Batch counterpart of append_call.
public static class appendBatch_call extends org.apache.thrift.async.TAsyncMethodCall {
private List<ThriftFlumeEvent> events;
public appendBatch_call(List<ThriftFlumeEvent> events, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
this.events = events;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("appendBatch", org.apache.thrift.protocol.TMessageType.CALL, 0));
appendBatch_args args = new appendBatch_args();
args.setEvents(events);
args.write(prot);
prot.writeMessageEnd();
}
public Status getResult() throws org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
return (new Client(prot)).recv_appendBatch();
}
}
}
// Server-side synchronous dispatcher generated by the Thrift compiler: maps
// incoming method names ("append"/"appendBatch") to handler invocations on
// the user-supplied Iface implementation.
public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
public Processor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
}
protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
super(iface, getProcessMap(processMap));
}
// Registers one ProcessFunction per RPC method name.
private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
processMap.put("append", new append());
processMap.put("appendBatch", new appendBatch());
return processMap;
}
// Handler adapter for the "append" method.
public static class append<I extends Iface> extends org.apache.thrift.ProcessFunction<I, append_args> {
public append() {
super("append");
}
public append_args getEmptyArgsInstance() {
return new append_args();
}
protected boolean isOneway() {
return false;
}
public append_result getResult(I iface, append_args args) throws org.apache.thrift.TException {
append_result result = new append_result();
result.success = iface.append(args.event);
return result;
}
}
// Handler adapter for the "appendBatch" method.
public static class appendBatch<I extends Iface> extends org.apache.thrift.ProcessFunction<I, appendBatch_args> {
public appendBatch() {
super("appendBatch");
}
public appendBatch_args getEmptyArgsInstance() {
return new appendBatch_args();
}
protected boolean isOneway() {
return false;
}
public appendBatch_result getResult(I iface, appendBatch_args args) throws org.apache.thrift.TException {
appendBatch_result result = new appendBatch_result();
result.success = iface.appendBatch(args.events);
return result;
}
}
}
// Server-side asynchronous dispatcher generated by the Thrift compiler: like
// Processor, but handlers complete via callbacks that write the reply (or an
// exception frame) back to the transport when invoked.
public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
public AsyncProcessor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
}
protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
super(iface, getProcessMap(processMap));
}
// Registers one AsyncProcessFunction per RPC method name.
private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase,?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
processMap.put("append", new append());
processMap.put("appendBatch", new appendBatch());
return processMap;
}
// Async handler adapter for the "append" method.
public static class append<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_args, Status> {
public append() {
super("append");
}
public append_args getEmptyArgsInstance() {
return new append_args();
}
// Builds the callback that serializes the handler's outcome (success or
// error) back onto the frame buffer for this request.
public AsyncMethodCallback<Status> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Status>() {
public void onComplete(Status o) {
append_result result = new append_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
// Only reached when the response could not be written; drop the frame.
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
append_result result = new append_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, append_args args, org.apache.thrift.async.AsyncMethodCallback<Status> resultHandler) throws TException {
iface.append(args.event,resultHandler);
}
}
// Async handler adapter for the "appendBatch" method.
public static class appendBatch<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, appendBatch_args, Status> {
public appendBatch() {
super("appendBatch");
}
public appendBatch_args getEmptyArgsInstance() {
return new appendBatch_args();
}
public AsyncMethodCallback<Status> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Status>() {
public void onComplete(Status o) {
appendBatch_result result = new appendBatch_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
} catch (Exception e) {
LOGGER.error("Exception writing to internal frame buffer", e);
}
fb.close();
}
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
appendBatch_result result = new appendBatch_result();
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
}
try {
fcall.sendResponse(fb,msg,msgType,seqid);
return;
} catch (Exception ex) {
LOGGER.error("Exception writing to internal frame buffer", ex);
}
fb.close();
}
};
}
protected boolean isOneway() {
return false;
}
public void start(I iface, appendBatch_args args, org.apache.thrift.async.AsyncMethodCallback<Status> resultHandler) throws TException {
iface.appendBatch(args.events,resultHandler);
}
}
}
public static class append_args implements org.apache.thrift.TBase<append_args, append_args._Fields>, java.io.Serializable, Cloneable, Comparable<append_args> {
// Wire-level metadata for the append_args struct: struct descriptor, field
// descriptor for the single "event" field (id 1), and the serialization
// scheme registry (standard binary vs. compact tuple encoding).
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_args");
private static final org.apache.thrift.protocol.TField EVENT_FIELD_DESC = new org.apache.thrift.protocol.TField("event", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new append_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new append_argsTupleSchemeFactory());
}
public ThriftFlumeEvent event; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
EVENT((short)1, "event");
// Lookup table from field name to constant, built once at class load.
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
 * Find the _Fields constant that matches fieldId, or null if its not found.
 */
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // EVENT
return EVENT;
default:
return null;
}
}
/**
 * Find the _Fields constant that matches fieldId, throwing an exception
 * if it is not found.
 */
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
 * Find the _Fields constant that matches name, or null if its not found.
 */
public static _Fields findByName(String name) {
return byName.get(name);
}
// Thrift field id and IDL field name for this constant.
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
// Field metadata registered with the global Thrift metadata registry; used
// by reflection-based serializers and tooling.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.EVENT, new org.apache.thrift.meta_data.FieldMetaData("event", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ThriftFlumeEvent.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_args.class, metaDataMap);
}
// Constructors, deep-copy support and generated accessors for the single
// "event" field. "Set" for a struct field means non-null.
public append_args() {
}
public append_args(
ThriftFlumeEvent event)
{
this();
this.event = event;
}
/**
 * Performs a deep copy on <i>other</i>.
 */
public append_args(append_args other) {
if (other.isSetEvent()) {
this.event = new ThriftFlumeEvent(other.event);
}
}
public append_args deepCopy() {
return new append_args(this);
}
@Override
public void clear() {
this.event = null;
}
public ThriftFlumeEvent getEvent() {
return this.event;
}
// Fluent setter (returns this) as generated by the Thrift compiler.
public append_args setEvent(ThriftFlumeEvent event) {
this.event = event;
return this;
}
public void unsetEvent() {
this.event = null;
}
/** Returns true if field event is set (has been assigned a value) and false otherwise */
public boolean isSetEvent() {
return this.event != null;
}
public void setEventIsSet(boolean value) {
if (!value) {
this.event = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case EVENT:
if (value == null) {
unsetEvent();
} else {
setEvent((ThriftFlumeEvent)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case EVENT:
return getEvent();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case EVENT:
return isSetEvent();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof append_args)
return this.equals((append_args)that);
return false;
}
public boolean equals(append_args that) {
if (that == null)
return false;
boolean this_present_event = true && this.isSetEvent();
boolean that_present_event = true && that.isSetEvent();
if (this_present_event || that_present_event) {
if (!(this_present_event && that_present_event))
return false;
if (!this.event.equals(that.event))
return false;
}
return true;
}
@Override
public int hashCode() {
List<Object> list = new ArrayList<Object>();
boolean present_event = true && (isSetEvent());
list.add(present_event);
if (present_event)
list.add(event);
return list.hashCode();
}
@Override
public int compareTo(append_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetEvent()).compareTo(other.isSetEvent());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetEvent()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.event, other.event);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("append_args(");
boolean first = true;
sb.append("event:");
if (this.event == null) {
sb.append("null");
} else {
sb.append(this.event);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
if (event != null) {
event.validate();
}
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class append_argsStandardSchemeFactory implements SchemeFactory {
public append_argsStandardScheme getScheme() {
return new append_argsStandardScheme();
}
}
private static class append_argsStandardScheme extends StandardScheme<append_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // EVENT
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.event = new ThriftFlumeEvent();
struct.event.read(iprot);
struct.setEventIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.event != null) {
oprot.writeFieldBegin(EVENT_FIELD_DESC);
struct.event.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class append_argsTupleSchemeFactory implements SchemeFactory {
public append_argsTupleScheme getScheme() {
return new append_argsTupleScheme();
}
}
private static class append_argsTupleScheme extends TupleScheme<append_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetEvent()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetEvent()) {
struct.event.write(oprot);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.event = new ThriftFlumeEvent();
struct.event.read(iprot);
struct.setEventIsSet(true);
}
}
}
}
  // NOTE(review): Apache Thrift auto-generated result wrapper for the append() RPC
  // (single enum field "success" of type Status). Do not hand-edit — regenerate
  // from the .thrift IDL instead.
  public static class append_result implements org.apache.thrift.TBase<append_result, append_result._Fields>, java.io.Serializable, Cloneable, Comparable<append_result> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_result");
    // Enums travel on the wire as I32; field id 0 is the conventional Thrift return slot.
    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new append_resultStandardSchemeFactory());
      schemes.put(TupleScheme.class, new append_resultTupleSchemeFactory());
    }
    /**
     *
     * @see Status
     */
    public Status success; // required
    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      /**
       *
       * @see Status
       */
      SUCCESS((short)0, "success");
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch(fieldId) {
          case 0: // SUCCESS
            return SUCCESS;
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception
       * if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    // isset id assignments
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Status.class)));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_result.class, metaDataMap);
    }
    public append_result() {
    }
    public append_result(
      Status success)
    {
      this();
      this.success = success;
    }
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public append_result(append_result other) {
      if (other.isSetSuccess()) {
        this.success = other.success;
      }
    }
    public append_result deepCopy() {
      return new append_result(this);
    }
    @Override
    public void clear() {
      this.success = null;
    }
    /**
     *
     * @see Status
     */
    public Status getSuccess() {
      return this.success;
    }
    /**
     *
     * @see Status
     */
    public append_result setSuccess(Status success) {
      this.success = success;
      return this;
    }
    public void unsetSuccess() {
      this.success = null;
    }
    /** Returns true if field success is set (has been assigned a value) and false otherwise */
    public boolean isSetSuccess() {
      return this.success != null;
    }
    public void setSuccessIsSet(boolean value) {
      if (!value) {
        this.success = null;
      }
    }
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      case SUCCESS:
        if (value == null) {
          unsetSuccess();
        } else {
          setSuccess((Status)value);
        }
        break;
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      case SUCCESS:
        return getSuccess();
      }
      throw new IllegalStateException();
    }
    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      case SUCCESS:
        return isSetSuccess();
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof append_result)
        return this.equals((append_result)that);
      return false;
    }
    public boolean equals(append_result that) {
      if (that == null)
        return false;
      boolean this_present_success = true && this.isSetSuccess();
      boolean that_present_success = true && that.isSetSuccess();
      if (this_present_success || that_present_success) {
        if (!(this_present_success && that_present_success))
          return false;
        if (!this.success.equals(that.success))
          return false;
      }
      return true;
    }
    @Override
    public int hashCode() {
      List<Object> list = new ArrayList<Object>();
      boolean present_success = true && (isSetSuccess());
      list.add(present_success);
      if (present_success)
        list.add(success.getValue());
      return list.hashCode();
    }
    @Override
    public int compareTo(append_result other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0;
      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetSuccess()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    // Serialization is delegated to the scheme (standard or tuple) matching the protocol.
    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("append_result(");
      boolean first = true;
      sb.append("success:");
      if (this.success == null) {
        sb.append("null");
      } else {
        sb.append(this.success);
      }
      first = false;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization hooks: encode/decode via Thrift's compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class append_resultStandardSchemeFactory implements SchemeFactory {
      public append_resultStandardScheme getScheme() {
        return new append_resultStandardScheme();
      }
    }
    // Field-tagged (self-describing) wire encoding; unknown fields are skipped.
    private static class append_resultStandardScheme extends StandardScheme<append_result> {
      public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
        {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                // findByValue maps the raw I32 back to the Status enum (null if unknown).
                struct.success = org.apache.flume.thrift.Status.findByValue(iprot.readI32());
                struct.setSuccessIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, append_result struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        if (struct.success != null) {
          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
          oprot.writeI32(struct.success.getValue());
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class append_resultTupleSchemeFactory implements SchemeFactory {
      public append_resultTupleScheme getScheme() {
        return new append_resultTupleScheme();
      }
    }
    // Compact positional encoding: a presence bitset followed by the set fields only.
    private static class append_resultTupleScheme extends TupleScheme<append_result> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
        if (struct.isSetSuccess()) {
          oprot.writeI32(struct.success.getValue());
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          struct.success = org.apache.flume.thrift.Status.findByValue(iprot.readI32());
          struct.setSuccessIsSet(true);
        }
      }
    }
  }
  // NOTE(review): Apache Thrift auto-generated argument wrapper for the
  // appendBatch() RPC (single list field "events"). Do not hand-edit —
  // regenerate from the .thrift IDL instead.
  public static class appendBatch_args implements org.apache.thrift.TBase<appendBatch_args, appendBatch_args._Fields>, java.io.Serializable, Cloneable, Comparable<appendBatch_args> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("appendBatch_args");
    private static final org.apache.thrift.protocol.TField EVENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("events", org.apache.thrift.protocol.TType.LIST, (short)1);
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new appendBatch_argsStandardSchemeFactory());
      schemes.put(TupleScheme.class, new appendBatch_argsTupleSchemeFactory());
    }
    public List<ThriftFlumeEvent> events; // required
    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      EVENTS((short)1, "events");
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch(fieldId) {
          case 1: // EVENTS
            return EVENTS;
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception
       * if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    // isset id assignments
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      tmpMap.put(_Fields.EVENTS, new org.apache.thrift.meta_data.FieldMetaData("events", org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ThriftFlumeEvent.class))));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(appendBatch_args.class, metaDataMap);
    }
    public appendBatch_args() {
    }
    public appendBatch_args(
      List<ThriftFlumeEvent> events)
    {
      this();
      this.events = events;
    }
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public appendBatch_args(appendBatch_args other) {
      if (other.isSetEvents()) {
        // Deep copy: each contained event is cloned, not just the list.
        List<ThriftFlumeEvent> __this__events = new ArrayList<ThriftFlumeEvent>(other.events.size());
        for (ThriftFlumeEvent other_element : other.events) {
          __this__events.add(new ThriftFlumeEvent(other_element));
        }
        this.events = __this__events;
      }
    }
    public appendBatch_args deepCopy() {
      return new appendBatch_args(this);
    }
    @Override
    public void clear() {
      this.events = null;
    }
    public int getEventsSize() {
      return (this.events == null) ? 0 : this.events.size();
    }
    public java.util.Iterator<ThriftFlumeEvent> getEventsIterator() {
      return (this.events == null) ? null : this.events.iterator();
    }
    public void addToEvents(ThriftFlumeEvent elem) {
      if (this.events == null) {
        this.events = new ArrayList<ThriftFlumeEvent>();
      }
      this.events.add(elem);
    }
    public List<ThriftFlumeEvent> getEvents() {
      return this.events;
    }
    // Fluent setter: returns this so calls can be chained.
    public appendBatch_args setEvents(List<ThriftFlumeEvent> events) {
      this.events = events;
      return this;
    }
    public void unsetEvents() {
      this.events = null;
    }
    /** Returns true if field events is set (has been assigned a value) and false otherwise */
    public boolean isSetEvents() {
      return this.events != null;
    }
    public void setEventsIsSet(boolean value) {
      if (!value) {
        this.events = null;
      }
    }
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      case EVENTS:
        if (value == null) {
          unsetEvents();
        } else {
          setEvents((List<ThriftFlumeEvent>)value);
        }
        break;
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      case EVENTS:
        return getEvents();
      }
      throw new IllegalStateException();
    }
    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      case EVENTS:
        return isSetEvents();
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof appendBatch_args)
        return this.equals((appendBatch_args)that);
      return false;
    }
    public boolean equals(appendBatch_args that) {
      if (that == null)
        return false;
      boolean this_present_events = true && this.isSetEvents();
      boolean that_present_events = true && that.isSetEvents();
      if (this_present_events || that_present_events) {
        if (!(this_present_events && that_present_events))
          return false;
        if (!this.events.equals(that.events))
          return false;
      }
      return true;
    }
    @Override
    public int hashCode() {
      List<Object> list = new ArrayList<Object>();
      boolean present_events = true && (isSetEvents());
      list.add(present_events);
      if (present_events)
        list.add(events);
      return list.hashCode();
    }
    @Override
    public int compareTo(appendBatch_args other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0;
      lastComparison = Boolean.valueOf(isSetEvents()).compareTo(other.isSetEvents());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetEvents()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.events, other.events);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    // Serialization is delegated to the scheme (standard or tuple) matching the protocol.
    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("appendBatch_args(");
      boolean first = true;
      sb.append("events:");
      if (this.events == null) {
        sb.append("null");
      } else {
        sb.append(this.events);
      }
      first = false;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization hooks: encode/decode via Thrift's compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class appendBatch_argsStandardSchemeFactory implements SchemeFactory {
      public appendBatch_argsStandardScheme getScheme() {
        return new appendBatch_argsStandardScheme();
      }
    }
    // Field-tagged (self-describing) wire encoding; unknown fields are skipped.
    private static class appendBatch_argsStandardScheme extends StandardScheme<appendBatch_args> {
      public void read(org.apache.thrift.protocol.TProtocol iprot, appendBatch_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
        {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 1: // EVENTS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
                  org.apache.thrift.protocol.TList _list10 = iprot.readListBegin();
                  struct.events = new ArrayList<ThriftFlumeEvent>(_list10.size);
                  ThriftFlumeEvent _elem11;
                  for (int _i12 = 0; _i12 < _list10.size; ++_i12)
                  {
                    _elem11 = new ThriftFlumeEvent();
                    _elem11.read(iprot);
                    struct.events.add(_elem11);
                  }
                  iprot.readListEnd();
                }
                struct.setEventsIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, appendBatch_args struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        if (struct.events != null) {
          oprot.writeFieldBegin(EVENTS_FIELD_DESC);
          {
            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size()));
            for (ThriftFlumeEvent _iter13 : struct.events)
            {
              _iter13.write(oprot);
            }
            oprot.writeListEnd();
          }
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class appendBatch_argsTupleSchemeFactory implements SchemeFactory {
      public appendBatch_argsTupleScheme getScheme() {
        return new appendBatch_argsTupleScheme();
      }
    }
    // Compact positional encoding: a presence bitset, then list size, then elements.
    private static class appendBatch_argsTupleScheme extends TupleScheme<appendBatch_args> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, appendBatch_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetEvents()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
        if (struct.isSetEvents()) {
          {
            oprot.writeI32(struct.events.size());
            for (ThriftFlumeEvent _iter14 : struct.events)
            {
              _iter14.write(oprot);
            }
          }
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, appendBatch_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
            org.apache.thrift.protocol.TList _list15 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
            struct.events = new ArrayList<ThriftFlumeEvent>(_list15.size);
            ThriftFlumeEvent _elem16;
            for (int _i17 = 0; _i17 < _list15.size; ++_i17)
            {
              _elem16 = new ThriftFlumeEvent();
              _elem16.read(iprot);
              struct.events.add(_elem16);
            }
          }
          struct.setEventsIsSet(true);
        }
      }
    }
  }
public static class appendBatch_result implements org.apache.thrift.TBase<appendBatch_result, appendBatch_result._Fields>, java.io.Serializable, Cloneable, Comparable<appendBatch_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("appendBatch_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new appendBatch_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new appendBatch_resultTupleSchemeFactory());
}
/**
*
* @see Status
*/
public Status success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
/**
*
* @see Status
*/
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, Status.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(appendBatch_result.class, metaDataMap);
}
public appendBatch_result() {
}
public appendBatch_result(
Status success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public appendBatch_result(appendBatch_result other) {
if (other.isSetSuccess()) {
this.success = other.success;
}
}
public appendBatch_result deepCopy() {
return new appendBatch_result(this);
}
@Override
public void clear() {
this.success = null;
}
/**
*
* @see Status
*/
public Status getSuccess() {
return this.success;
}
/**
*
* @see Status
*/
public appendBatch_result setSuccess(Status success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((Status)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof appendBatch_result)
return this.equals((appendBatch_result)that);
return false;
}
public boolean equals(appendBatch_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
/**
 * Hash consistent with equals(): hashes the presence flag, plus the enum's
 * integer wire value when set.
 */
@Override
public int hashCode() {
  List<Object> list = new ArrayList<Object>();
  boolean present_success = true && (isSetSuccess());
  list.add(present_success);
  if (present_success)
    list.add(success.getValue());
  return list.hashCode();
}
/**
 * Orders first by class name (for mixed-type comparisons), then by whether
 * the success field is set, then by the field value itself.
 */
@Override
public int compareTo(appendBatch_result other) {
  if (!getClass().equals(other.getClass())) {
    return getClass().getName().compareTo(other.getClass().getName());
  }
  int lastComparison = 0;
  lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
  if (lastComparison != 0) {
    return lastComparison;
  }
  if (isSetSuccess()) {
    lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
    if (lastComparison != 0) {
      return lastComparison;
    }
  }
  return 0;
}
/** Maps a wire-level field id to its {@code _Fields} constant (null if unknown). */
public _Fields fieldForId(int fieldId) {
  return _Fields.findByThriftId(fieldId);
}
/** Deserializes this struct using the scheme matching the protocol (standard or tuple). */
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
  schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
/** Serializes this struct using the scheme matching the protocol (standard or tuple). */
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
  schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
/** Human-readable rendering of the struct for logging/debugging. */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("appendBatch_result(");
  // 'first' would guard comma separators between fields; this struct has
  // only one field, so it is effectively unused (generated-code pattern).
  boolean first = true;
  sb.append("success:");
  if (this.success == null) {
    sb.append("null");
  } else {
    sb.append(this.success);
  }
  first = false;
  sb.append(")");
  return sb.toString();
}
/** No required fields and no sub-structs in this result, so nothing to validate. */
public void validate() throws org.apache.thrift.TException {
  // check for required fields
  // check for sub-struct validity
}
/** Java serialization hook: encodes the struct with the Thrift compact protocol. */
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
  try {
    write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
  } catch (org.apache.thrift.TException te) {
    throw new java.io.IOException(te);
  }
}
/** Java deserialization hook: decodes the struct with the Thrift compact protocol. */
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
  try {
    read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
  } catch (org.apache.thrift.TException te) {
    throw new java.io.IOException(te);
  }
}
/** Factory producing the field-tagged (standard) serialization scheme. */
private static class appendBatch_resultStandardSchemeFactory implements SchemeFactory {
  public appendBatch_resultStandardScheme getScheme() {
    return new appendBatch_resultStandardScheme();
  }
}
/**
 * Standard (field-tagged) wire encoding: each field is written with its id
 * and type, so unknown fields can be skipped for forward compatibility.
 */
private static class appendBatch_resultStandardScheme extends StandardScheme<appendBatch_result> {
  public void read(org.apache.thrift.protocol.TProtocol iprot, appendBatch_result struct) throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TField schemeField;
    iprot.readStructBegin();
    // Read fields until the STOP marker; skip anything unrecognized.
    while (true)
    {
      schemeField = iprot.readFieldBegin();
      if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
        break;
      }
      switch (schemeField.id) {
        case 0: // SUCCESS
          if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
            // The Status enum travels as its i32 wire value.
            struct.success = org.apache.flume.thrift.Status.findByValue(iprot.readI32());
            struct.setSuccessIsSet(true);
          } else {
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          break;
        default:
          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    // check for required fields of primitive type, which can't be checked in the validate method
    struct.validate();
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot, appendBatch_result struct) throws org.apache.thrift.TException {
    struct.validate();
    oprot.writeStructBegin(STRUCT_DESC);
    // An unset (null) success field is simply omitted from the output.
    if (struct.success != null) {
      oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
      oprot.writeI32(struct.success.getValue());
      oprot.writeFieldEnd();
    }
    oprot.writeFieldStop();
    oprot.writeStructEnd();
  }
}
/** Factory producing the compact tuple serialization scheme. */
private static class appendBatch_resultTupleSchemeFactory implements SchemeFactory {
  public appendBatch_resultTupleScheme getScheme() {
    return new appendBatch_resultTupleScheme();
  }
}
/**
 * Compact tuple encoding: a presence bit set (1 bit for the optional
 * success field) followed by the values of the fields that are present.
 */
private static class appendBatch_resultTupleScheme extends TupleScheme<appendBatch_result> {
  @Override
  public void write(org.apache.thrift.protocol.TProtocol prot, appendBatch_result struct) throws org.apache.thrift.TException {
    TTupleProtocol oprot = (TTupleProtocol) prot;
    BitSet optionals = new BitSet();
    if (struct.isSetSuccess()) {
      optionals.set(0);
    }
    oprot.writeBitSet(optionals, 1);
    if (struct.isSetSuccess()) {
      oprot.writeI32(struct.success.getValue());
    }
  }
  @Override
  public void read(org.apache.thrift.protocol.TProtocol prot, appendBatch_result struct) throws org.apache.thrift.TException {
    TTupleProtocol iprot = (TTupleProtocol) prot;
    BitSet incoming = iprot.readBitSet(1);
    if (incoming.get(0)) {
      struct.success = org.apache.flume.thrift.Status.findByValue(iprot.readI32());
      struct.setSuccessIsSet(true);
    }
  }
}
}
}
| 9,667 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/thrift/ThriftFlumeEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.3)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.flume.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thrift-generated struct representing a single Flume event on the wire:
 * a map of String headers plus an opaque binary body. Both fields are
 * declared REQUIRED in the IDL (see metaDataMap and validate()).
 * Generated code — do not hand-edit logic; it will be regenerated.
 */
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-09-01")
public class ThriftFlumeEvent implements org.apache.thrift.TBase<ThriftFlumeEvent, ThriftFlumeEvent._Fields>, java.io.Serializable, Cloneable, Comparable<ThriftFlumeEvent> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftFlumeEvent");
  private static final org.apache.thrift.protocol.TField HEADERS_FIELD_DESC = new org.apache.thrift.protocol.TField("headers", org.apache.thrift.protocol.TType.MAP, (short)1);
  private static final org.apache.thrift.protocol.TField BODY_FIELD_DESC = new org.apache.thrift.protocol.TField("body", org.apache.thrift.protocol.TType.STRING, (short)2);
  // Maps scheme class -> factory; chosen at read/write time by the protocol.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new ThriftFlumeEventStandardSchemeFactory());
    schemes.put(TupleScheme.class, new ThriftFlumeEventTupleSchemeFactory());
  }
  public Map<String,String> headers; // required
  public ByteBuffer body; // required
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    HEADERS((short)1, "headers"),
    BODY((short)2, "body");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // HEADERS
          return HEADERS;
        case 2: // BODY
          return BODY;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    // Both fields are REQUIRED; validate() enforces their presence.
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.HEADERS, new org.apache.thrift.meta_data.FieldMetaData("headers", org.apache.thrift.TFieldRequirementType.REQUIRED,
        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    tmpMap.put(_Fields.BODY, new org.apache.thrift.meta_data.FieldMetaData("body", org.apache.thrift.TFieldRequirementType.REQUIRED,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ThriftFlumeEvent.class, metaDataMap);
  }
  public ThriftFlumeEvent() {
  }
  public ThriftFlumeEvent(
    Map<String,String> headers,
    ByteBuffer body)
  {
    this();
    this.headers = headers;
    // Defensive copy: the caller's buffer is not retained.
    this.body = org.apache.thrift.TBaseHelper.copyBinary(body);
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public ThriftFlumeEvent(ThriftFlumeEvent other) {
    if (other.isSetHeaders()) {
      Map<String,String> __this__headers = new HashMap<String,String>(other.headers);
      this.headers = __this__headers;
    }
    if (other.isSetBody()) {
      this.body = org.apache.thrift.TBaseHelper.copyBinary(other.body);
    }
  }
  public ThriftFlumeEvent deepCopy() {
    return new ThriftFlumeEvent(this);
  }
  /** Resets this object to its initial (unset) state. */
  @Override
  public void clear() {
    this.headers = null;
    this.body = null;
  }
  public int getHeadersSize() {
    return (this.headers == null) ? 0 : this.headers.size();
  }
  /** Adds a header, lazily creating the map on first use. */
  public void putToHeaders(String key, String val) {
    if (this.headers == null) {
      this.headers = new HashMap<String,String>();
    }
    this.headers.put(key, val);
  }
  public Map<String,String> getHeaders() {
    return this.headers;
  }
  public ThriftFlumeEvent setHeaders(Map<String,String> headers) {
    this.headers = headers;
    return this;
  }
  public void unsetHeaders() {
    this.headers = null;
  }
  /** Returns true if field headers is set (has been assigned a value) and false otherwise */
  public boolean isSetHeaders() {
    return this.headers != null;
  }
  public void setHeadersIsSet(boolean value) {
    if (!value) {
      this.headers = null;
    }
  }
  /**
   * Returns the body bytes. Note the side effect: the buffer is first
   * normalized via TBaseHelper.rightSize so that array() reflects exactly
   * the buffer's content.
   */
  public byte[] getBody() {
    setBody(org.apache.thrift.TBaseHelper.rightSize(body));
    return body == null ? null : body.array();
  }
  public ByteBuffer bufferForBody() {
    return org.apache.thrift.TBaseHelper.copyBinary(body);
  }
  /** Stores a defensive copy of the given byte array as the body. */
  public ThriftFlumeEvent setBody(byte[] body) {
    this.body = body == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(body, body.length));
    return this;
  }
  public ThriftFlumeEvent setBody(ByteBuffer body) {
    this.body = org.apache.thrift.TBaseHelper.copyBinary(body);
    return this;
  }
  public void unsetBody() {
    this.body = null;
  }
  /** Returns true if field body is set (has been assigned a value) and false otherwise */
  public boolean isSetBody() {
    return this.body != null;
  }
  public void setBodyIsSet(boolean value) {
    if (!value) {
      this.body = null;
    }
  }
  /** Generic, reflection-style field setter used by the Thrift runtime. */
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case HEADERS:
      if (value == null) {
        unsetHeaders();
      } else {
        setHeaders((Map<String,String>)value);
      }
      break;
    case BODY:
      if (value == null) {
        unsetBody();
      } else {
        setBody((ByteBuffer)value);
      }
      break;
    }
  }
  /** Generic field getter used by the Thrift runtime. */
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case HEADERS:
      return getHeaders();
    case BODY:
      return getBody();
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case HEADERS:
      return isSetHeaders();
    case BODY:
      return isSetBody();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof ThriftFlumeEvent)
      return this.equals((ThriftFlumeEvent)that);
    return false;
  }
  /** Field-by-field equality over headers and body. */
  public boolean equals(ThriftFlumeEvent that) {
    if (that == null)
      return false;
    boolean this_present_headers = true && this.isSetHeaders();
    boolean that_present_headers = true && that.isSetHeaders();
    if (this_present_headers || that_present_headers) {
      if (!(this_present_headers && that_present_headers))
        return false;
      if (!this.headers.equals(that.headers))
        return false;
    }
    boolean this_present_body = true && this.isSetBody();
    boolean that_present_body = true && that.isSetBody();
    if (this_present_body || that_present_body) {
      if (!(this_present_body && that_present_body))
        return false;
      if (!this.body.equals(that.body))
        return false;
    }
    return true;
  }
  /** Hash consistent with equals(): presence flags plus field values. */
  @Override
  public int hashCode() {
    List<Object> list = new ArrayList<Object>();
    boolean present_headers = true && (isSetHeaders());
    list.add(present_headers);
    if (present_headers)
      list.add(headers);
    boolean present_body = true && (isSetBody());
    list.add(present_body);
    if (present_body)
      list.add(body);
    return list.hashCode();
  }
  /** Orders by class name, then by each field's presence and value. */
  @Override
  public int compareTo(ThriftFlumeEvent other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetHeaders()).compareTo(other.isSetHeaders());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetHeaders()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.headers, other.headers);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetBody()).compareTo(other.isSetBody());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetBody()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.body, other.body);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  /** Deserializes via the scheme matching the protocol (standard or tuple). */
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  /** Serializes via the scheme matching the protocol (standard or tuple). */
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("ThriftFlumeEvent(");
    boolean first = true;
    sb.append("headers:");
    if (this.headers == null) {
      sb.append("null");
    } else {
      sb.append(this.headers);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("body:");
    if (this.body == null) {
      sb.append("null");
    } else {
      // Binary body is rendered as a (possibly truncated) hex dump.
      org.apache.thrift.TBaseHelper.toString(this.body, sb);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }
  /** Both fields are required; throws if either is missing. */
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    if (headers == null) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'headers' was not present! Struct: " + toString());
    }
    if (body == null) {
      throw new org.apache.thrift.protocol.TProtocolException("Required field 'body' was not present! Struct: " + toString());
    }
    // check for sub-struct validity
  }
  /** Java serialization hook: delegates to Thrift compact-protocol encoding. */
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  /** Java deserialization hook: delegates to Thrift compact-protocol decoding. */
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  /** Factory producing the field-tagged (standard) serialization scheme. */
  private static class ThriftFlumeEventStandardSchemeFactory implements SchemeFactory {
    public ThriftFlumeEventStandardScheme getScheme() {
      return new ThriftFlumeEventStandardScheme();
    }
  }
  /**
   * Standard (field-tagged) wire encoding; unknown fields are skipped for
   * forward compatibility.
   */
  private static class ThriftFlumeEventStandardScheme extends StandardScheme<ThriftFlumeEvent> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftFlumeEvent struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // HEADERS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
                org.apache.thrift.protocol.TMap _map0 = iprot.readMapBegin();
                struct.headers = new HashMap<String,String>(2*_map0.size);
                String _key1;
                String _val2;
                for (int _i3 = 0; _i3 < _map0.size; ++_i3)
                {
                  _key1 = iprot.readString();
                  _val2 = iprot.readString();
                  struct.headers.put(_key1, _val2);
                }
                iprot.readMapEnd();
              }
              struct.setHeadersIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // BODY
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.body = iprot.readBinary();
              struct.setBodyIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, ThriftFlumeEvent struct) throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.headers != null) {
        oprot.writeFieldBegin(HEADERS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.headers.size()));
          for (Map.Entry<String, String> _iter4 : struct.headers.entrySet())
          {
            oprot.writeString(_iter4.getKey());
            oprot.writeString(_iter4.getValue());
          }
          oprot.writeMapEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.body != null) {
        oprot.writeFieldBegin(BODY_FIELD_DESC);
        oprot.writeBinary(struct.body);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  /** Factory producing the compact tuple serialization scheme. */
  private static class ThriftFlumeEventTupleSchemeFactory implements SchemeFactory {
    public ThriftFlumeEventTupleScheme getScheme() {
      return new ThriftFlumeEventTupleScheme();
    }
  }
  /**
   * Compact tuple encoding. Both fields are required, so no presence
   * bit set is written: the map size, map entries and body follow directly.
   */
  private static class ThriftFlumeEventTupleScheme extends TupleScheme<ThriftFlumeEvent> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, ThriftFlumeEvent struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.headers.size());
        for (Map.Entry<String, String> _iter5 : struct.headers.entrySet())
        {
          oprot.writeString(_iter5.getKey());
          oprot.writeString(_iter5.getValue());
        }
      }
      oprot.writeBinary(struct.body);
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, ThriftFlumeEvent struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
        org.apache.thrift.protocol.TMap _map6 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
        struct.headers = new HashMap<String,String>(2*_map6.size);
        String _key7;
        String _val8;
        for (int _i9 = 0; _i9 < _map6.size; ++_i9)
        {
          _key7 = iprot.readString();
          _val8 = iprot.readString();
          struct.headers.put(_key7, _val8);
        }
      }
      struct.setHeadersIsSet(true);
      struct.body = iprot.readBinary();
      struct.setBodyIsSet(true);
    }
  }
}
| 9,668 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/SSLContextAwareAbstractRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import org.apache.flume.FlumeException;
import org.apache.flume.util.SSLUtil;
/**
 * Base class for RPC clients that support SSL/TLS. Parses the SSL-related
 * client properties into protected fields for subclasses, falling back to
 * the JVM-global values provided by {@link SSLUtil} where a property is
 * not set.
 */
public abstract class SSLContextAwareAbstractRpcClient extends AbstractRpcClient {
  // Whether SSL is enabled for this client.
  protected boolean enableSsl;
  // If true, server certificates are not validated (insecure; testing only).
  protected boolean trustAllCerts;
  protected String truststore;
  protected String truststorePassword;
  protected String truststoreType;
  // SSLv3 is excluded by default; the include/exclude sets below are
  // populated from whitespace-separated configuration values.
  protected final Set<String> excludeProtocols = new LinkedHashSet<>(Arrays.asList("SSLv3"));
  protected final Set<String> includeProtocols = new LinkedHashSet<>();
  protected final Set<String> excludeCipherSuites = new LinkedHashSet<>();
  protected final Set<String> includeCipherSuites = new LinkedHashSet<>();

  /**
   * Reads the SSL-related settings from the given client {@code properties},
   * using the global (JVM-level) SSL settings as defaults.
   *
   * @param properties client configuration properties
   * @throws FlumeException declared so overrides/subclasses may signal
   *     configuration failures
   */
  protected void configureSSL(Properties properties) throws FlumeException {
    enableSsl = Boolean.parseBoolean(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_SSL));
    trustAllCerts = Boolean.parseBoolean(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_TRUST_ALL_CERTS));
    truststore = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_TRUSTSTORE, SSLUtil.getGlobalTruststorePath());
    truststorePassword = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_TRUSTSTORE_PASSWORD,
        SSLUtil.getGlobalTruststorePassword());
    truststoreType = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_TRUSTSTORE_TYPE,
        SSLUtil.getGlobalTruststoreType("JKS"));
    parseList(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_EXCLUDE_PROTOCOLS,
        SSLUtil.getGlobalExcludeProtocols()),
        excludeProtocols);
    parseList(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_INCLUDE_PROTOCOLS,
        SSLUtil.getGlobalIncludeProtocols()),
        includeProtocols);
    parseList(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_EXCLUDE_CIPHER_SUITES,
        SSLUtil.getGlobalExcludeCipherSuites()),
        excludeCipherSuites);
    parseList(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_INCLUDE_CIPHER_SUITES,
        SSLUtil.getGlobalIncludeCipherSuites()),
        includeCipherSuites);
  }

  /**
   * Splits a whitespace-separated list into {@code set}. A {@code null}
   * value leaves the set unchanged. Splitting on whitespace runs (rather
   * than a single space) prevents empty-string entries from being added
   * when the configured value contains consecutive or surrounding spaces.
   */
  private void parseList(String value, Set<String> set) {
    if (Objects.nonNull(value)) {
      for (String item : value.trim().split("\\s+")) {
        if (!item.isEmpty()) {
          set.add(item);
        }
      }
    }
  }
}
| 9,669 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/RpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
/**
 * <p>Public client interface for sending data to Flume.</p>
 *
 * <p>This interface is intended not to change incompatibly for Flume 1.x.</p>
 *
 * <p><strong>Note:</strong> It is recommended for applications to construct
 * {@link RpcClient} instances using the {@link RpcClientFactory} class,
 * instead of using builders associated with a particular implementation class.
 * </p>
 *
 * @see org.apache.flume.api.RpcClientFactory
 */
public interface RpcClient {
  // Interface members are implicitly public; the redundant modifier is
  // omitted per Java convention (JLS 9.4).

  /**
   * Returns the maximum number of {@link Event events} that may be batched
   * at once by {@link #appendBatch(List) appendBatch()}.
   */
  int getBatchSize();

  /**
   * <p>Send a single {@link Event} to the associated Flume source.</p>
   *
   * <p>This method blocks until the RPC returns or until the request times out.
   * </p>
   *
   * <p><strong>Note:</strong> If this method throws an
   * {@link EventDeliveryException}, there is no way to recover and the
   * application must invoke {@link #close()} on this object to clean up system
   * resources.</p>
   *
   * @param event the event to send
   *
   * @throws EventDeliveryException when an error prevents event delivery.
   */
  void append(Event event) throws EventDeliveryException;

  /**
   * <p>Send a list of {@linkplain Event events} to the associated Flume source.
   * </p>
   *
   * <p>This method blocks until the RPC returns or until the request times out.
   * </p>
   *
   * <p>It is strongly recommended that the number of events in the List be no
   * more than {@link #getBatchSize()}. If it is more, multiple RPC calls will
   * be required, and the likelihood of duplicate Events being stored will
   * increase.</p>
   *
   * <p><strong>Note:</strong> If this method throws an
   * {@link EventDeliveryException}, there is no way to recover and the
   * application must invoke {@link #close()} on this object to clean up system
   * resources.</p>
   *
   * @param events List of events to send
   *
   * @throws EventDeliveryException when an error prevents event delivery.
   */
  void appendBatch(List<Event> events) throws EventDeliveryException;

  /**
   * <p>Returns {@code true} if this object appears to be in a usable state, and
   * it returns {@code false} if this object is permanently disabled.</p>
   *
   * <p>If this method returns {@code false}, an application must call
   * {@link #close()} on this object to clean up system resources.</p>
   */
  boolean isActive();

  /**
   * <p>Immediately closes the client so that it may no longer be used.</p>
   *
   * <p><strong>Note:</strong> This method MUST be called by applications
   * when they are done using the RPC client in order to clean up resources.</p>
   *
   * <p>Multi-threaded applications may want to gracefully stop making
   * RPC calls before calling close(). Otherwise, they risk getting
   * {@link EventDeliveryException} thrown from their in-flight calls when the
   * underlying connection is disabled.</p>
   */
  void close() throws FlumeException;
}
| 9,670 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/RpcClientConfigurationConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.concurrent.TimeUnit;
/**
* Configuration constants used by the RpcClient. These configuration keys
* can be specified via a Properties object to the appropriate method of
* RpcClientFactory in order to obtain a customized RPC client.
*/
public final class RpcClientConfigurationConstants {
/**
* Hosts configuration key to specify a space delimited list of named
* hosts. For example:
* <pre>
* hosts = h1 h2
* </pre>
*/
public static final String CONFIG_HOSTS = "hosts";
/**
* Hosts prefix to specify address of a particular named host. For example
* <pre>
* hosts.h1 = server1.example.com:12121
* hosts.h2 = server2.example.com:12121
* </pre>
*/
public static final String CONFIG_HOSTS_PREFIX = "hosts.";
/**
* Configuration key used to specify the batch size. Default batch size is
* {@value DEFAULT_BATCH_SIZE}.
*/
public static final String CONFIG_BATCH_SIZE = "batch-size";
/**
* Configuration key to specify connection timeout in milliseconds. The
* default connection timeout is {@value DEFAULT_CONNECT_TIMEOUT_MILLIS}.
*/
public static final String CONFIG_CONNECT_TIMEOUT = "connect-timeout";
/**
* Configuration key to specify request timeout in milliseconds. The
* default request timeout is {@value DEFAULT_REQUEST_TIMEOUT_MILLIS}.
*/
public static final String CONFIG_REQUEST_TIMEOUT = "request-timeout";
/**
* Default batch size.
*/
public static final Integer DEFAULT_BATCH_SIZE = 100;
/**
* Default connection, handshake, and initial request timeout in milliseconds.
*/
public static final long DEFAULT_CONNECT_TIMEOUT_MILLIS =
TimeUnit.MILLISECONDS.convert(20, TimeUnit.SECONDS);
/**
* Default request timeout in milliseconds.
*/
public static final long DEFAULT_REQUEST_TIMEOUT_MILLIS =
TimeUnit.MILLISECONDS.convert(20, TimeUnit.SECONDS);
/**
* Maximum attempts to be made by the FailoverRpcClient in case of
* failures.
*/
public static final String CONFIG_MAX_ATTEMPTS = "max-attempts";
/**
* Configuration key to specify the RpcClient type to be used. The available
* values are <tt>DEFAULT</tt> which results in the creation of a regular
* <tt>NettyAvroRpcClient</tt> and <tt>DEFAULT_FAILOVER</tt> which results
* in the creation of a failover client implementation on top of multiple
* <tt>NettyAvroRpcClient</tt>s. The default value of this configuration
* is {@value #DEFAULT_CLIENT_TYPE}.
*
*/
public static final String CONFIG_CLIENT_TYPE = "client.type";
/**
* The default client type to be created if no explicit type is specified.
*/
public static final String DEFAULT_CLIENT_TYPE =
RpcClientFactory.ClientType.DEFAULT.name();
/**
* The selector type used by the <tt>LoadBalancingRpcClient</tt>. This
* value of this setting could be either <tt>round_robin</tt>,
* <tt>random</tt>, or the fully qualified name class that implements the
* <tt>LoadBalancingRpcClient.HostSelector</tt> interface.
*/
public static final String CONFIG_HOST_SELECTOR =
"host-selector";
  /** Value for {@link #CONFIG_HOST_SELECTOR}: pick hosts in round-robin order. */
  public static final String HOST_SELECTOR_ROUND_ROBIN = "ROUND_ROBIN";
  /** Value for {@link #CONFIG_HOST_SELECTOR}: pick hosts at random. */
  public static final String HOST_SELECTOR_RANDOM = "RANDOM";
  /** Configuration key for the maximum backoff interval. */
  public static final String CONFIG_MAX_BACKOFF = "maxBackoff";
  /** Configuration key enabling backoff of failed hosts ("true"/"false"). */
  public static final String CONFIG_BACKOFF = "backoff";
  /** Default value for {@link #CONFIG_BACKOFF}: backoff disabled. */
  public static final String DEFAULT_BACKOFF = "false";
/**
* Maximum number of connections each Thrift Rpc client can open to a given
* host.
*/
public static final String CONFIG_CONNECTION_POOL_SIZE = "maxConnections";
public static final int DEFAULT_CONNECTION_POOL_SIZE = 5;
/**
* The following are const for the NettyAvro Client. To enable compression
* and set a compression level
*/
public static final String CONFIG_COMPRESSION_TYPE = "compression-type";
public static final String CONFIG_COMPRESSION_LEVEL = "compression-level";
public static final int DEFAULT_COMPRESSION_LEVEL = 6;
/**
* Configuration constants for SSL support
*/
public static final String CONFIG_SSL = "ssl";
public static final String CONFIG_TRUST_ALL_CERTS = "trust-all-certs";
public static final String CONFIG_TRUSTSTORE = "truststore";
public static final String CONFIG_TRUSTSTORE_PASSWORD = "truststore-password";
public static final String CONFIG_TRUSTSTORE_TYPE = "truststore-type";
public static final String CONFIG_EXCLUDE_PROTOCOLS = "exclude-protocols";
public static final String CONFIG_INCLUDE_PROTOCOLS = "include-protocols";
public static final String CONFIG_EXCLUDE_CIPHER_SUITES = "exclude-cipher-suites";
public static final String CONFIG_INCLUDE_CIPHER_SUITES = "include-cipher-suites";
public static final String KERBEROS_KEY = "kerberos";
/**
* Configuration constants for the NettyAvroRpcClient
* NioClientSocketChannelFactory
*/
public static final String MAX_IO_WORKERS = "maxIoWorkers";
  /**
   * Private constructor: this is a constants-only holder class and must never
   * be instantiated.
   */
  private RpcClientConfigurationConstants() {
    // disable explicit object creation
  }
}
| 9,671 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/NettyAvroRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.security.KeyStore;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import org.apache.avro.ipc.CallFuture;
import org.apache.avro.ipc.Transceiver;
import org.apache.avro.ipc.netty.NettyTransceiver;
import org.apache.avro.ipc.specific.SpecificRequestor;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.source.avro.AvroFlumeEvent;
import org.apache.flume.source.avro.AvroSourceProtocol;
import org.apache.flume.source.avro.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.compression.JZlibDecoder;
import io.netty.handler.codec.compression.JZlibEncoder;
import io.netty.handler.codec.compression.ZlibEncoder;
import io.netty.handler.ssl.SslHandler;
/**
* Avro/Netty implementation of {@link RpcClient}.
* The connections are intended to be opened before clients are given access so
* that the object cannot ever be in an inconsistent when exposed to users.
*/
/**
 * Avro/Netty implementation of {@link RpcClient}.
 * The connections are intended to be opened before clients are given access so
 * that the object cannot ever be in an inconsistent state when exposed to users.
 */
public class NettyAvroRpcClient extends SSLContextAwareAbstractRpcClient {

  /** Pool used to bound potentially blocking Avro calls (see AVRO-1122). */
  private ExecutorService callTimeoutPool;
  private final ReentrantLock stateLock = new ReentrantLock();

  /**
   * Guarded by {@code stateLock}
   */
  private ConnState connState;

  /** Remote host/port this client connects to; set in {@link #configure(Properties)}. */
  private InetSocketAddress address;

  private Transceiver transceiver;
  private AvroSourceProtocol.Callback avroClient;
  private static final Logger logger = LoggerFactory.getLogger(NettyAvroRpcClient.class);
  private boolean enableDeflateCompression;
  private int compressionLevel;

  /**
   * This constructor is intended to be called from {@link RpcClientFactory}.
   * A call to this constructor should be followed by call to configure().
   */
  protected NettyAvroRpcClient() {
  }

  /**
   * This method should only be invoked by the build function.
   * @throws FlumeException if the connection cannot be established
   */
  private void connect() throws FlumeException {
    connect(connectTimeout, TimeUnit.MILLISECONDS);
  }

  /**
   * Internal only, for now.
   * Opens the Netty transceiver, optionally installing deflate compression and
   * SSL handlers in the channel pipeline, and transitions the client to READY.
   * @param timeout connect timeout
   * @param tu unit of {@code timeout}
   * @throws FlumeException if the connection cannot be established
   */
  private void connect(long timeout, TimeUnit tu) throws FlumeException {
    callTimeoutPool = Executors.newCachedThreadPool(
        new TransceiverThreadFactory("Flume Avro RPC Client Call Invoker"));

    try {
      transceiver = new NettyTransceiver(this.address, Math.toIntExact(tu.toMillis(timeout)),
        (ch) -> {
          ChannelPipeline pipeline = ch.pipeline();
          if (enableDeflateCompression) {
            ZlibEncoder encoder = new JZlibEncoder(compressionLevel);
            pipeline.addFirst("deflater", encoder);
            pipeline.addFirst("inflater", new JZlibDecoder());
          }
          SSLEngine engine = createSSLEngine();
          if (engine != null) {
            engine.setUseClientMode(true);
            pipeline.addLast("ssl", new SslHandler(engine));
          }
        });

      avroClient = SpecificRequestor.getClient(AvroSourceProtocol.Callback.class, transceiver);
    } catch (Throwable t) {
      // Shut down the pool we just created so a failed connect does not leak threads.
      if (callTimeoutPool != null) {
        callTimeoutPool.shutdownNow();
      }
      if (t instanceof IOException) {
        throw new FlumeException(this + ": RPC connection error", t);
      } else if (t instanceof FlumeException) {
        throw (FlumeException) t;
      } else if (t instanceof Error) {
        throw (Error) t;
      } else {
        throw new FlumeException(this + ": Unexpected exception", t);
      }
    }

    setState(ConnState.READY);
  }

  /**
   * Shuts down the call pool (waiting up to {@code requestTimeout} twice) and
   * closes the transceiver, then marks the client DEAD.
   * @throws FlumeException if closing the transceiver fails
   */
  @Override
  public void close() throws FlumeException {
    if (callTimeoutPool != null) {
      callTimeoutPool.shutdown();
      try {
        if (!callTimeoutPool.awaitTermination(requestTimeout, TimeUnit.MILLISECONDS)) {
          callTimeoutPool.shutdownNow();
          if (!callTimeoutPool.awaitTermination(requestTimeout, TimeUnit.MILLISECONDS)) {
            logger.warn(this + ": Unable to cleanly shut down call timeout pool");
          }
        }
      } catch (InterruptedException ex) {
        logger.warn(this + ": Interrupted during close", ex);
        // re-cancel if current thread also interrupted
        callTimeoutPool.shutdownNow();
        // preserve interrupt status
        Thread.currentThread().interrupt();
      }

      callTimeoutPool = null;
    }
    try {
      if (transceiver != null) {
        transceiver.close();
      }
    } catch (IOException ex) {
      throw new FlumeException(this + ": Error closing transceiver.", ex);
    } finally {
      setState(ConnState.DEAD);
    }

  }

  @Override
  public String toString() {
    return "NettyAvroRpcClient { host: " + address.getHostName() + ", port: " + address.getPort() + " }";
  }

  /**
   * Sends a single event, failing the client (state DEAD) on any error.
   * @throws EventDeliveryException on timeout, interruption or remote failure
   */
  @Override
  public void append(Event event) throws EventDeliveryException {
    try {
      append(event, requestTimeout, TimeUnit.MILLISECONDS);
    } catch (Throwable t) {
      // we mark as no longer active without trying to clean up resources
      // client is required to call close() to clean up resources
      setState(ConnState.DEAD);
      if (t instanceof Error) {
        throw (Error) t;
      }
      if (t instanceof TimeoutException) {
        throw new EventDeliveryException(this + ": Failed to send event. " +
            "RPC request timed out after " + requestTimeout + "ms", t);
      }
      throw new EventDeliveryException(this + ": Failed to send event", t);
    }
  }

  /**
   * Internal single-event send: submits the Avro call to the timeout pool
   * (AVRO-1122: avroClient.append may block) and waits for Status == OK.
   */
  private void append(Event event, long timeout, TimeUnit tu)
      throws EventDeliveryException {
    assertReady();

    final CallFuture<Status> callFuture = new CallFuture<Status>();

    final AvroFlumeEvent avroEvent = new AvroFlumeEvent();
    avroEvent.setBody(ByteBuffer.wrap(event.getBody()));
    avroEvent.setHeaders(toCharSeqMap(event.getHeaders()));

    Future<Void> handshake;
    try {
      // due to AVRO-1122, avroClient.append() may block
      handshake = callTimeoutPool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          avroClient.append(avroEvent, callFuture);
          return null;
        }
      });
    } catch (RejectedExecutionException ex) {
      throw new EventDeliveryException(this + ": Executor error", ex);
    }

    try {
      handshake.get(connectTimeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException ex) {
      throw new EventDeliveryException(this + ": Handshake timed out after " + connectTimeout + " ms", ex);
    } catch (InterruptedException ex) {
      throw new EventDeliveryException(this + ": Interrupted in handshake", ex);
    } catch (ExecutionException ex) {
      throw new EventDeliveryException(this + ": RPC request exception", ex);
    } catch (CancellationException ex) {
      throw new EventDeliveryException(this + ": RPC request cancelled", ex);
    } finally {
      if (!handshake.isDone()) {
        handshake.cancel(true);
      }
    }

    waitForStatusOK(callFuture, timeout, tu);
  }

  /**
   * Sends a list of events in batches of {@code batchSize}, failing the client
   * (state DEAD) on any error.
   * @throws EventDeliveryException on timeout, interruption or remote failure
   */
  @Override
  public void appendBatch(List<Event> events) throws EventDeliveryException {
    try {
      appendBatch(events, requestTimeout, TimeUnit.MILLISECONDS);
    } catch (Throwable t) {
      // we mark as no longer active without trying to clean up resources
      // client is required to call close() to clean up resources
      setState(ConnState.DEAD);
      if (t instanceof Error) {
        throw (Error) t;
      }
      if (t instanceof TimeoutException) {
        throw new EventDeliveryException(this + ": Failed to send event. " +
            "RPC request timed out after " + requestTimeout + " ms", t);
      }
      throw new EventDeliveryException(this + ": Failed to send batch", t);
    }
  }

  /**
   * Internal batch send: slices {@code events} into sub-batches of at most
   * {@code batchSize}, submitting each via the timeout pool (AVRO-1122) and
   * waiting for Status == OK before sending the next.
   */
  private void appendBatch(List<Event> events, long timeout, TimeUnit tu)
      throws EventDeliveryException {
    assertReady();

    Iterator<Event> iter = events.iterator();
    final List<AvroFlumeEvent> avroEvents = new LinkedList<AvroFlumeEvent>();

    // send multiple batches... bail if there is a problem at any time
    while (iter.hasNext()) {
      avroEvents.clear();

      for (int i = 0; i < batchSize && iter.hasNext(); i++) {
        Event event = iter.next();
        AvroFlumeEvent avroEvent = new AvroFlumeEvent();
        avroEvent.setBody(ByteBuffer.wrap(event.getBody()));
        avroEvent.setHeaders(toCharSeqMap(event.getHeaders()));
        avroEvents.add(avroEvent);
      }

      final CallFuture<Status> callFuture = new CallFuture<Status>();

      Future<Void> handshake;
      try {
        // due to AVRO-1122, avroClient.appendBatch() may block
        handshake = callTimeoutPool.submit(new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            avroClient.appendBatch(avroEvents, callFuture);
            return null;
          }
        });
      } catch (RejectedExecutionException ex) {
        throw new EventDeliveryException(this + ": Executor error", ex);
      }

      try {
        handshake.get(connectTimeout, TimeUnit.MILLISECONDS);
      } catch (TimeoutException ex) {
        throw new EventDeliveryException(this + ": Handshake timed out after " +
            connectTimeout + "ms", ex);
      } catch (InterruptedException ex) {
        throw new EventDeliveryException(this + ": Interrupted in handshake",
            ex);
      } catch (ExecutionException ex) {
        throw new EventDeliveryException(this + ": RPC request exception", ex);
      } catch (CancellationException ex) {
        throw new EventDeliveryException(this + ": RPC request cancelled", ex);
      } finally {
        if (!handshake.isDone()) {
          handshake.cancel(true);
        }
      }

      waitForStatusOK(callFuture, timeout, tu);
    }
  }

  /**
   * Helper method that waits for a Status future to come back and validates
   * that it returns Status == OK.
   * @param callFuture Future to wait on
   * @param timeout Time to wait before failing
   * @param tu Time Unit of {@code timeout}
   * @throws EventDeliveryException If there is a timeout or if Status != OK
   */
  private void waitForStatusOK(CallFuture<Status> callFuture,
      long timeout, TimeUnit tu) throws EventDeliveryException {
    try {
      Status status = callFuture.get(timeout, tu);
      if (status != Status.OK) {
        throw new EventDeliveryException(this + ": Avro RPC call returned Status: " + status);
      }
    } catch (CancellationException ex) {
      throw new EventDeliveryException(this + ": RPC future was cancelled", ex);
    } catch (ExecutionException ex) {
      throw new EventDeliveryException(this + ": Exception thrown from remote handler", ex);
    } catch (TimeoutException ex) {
      throw new EventDeliveryException(this + ": RPC request timed out", ex);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
      throw new EventDeliveryException(this + ": RPC request interrupted", ex);
    }
  }

  /**
   * This method should always be used to change {@code connState} so we ensure
   * that invalid state transitions do not occur and that the {@code isIdle}
   * {@link Condition} variable gets signaled reliably.
   * Throws {@code IllegalStateException} when called to transition from DEAD
   * to another state.
   * @param newState the state to transition to
   */
  private void setState(ConnState newState) {
    stateLock.lock();
    try {
      if (connState == ConnState.DEAD && connState != newState) {
        // BUGFIX: message used to say "CLOSED", which is not a ConnState value.
        throw new IllegalStateException("Cannot transition from DEAD state.");
      }
      connState = newState;
    } finally {
      stateLock.unlock();
    }
  }

  /**
   * If the connection state != READY, throws {@link EventDeliveryException}.
   */
  private void assertReady() throws EventDeliveryException {
    stateLock.lock();
    try {
      ConnState curState = connState;
      if (curState != ConnState.READY) {
        throw new EventDeliveryException("RPC failed, client in an invalid state: " + curState);
      }
    } finally {
      stateLock.unlock();
    }
  }

  /**
   * Helper function to convert a map of String to a map of CharSequence.
   */
  private static Map<CharSequence, CharSequence> toCharSeqMap(Map<String, String> stringMap) {
    Map<CharSequence, CharSequence> charSeqMap = new HashMap<>();
    for (Map.Entry<String, String> entry : stringMap.entrySet()) {
      charSeqMap.put(entry.getKey(), entry.getValue());
    }
    return charSeqMap;
  }

  /** @return true only while the client is connected and READY. */
  @Override
  public boolean isActive() {
    stateLock.lock();
    try {
      return (connState == ConnState.READY);
    } finally {
      stateLock.unlock();
    }
  }

  /** Lifecycle states; INIT -> READY -> DEAD, with DEAD terminal. */
  private static enum ConnState {
    INIT, READY, DEAD
  }

  /**
   * <p>
   * Configure the actual client using the properties.
   * <tt>properties</tt> should have at least 2 params:
   * <p><tt>hosts</tt> = <i>alias_for_host</i></p>
   * <p><tt>alias_for_host</tt> = <i>hostname:port</i>. </p>
   * Only the first host is added, rest are discarded.</p>
   * <p>Optionally it can also have a <p>
   * <tt>batch-size</tt> = <i>batchSize</i>
   * @param properties The properties to instantiate the client with.
   * @throws FlumeException if already configured or the properties are invalid
   */
  @Override
  public synchronized void configure(Properties properties) throws FlumeException {
    stateLock.lock();
    try {
      if (connState == ConnState.READY || connState == ConnState.DEAD) {
        throw new FlumeException("This client was already configured, cannot reconfigure.");
      }
    } finally {
      stateLock.unlock();
    }

    batchSize = parseBatchSize(properties);

    // host and port
    String hostNames = properties.getProperty(RpcClientConfigurationConstants.CONFIG_HOSTS);
    String[] hosts = null;
    if (hostNames != null && !hostNames.isEmpty()) {
      hosts = hostNames.split("\\s+");
    } else {
      throw new FlumeException("Hosts list is invalid: " + hostNames);
    }

    if (hosts.length > 1) {
      logger.warn("More than one hosts are specified for the default client. "
          + "Only the first host will be used and others ignored. Specified: "
          + hostNames + "; to be used: " + hosts[0]);
    }

    String host = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + hosts[0]);
    if (host == null || host.isEmpty()) {
      throw new FlumeException("Host not found: " + hosts[0]);
    }
    String[] hostAndPort = host.split(":");
    if (hostAndPort.length != 2) {
      throw new FlumeException("Invalid hostname: " + hosts[0]);
    }
    Integer port = null;
    try {
      port = Integer.parseInt(hostAndPort[1]);
    } catch (NumberFormatException e) {
      throw new FlumeException("Invalid Port: " + hostAndPort[1], e);
    }
    this.address = new InetSocketAddress(hostAndPort[0], port);

    // connect timeout
    connectTimeout = RpcClientConfigurationConstants.DEFAULT_CONNECT_TIMEOUT_MILLIS;
    String strConnTimeout = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_CONNECT_TIMEOUT);
    if (strConnTimeout != null && strConnTimeout.trim().length() > 0) {
      try {
        connectTimeout = Long.parseLong(strConnTimeout);
        if (connectTimeout < 1000) {
          logger.warn("Connection timeout specified less than 1s. Using default value instead.");
          connectTimeout = RpcClientConfigurationConstants.DEFAULT_CONNECT_TIMEOUT_MILLIS;
        }
      } catch (NumberFormatException ex) {
        logger.error("Invalid connect timeout specified: " + strConnTimeout);
      }
    }

    // request timeout
    requestTimeout = RpcClientConfigurationConstants.DEFAULT_REQUEST_TIMEOUT_MILLIS;
    String strReqTimeout = properties.getProperty(RpcClientConfigurationConstants.CONFIG_REQUEST_TIMEOUT);
    if (strReqTimeout != null && strReqTimeout.trim().length() > 0) {
      try {
        requestTimeout = Long.parseLong(strReqTimeout);
        if (requestTimeout < 1000) {
          logger.warn("Request timeout specified less than 1s. Using default value instead.");
          requestTimeout = RpcClientConfigurationConstants.DEFAULT_REQUEST_TIMEOUT_MILLIS;
        }
      } catch (NumberFormatException ex) {
        logger.error("Invalid request timeout specified: " + strReqTimeout);
      }
    }

    String enableCompressionStr =
        properties.getProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_TYPE);
    if (enableCompressionStr != null && enableCompressionStr.equalsIgnoreCase("deflate")) {
      this.enableDeflateCompression = true;
      String compressionLvlStr =
          properties.getProperty(RpcClientConfigurationConstants.CONFIG_COMPRESSION_LEVEL);
      compressionLevel = RpcClientConfigurationConstants.DEFAULT_COMPRESSION_LEVEL;
      if (compressionLvlStr != null) {
        try {
          compressionLevel = Integer.parseInt(compressionLvlStr);
        } catch (NumberFormatException ex) {
          logger.error("Invalid compression level: " + compressionLvlStr);
        }
      }
    }

    configureSSL(properties);

    String maxIoWorkersStr = properties.getProperty(RpcClientConfigurationConstants.MAX_IO_WORKERS);
    if (!StringUtils.isEmpty(maxIoWorkersStr)) {
      logger.warn("Specifying the number of workers is no longer supported");
    }

    this.connect();
  }

  /**
   * A thread factor implementation modeled after the implementation of
   * NettyTransceiver.NettyTransceiverThreadFactory class which is
   * a private static class. The only difference between that and this
   * implementation is that this implementation marks all the threads daemon
   * which allows the termination of the VM when the non-daemon threads
   * are done.
   */
  private static class TransceiverThreadFactory implements ThreadFactory {
    private final AtomicInteger threadId = new AtomicInteger(0);
    private final String prefix;

    /**
     * Creates a TransceiverThreadFactory that creates threads with the
     * specified name.
     * @param prefix the name prefix to use for all threads created by this
     * ThreadFactory. A unique ID will be appended to this prefix to form the
     * final thread name.
     */
    public TransceiverThreadFactory(String prefix) {
      this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
      Thread thread = new Thread(r);
      thread.setDaemon(true);
      thread.setName(prefix + " " + threadId.incrementAndGet());
      return thread;
    }
  }

  /**
   * Builds a client-mode {@link SSLEngine} according to the configured SSL
   * properties, or returns {@code null} when SSL is disabled. Enabled
   * protocols and cipher suites are filtered by the include/exclude lists.
   * @throws RuntimeException if the SSL context cannot be created
   */
  private SSLEngine createSSLEngine() {
    TrustManager[] managers;
    try {
      if (enableSsl) {
        if (trustAllCerts) {
          logger.warn(
              "No truststore configured, setting TrustManager to accept all server certificates");
          managers = new TrustManager[]{new PermissiveTrustManager()};
        } else {
          KeyStore keystore = null;
          if (truststore != null) {
            try (InputStream truststoreStream = new FileInputStream(truststore)) {
              keystore = KeyStore.getInstance(truststoreType);
              keystore.load(truststoreStream, truststorePassword != null ? truststorePassword.toCharArray() : null);
            }
          }
          TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
          // null keystore is OK, with SunX509 it defaults to system CA Certs
          // see http://docs.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#X509TrustManager
          tmf.init(keystore);
          managers = tmf.getTrustManagers();
        }
        SSLContext sslContext = SSLContext.getInstance("TLS");
        sslContext.init(null, managers, null);
        SSLEngine sslEngine = sslContext.createSSLEngine();
        sslEngine.setUseClientMode(true);
        List<String> enabledProtocols = new ArrayList<String>();
        for (String protocol : sslEngine.getEnabledProtocols()) {
          if ((includeProtocols.isEmpty() || includeProtocols.contains(protocol))
              && !excludeProtocols.contains(protocol)) {
            enabledProtocols.add(protocol);
          }
        }
        sslEngine.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
        List<String> enabledCipherSuites = new ArrayList<String>();
        for (String suite : sslEngine.getEnabledCipherSuites()) {
          if ((includeCipherSuites.isEmpty() || includeCipherSuites.contains(suite))
              && !excludeCipherSuites.contains(suite)) {
            enabledCipherSuites.add(suite);
          }
        }
        sslEngine.setEnabledCipherSuites(enabledCipherSuites.toArray(new String[0]));
        logger.info("SSLEngine protocols enabled: " + Arrays.asList(sslEngine.getEnabledProtocols()));
        // BUGFIX: this line previously logged getEnabledProtocols() again
        // (copy-paste error), so the cipher-suite list was never reported.
        logger.info("SSLEngine cipher suites enabled: "
            + Arrays.asList(sslEngine.getEnabledCipherSuites()));
        return sslEngine;
      } else {
        return null;
      }
    } catch (Exception ex) {
      logger.error("Cannot create SSL channel", ex);
      throw new RuntimeException("Cannot create SSL channel", ex);
    }
  }

  /**
   * Permissive trust manager accepting any certificate
   */
  private static class PermissiveTrustManager implements X509TrustManager {
    @Override
    public void checkClientTrusted(X509Certificate[] certs, String s) {
      // nothing
    }

    @Override
    public void checkServerTrusted(X509Certificate[] certs, String s) {
      // nothing
    }

    @Override
    public X509Certificate[] getAcceptedIssuers() {
      return new X509Certificate[0];
    }
  }
}
| 9,672 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/RpcClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import org.apache.flume.FlumeException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Locale;
import java.util.Properties;
/**
* Factory class to construct Flume {@link RPCClient} implementations.
*/
/**
 * Factory class to construct Flume {@link RpcClient} implementations.
 */
public class RpcClientFactory {

  /**
   * Returns an instance of {@link RpcClient}, optionally with failover.
   * To create a failover client, the properties object should have a
   * property <tt>client.type</tt> which has the value "failover". The client
   * connects to hosts specified by <tt>hosts</tt> property in given properties.
   *
   * @see org.apache.flume.api.FailoverRpcClient
   * <p>
   * If no <tt>client.type</tt> is specified, a default client that connects to
   * single host at a given port is created.(<tt>type</tt> can also simply be
   * <tt>DEFAULT</tt> for the default client).
   *
   * @see org.apache.flume.api.NettyAvroRpcClient
   *
   * @param properties The properties to instantiate the client with.
   * @throws FlumeException if the client class cannot be found or instantiated
   */
  @SuppressWarnings("unchecked")
  public static RpcClient getInstance(Properties properties)
      throws FlumeException {
    String type = null;
    type = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE);
    if (type == null || type.isEmpty()) {
      type = ClientType.DEFAULT.getClientClassName();
    }
    Class<? extends AbstractRpcClient> clazz;
    AbstractRpcClient client;
    try {
      String clientClassType = type;
      ClientType clientType = null;
      try {
        clientType = ClientType.valueOf(type.toUpperCase(Locale.ENGLISH));
      } catch (IllegalArgumentException e) {
        // Not one of the known aliases; treat the value as a class name.
        clientType = ClientType.OTHER;
      }
      if (!clientType.equals(ClientType.OTHER)) {
        clientClassType = clientType.getClientClassName();
      }
      clazz =
          (Class<? extends AbstractRpcClient>) Class.forName(clientClassType);
    } catch (ClassNotFoundException e) {
      throw new FlumeException("No such client!", e);
    }

    try {
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance().
      client = clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new FlumeException("Cannot instantiate client. " +
          "Exception follows:", e);
    }
    client.configure(properties);
    return client;
  }

  /**
   * Delegates to {@link #getInstance(Properties props)}, given a File path
   * to a {@link Properties} file.
   * @param propertiesFile Valid properties file
   * @return RpcClient configured according to the given Properties file.
   * @throws FileNotFoundException If the file cannot be found
   * @throws IOException If there is an IO error
   */
  public static RpcClient getInstance(File propertiesFile)
      throws FileNotFoundException, IOException {
    // try-with-resources: the previous implementation leaked this Reader.
    try (Reader reader = new FileReader(propertiesFile)) {
      Properties props = new Properties();
      props.load(reader);
      return getInstance(props);
    }
  }

  /**
   * Deprecated. Use
   * {@link #getDefaultInstance(String, Integer)} instead.
   * @throws FlumeException
   * @deprecated
   */
  @Deprecated
  public static RpcClient getInstance(String hostname, Integer port)
      throws FlumeException {
    return getDefaultInstance(hostname, port);
  }

  /**
   * Returns an instance of {@link RpcClient} connected to the specified
   * {@code hostname} and {@code port}.
   * @throws FlumeException
   */
  public static RpcClient getDefaultInstance(String hostname, Integer port)
      throws FlumeException {
    return getDefaultInstance(hostname, port, 0);

  }

  /**
   * Deprecated. Use
   * {@link #getDefaultInstance(String, Integer, Integer)}
   * instead.
   * @throws FlumeException
   * @deprecated
   */
  @Deprecated
  public static RpcClient getInstance(String hostname, Integer port,
      Integer batchSize) throws FlumeException {
    return getDefaultInstance(hostname, port, batchSize);
  }

  /**
   * Returns an instance of {@link RpcClient} connected to the specified
   * {@code hostname} and {@code port} with the specified {@code batchSize}.
   * @throws FlumeException
   */
  public static RpcClient getDefaultInstance(String hostname, Integer port,
      Integer batchSize) throws FlumeException {

    if (hostname == null) {
      throw new NullPointerException("hostname must not be null");
    }
    if (port == null) {
      throw new NullPointerException("port must not be null");
    }
    if (batchSize == null) {
      throw new NullPointerException("batchSize must not be null");
    }

    Properties props = new Properties();
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
        hostname + ":" + port.intValue());
    props.setProperty(RpcClientConfigurationConstants.CONFIG_BATCH_SIZE, batchSize.toString());
    NettyAvroRpcClient client = new NettyAvroRpcClient();
    client.configure(props);
    return client;
  }

  /**
   * Return an {@linkplain RpcClient} that uses Thrift for communicating with
   * the next hop. The next hop must have a ThriftSource listening on the
   * specified port.
   * @param hostname - The hostname of the next hop.
   * @param port - The port on which the ThriftSource is listening
   * @param batchSize - batch size of each transaction.
   * @return an {@linkplain RpcClient} which uses thrift configured with the
   * given parameters.
   */
  public static RpcClient getThriftInstance(String hostname, Integer port, Integer batchSize) {
    if (hostname == null) {
      throw new NullPointerException("hostname must not be null");
    }
    if (port == null) {
      throw new NullPointerException("port must not be null");
    }
    if (batchSize == null) {
      throw new NullPointerException("batchSize must not be null");
    }

    Properties props = new Properties();
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
        hostname + ":" + port.intValue());
    props.setProperty(RpcClientConfigurationConstants.CONFIG_BATCH_SIZE, batchSize.toString());
    ThriftRpcClient client = new ThriftRpcClient();
    client.configure(props);
    return client;
  }

  /**
   * Return an {@linkplain RpcClient} that uses Thrift for communicating with
   * the next hop. The next hop must have a ThriftSource listening on the
   * specified port. This will use the default batch size. See {@linkplain
   * RpcClientConfigurationConstants}
   * @param hostname - The hostname of the next hop.
   * @param port - The port on which the ThriftSource is listening
   * @return - An {@linkplain RpcClient} which uses thrift configured with the
   * given parameters.
   */
  public static RpcClient getThriftInstance(String hostname, Integer port) {
    return getThriftInstance(hostname, port, RpcClientConfigurationConstants
        .DEFAULT_BATCH_SIZE);
  }

  /**
   * Return an {@linkplain RpcClient} that uses Thrift for communicating with
   * the next hop.
   * @param props
   * @return - An {@linkplain RpcClient} which uses thrift configured with the
   * given parameters.
   */
  public static RpcClient getThriftInstance(Properties props) {
    props.setProperty(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE,
        ClientType.THRIFT.clientClassName);
    return getInstance(props);
  }

  /** Known client aliases and the implementation class each maps to. */
  public static enum ClientType {
    OTHER(null),
    DEFAULT(NettyAvroRpcClient.class.getCanonicalName()),
    DEFAULT_FAILOVER(FailoverRpcClient.class.getCanonicalName()),
    DEFAULT_LOADBALANCE(LoadBalancingRpcClient.class.getCanonicalName()),
    THRIFT(ThriftRpcClient.class.getCanonicalName());

    private final String clientClassName;

    private ClientType(String className) {
      this.clientClassName = className;
    }

    protected String getClientClassName() {
      return this.clientClassName;
    }
  }
}
| 9,673 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/AbstractRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.List;
import java.util.Properties;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractRpcClient implements RpcClient {
  // Made final: a static logger reference should never be reassigned.
  private static final Logger logger = LoggerFactory.getLogger(AbstractRpcClient.class);

  /** Number of events sent per appendBatch() call. */
  protected int batchSize =
      RpcClientConfigurationConstants.DEFAULT_BATCH_SIZE;
  /** Connection/handshake timeout in milliseconds. */
  protected long connectTimeout =
      RpcClientConfigurationConstants.DEFAULT_CONNECT_TIMEOUT_MILLIS;
  /** Per-request timeout in milliseconds. */
  protected long requestTimeout =
      RpcClientConfigurationConstants.DEFAULT_REQUEST_TIMEOUT_MILLIS;

  @Override
  public int getBatchSize() {
    return batchSize;
  }

  @Override
  public abstract void append(Event event) throws EventDeliveryException;

  @Override
  public abstract void appendBatch(List<Event> events)
      throws EventDeliveryException;

  @Override
  public abstract boolean isActive();

  @Override
  public abstract void close() throws FlumeException;

  /**
   * Configure the client using the given properties object.
   * @param properties
   * @throws FlumeException if the client can not be configured using this
   * method, or if the client was already configured once.
   */
  protected abstract void configure(Properties properties)
      throws FlumeException;

  /**
   * This is to parse the batch size config for rpc clients.
   * Falls back to the default batch size when the property is missing,
   * non-numeric, or less than 1.
   * @param properties config
   * @return batch size
   */
  public static int parseBatchSize(Properties properties) {
    String strBatchSize = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_BATCH_SIZE);
    // Parameterized logging avoids string concatenation when DEBUG is off.
    logger.debug("Batch size string = {}", strBatchSize);
    int batchSize = RpcClientConfigurationConstants.DEFAULT_BATCH_SIZE;
    if (strBatchSize != null && !strBatchSize.isEmpty()) {
      try {
        int parsedBatch = Integer.parseInt(strBatchSize);
        if (parsedBatch < 1) {
          logger.warn("Invalid value for batchSize: {}; Using default value.", parsedBatch);
        } else {
          batchSize = parsedBatch;
        }
      } catch (NumberFormatException e) {
        logger.warn("BatchSize is not valid for RpcClient: " + strBatchSize +
            ". Default value assigned.", e);
      }
    }
    return batchSize;
  }
}
| 9,674 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/HostInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A convenience class that holds the property reference name along with the
 * hostname and port number of a specified host address. It also provides
 * a method to parse out the host list from a given properties object
 * that contains the host details.
 */
public class HostInfo {

  private static final Logger LOGGER = LoggerFactory.getLogger(HostInfo.class);

  // Alias used to look the host up in the configuration (hosts.<alias>).
  private final String referenceName;
  private final String hostName;
  private final int portNumber;

  public HostInfo(String referenceName, String hostName, int portNumber) {
    this.referenceName = referenceName;
    this.hostName = hostName;
    this.portNumber = portNumber;
  }

  public String getReferenceName() {
    return referenceName;
  }

  public String getHostName() {
    return hostName;
  }

  public int getPortNumber() {
    return portNumber;
  }

  @Override
  public String toString() {
    return referenceName + "{" + hostName + ":" + portNumber + "}";
  }

  /**
   * Parses the host list from the given properties. Reads the space-separated
   * aliases from the "hosts" property and, for each alias, the "hosts.&lt;alias&gt;"
   * property holding a "hostname:port" pair. Aliases with no address entry
   * are silently skipped.
   *
   * @param properties client configuration
   * @return the parsed hosts, in configuration order; empty when none are set
   * @throws FlumeException if an address is malformed or the port is not numeric
   */
  public static List<HostInfo> getHostInfoList(Properties properties) {
    List<HostInfo> hosts = new ArrayList<>();
    String hostNames = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_HOSTS);
    if (hostNames != null && !hostNames.isEmpty()) {
      for (String hostAlias : hostNames.split("\\s+")) {
        String hostAndPortStr = properties.getProperty(
            RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + hostAlias);
        // Ignore that host if value is not there
        if (hostAndPortStr != null) {
          // NOTE: a plain ":" split does not support bracketed IPv6 literals.
          String[] hostAndPort = hostAndPortStr.split(":");
          if (hostAndPort.length != 2) {
            // Fixed defective message: original concatenated without a
            // separator ("Invalid host addresshost:port").
            LOGGER.error("Invalid host address: {}", hostAndPortStr);
            throw new FlumeException("Invalid host address: " + hostAndPortStr);
          }
          int port;
          try {
            port = Integer.parseInt(hostAndPort[1]);
          } catch (NumberFormatException e) {
            LOGGER.error("Invalid port number: " + hostAndPortStr, e);
            throw new FlumeException("Invalid port number: " + hostAndPortStr);
          }
          hosts.add(new HostInfo(hostAlias, hostAndPort[0].trim(), port));
        }
      }
    }
    return hosts;
  }
}
| 9,675 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/FailoverRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Properties;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Avro/Netty implementation of {@link RpcClient} which supports failover. This
 * takes a list of hostname port combinations and connects to the next available
 * (looping back to the first) host, from a given list of agents in the order
 * provided.
 *
 * <p>The properties used to build a FailoverRpcClient must have:
 * <p><tt>hosts</tt> = <i>alias_for_host1</i> <i>alias_for_host2</i></p> ...
 * <p><tt>hosts.alias_for_host1</tt> = <i>hostname1:port1</i>. </p>
 * <p><tt>hosts.alias_for_host2</tt> = <i>hostname2:port2</i>. </p> etc
 * <p>Optionally it can also have a
 * <tt>batch-size</tt> = <i>batchSize</i> and
 * <tt>max-attempts</tt> = <i>maxAttempts</i>
 *
 * <p>Given a failure, this client will attempt to append to <i>maxAttempts</i>
 * clients in the <i>hosts</i> list immediately following the failed host
 * (looping back to the beginning of the <i>hosts</i> list).
 */
public class FailoverRpcClient extends AbstractRpcClient implements RpcClient {

  private static final Logger logger = LoggerFactory
      .getLogger(FailoverRpcClient.class);

  // Currently connected delegate client; null until the first connection.
  private volatile RpcClient client;
  // Failover candidates, in configuration order.
  private List<HostInfo> hosts;
  // Maximum number of hosts tried per append before giving up.
  private Integer maxTries;
  // Index (into hosts) of the last host a connection was attempted to; -1 = none.
  private int lastCheckedhost;
  // True once configured, false after close().
  private boolean isActive;
  private Properties configurationProperties;

  protected FailoverRpcClient() {
    lastCheckedhost = -1;
    client = null;
  }

  // This function has to be synchronized to establish a happens-before
  // relationship for different threads that access this object
  // since shared data structures are created here.
  private synchronized void configureHosts(Properties properties)
      throws FlumeException {
    if (isActive) {
      logger.error("This client was already configured, cannot reconfigure.");
      throw new FlumeException("This client was already configured, " +
          "cannot reconfigure.");
    }
    hosts = HostInfo.getHostInfoList(properties);
    if (hosts.isEmpty()) {
      // Not fatal (kept for backward compatibility), but every append will
      // fail until the client is reconfigured with at least one host.
      logger.warn("No hosts were configured for this failover client.");
    }
    String tries = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_MAX_ATTEMPTS);
    if (tries == null || tries.isEmpty()) {
      maxTries = hosts.size();
    } else {
      try {
        maxTries = Integer.parseInt(tries);
      } catch (NumberFormatException e) {
        // Malformed value: fall back to one attempt per configured host.
        maxTries = hosts.size();
      }
    }
    batchSize = parseBatchSize(properties);
    isActive = true;
  }

  /**
   * Get the maximum number of "failed" hosts the client will try to establish
   * connection to before throwing an exception. Failed = was able to set up a
   * connection, but failed / returned error when the client tried to send data,
   *
   * @return The maximum number of failed retries
   */
  protected Integer getMaxTries() {
    return maxTries;
  }

  // Returns the current delegate if it is still usable, otherwise connects to
  // the next available host.
  private synchronized RpcClient getClient() {
    if (client == null || !client.isActive()) {
      client = getNextClient();
    }
    return client;
  }

  /**
   * Tries to append an event to the currently connected client. If it cannot
   * send the event, it tries to send to next available host
   *
   * @param event The event to be appended.
   *
   * @throws EventDeliveryException if the event could not be delivered within
   *         {@code maxTries} attempts
   */
  @Override
  public void append(Event event) throws EventDeliveryException {
    // Why a local variable rather than just calling getClient()?
    // If we get an EventDeliveryException, we need to call close on
    // that specific client; getClient in this case would get us
    // the next client - leaving a resource leak.
    RpcClient localClient = null;
    synchronized (this) {
      if (!isActive) {
        logger.error("Attempting to append to an already closed client.");
        throw new EventDeliveryException(
            "Attempting to append to an already closed client.");
      }
    }
    int tries = 0;
    while (tries < maxTries) {
      try {
        tries++;
        localClient = getClient();
        localClient.append(event);
        return;
      } catch (EventDeliveryException e) {
        // Could not send event through this client, try to pick another client.
        logger.warn("Client failed. Exception follows: ", e);
        localClient.close();
        localClient = null;
      } catch (Exception e2) {
        logger.error("Failed to send event: ", e2);
        throw new EventDeliveryException(
            "Failed to send event. Exception follows: ", e2);
      }
    }
    logger.error("Tried many times, could not send event.");
    throw new EventDeliveryException("Failed to send the event!");
  }

  /**
   * Tries to append a list of events to the currently connected client. If it
   * cannot send the event, it tries to send to next available host
   *
   * @param events The events to be appended.
   *
   * @throws EventDeliveryException if the batch could not be delivered within
   *         {@code maxTries} attempts
   */
  @Override
  public void appendBatch(List<Event> events)
      throws EventDeliveryException {
    RpcClient localClient = null;
    synchronized (this) {
      if (!isActive) {
        logger.error("Attempting to append to an already closed client.");
        throw new EventDeliveryException(
            "Attempting to append to an already closed client!");
      }
    }
    int tries = 0;
    while (tries < maxTries) {
      try {
        tries++;
        localClient = getClient();
        localClient.appendBatch(events);
        return;
      } catch (EventDeliveryException e) {
        // Could not send event through this client, try to pick another client.
        logger.warn("Client failed. Exception follows: ", e);
        localClient.close();
        localClient = null;
      } catch (Exception e1) {
        logger.error("No clients active: ", e1);
        throw new EventDeliveryException("No clients currently active. " +
            "Exception follows: ", e1);
      }
    }
    logger.error("Tried many times, could not send event.");
    throw new EventDeliveryException("Failed to send the event!");
  }

  // Returns false if and only if this client has been closed explicitly.
  // Should we check if any clients are active, if none are then return false?
  // This method has to be lightweight, so not checking if hosts are active.
  @Override
  public synchronized boolean isActive() {
    return isActive;
  }

  /**
   * Close the connection. This function is safe to call over and over.
   */
  @Override
  public synchronized void close() throws FlumeException {
    // BUGFIX: mark the client inactive unconditionally. Previously isActive
    // was cleared only when an underlying client existed and its close()
    // succeeded, so closing a never-connected client left it "active" and
    // appendable.
    isActive = false;
    if (client != null) {
      client.close();
    }
  }

  /**
   * Get the last socket address this client connected to. No guarantee this
   * will be the next it will connect to. If this host is down, it will connect
   * to another host. To be used only from the unit tests!
   * @return The last socket address this client connected to
   */
  protected InetSocketAddress getLastConnectedServerAddress() {
    HostInfo hostInfo = hosts.get(lastCheckedhost);
    return new InetSocketAddress(hostInfo.getHostName(),
        hostInfo.getPortNumber());
  }

  // Connects to the next available host: first the hosts after the last one
  // used, then (wrapping around) the hosts from the start of the list.
  private RpcClient getNextClient() throws FlumeException {
    lastCheckedhost =
        (lastCheckedhost == (hosts.size() - 1)) ? -1 : lastCheckedhost;
    int limit = hosts.size();
    Properties props = new Properties();
    props.putAll(configurationProperties);
    props.put(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE,
        RpcClientConfigurationConstants.DEFAULT_CLIENT_TYPE);
    // Try to connect to all hosts again, till we find one available
    RpcClient localClient = tryHostRange(lastCheckedhost + 1, limit, props);
    if (localClient == null) {
      localClient = tryHostRange(0, lastCheckedhost + 1, props);
    }
    if (localClient == null) {
      lastCheckedhost = -1;
      logger.error("No active client found.");
      throw new FlumeException("No active client.");
    }
    return localClient;
  }

  // Attempts to connect to hosts[start..end), returning the first client that
  // connects (updating lastCheckedhost), or null when all attempts fail.
  private RpcClient tryHostRange(int start, int end, Properties props) {
    for (int count = start; count < end; count++) {
      HostInfo hostInfo = hosts.get(count);
      try {
        setDefaultProperties(hostInfo, props);
        RpcClient localClient = RpcClientFactory.getInstance(props);
        lastCheckedhost = count;
        return localClient;
      } catch (FlumeException e) {
        logger.info("Could not connect to {}", hostInfo, e);
      }
    }
    return null;
  }

  // Points the delegate-client properties at the given host.
  private void setDefaultProperties(HostInfo hostInfo, Properties props) {
    props.put(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE,
        RpcClientFactory.ClientType.DEFAULT.name());
    props.put(RpcClientConfigurationConstants.CONFIG_HOSTS,
        hostInfo.getReferenceName());
  }

  @Override
  public void configure(Properties properties) throws FlumeException {
    configurationProperties = new Properties();
    configurationProperties.putAll(properties);
    configureHosts(configurationProperties);
  }
}
| 9,676 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/ThriftRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.thrift.Status;
import org.apache.flume.thrift.ThriftFlumeEvent;
import org.apache.flume.thrift.ThriftSourceProtocol;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.layered.TFastFramedTransport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManagerFactory;
import java.io.FileInputStream;
import java.nio.ByteBuffer;
import java.security.KeyStore;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Thrift implementation of {@link RpcClient}. Maintains a bounded pool of
 * Thrift connections to a single host:port, executes append calls on a
 * cached thread pool so they can be bounded by a request timeout, and
 * optionally wraps the transport in SSL.
 */
public class ThriftRpcClient extends SSLContextAwareAbstractRpcClient {
  private static final Logger LOGGER = LoggerFactory.getLogger(ThriftRpcClient.class);

  /**
   * Config param for the thrift protocol to use.
   */
  public static final String CONFIG_PROTOCOL = "protocol";
  public static final String BINARY_PROTOCOL = "binary";
  public static final String COMPACT_PROTOCOL = "compact";

  // Per-call timeout in milliseconds, applied to the Future.get() below.
  // NOTE(review): this appears to shadow the protected requestTimeout field
  // inherited through SSLContextAwareAbstractRpcClient - TODO confirm intent.
  private long requestTimeout;
  // Guards connState; fair lock so waiters are served in order.
  private final Lock stateLock;
  private State connState;
  private String hostname;
  private int port;
  private ConnectionPoolManager connectionManager;
  // Executor used only to bound append calls with a timeout (IO bound work).
  private final ExecutorService callTimeoutPool;
  // Counter used to give pool threads unique names.
  private final AtomicLong threadCounter;
  private final Random random = new Random();
  // Selected Thrift protocol name: BINARY_PROTOCOL or COMPACT_PROTOCOL.
  private String protocol;

  public ThriftRpcClient() {
    stateLock = new ReentrantLock(true);
    connState = State.INIT;
    threadCounter = new AtomicLong(0);
    // OK to use cached threadpool, because this is simply meant to timeout
    // the calls - and is IO bound.
    callTimeoutPool = Executors.newCachedThreadPool(new ThreadFactory() {
      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r);
        t.setName("Flume Thrift RPC thread - " + String.valueOf(threadCounter.incrementAndGet()));
        return t;
      }
    });
  }

  /**
   * Sends a single event, bounded by {@code requestTimeout}. On failure the
   * pooled connection is destroyed rather than returned to the pool.
   */
  @Override
  public void append(Event event) throws EventDeliveryException {
    // Thrift IPC client is not thread safe, so don't allow state changes or
    // client.append* calls unless the lock is acquired.
    ClientWrapper client = null;
    boolean destroyedClient = false;
    try {
      if (!isActive()) {
        throw new EventDeliveryException("Client was closed due to error. " +
            "Please create a new client");
      }
      client = connectionManager.checkout();
      final ThriftFlumeEvent thriftEvent = new ThriftFlumeEvent(event
          .getHeaders(), ByteBuffer.wrap(event.getBody()));
      doAppend(client, thriftEvent).get(requestTimeout, TimeUnit.MILLISECONDS);
    } catch (Throwable e) {
      // NOTE(review): Future.get(timeout) throws TimeoutException directly,
      // not wrapped in ExecutionException, so a timed-out call falls through
      // to the generic "Failed to send event." path below - TODO confirm.
      if (e instanceof ExecutionException) {
        Throwable cause = e.getCause();
        if (cause instanceof EventDeliveryException) {
          throw (EventDeliveryException) cause;
        } else if (cause instanceof TimeoutException) {
          throw new EventDeliveryException("Append call timeout", cause);
        }
      }
      destroyedClient = true;
      // If destroy throws, we still don't want to reuse the client, so mark it
      // as destroyed before we actually do.
      if (client != null) {
        connectionManager.destroy(client);
      }
      if (e instanceof Error) {
        throw (Error) e;
      } else if (e instanceof RuntimeException) {
        throw (RuntimeException) e;
      }
      throw new EventDeliveryException("Failed to send event. ", e);
    } finally {
      if (client != null && !destroyedClient) {
        connectionManager.checkIn(client);
      }
    }
  }

  /**
   * Sends the events in sub-batches of at most {@code batchSize}, each
   * sub-batch bounded by {@code requestTimeout}. On failure the pooled
   * connection is destroyed rather than returned to the pool.
   */
  @Override
  public void appendBatch(List<Event> events) throws EventDeliveryException {
    // Thrift IPC client is not thread safe, so don't allow state changes or
    // client.append* calls unless the lock is acquired.
    ClientWrapper client = null;
    boolean destroyedClient = false;
    try {
      if (!isActive()) {
        throw new EventDeliveryException("Client was closed " +
            "due to error or is not yet configured.");
      }
      client = connectionManager.checkout();
      final List<ThriftFlumeEvent> thriftFlumeEvents = new ArrayList
          <ThriftFlumeEvent>();
      Iterator<Event> eventsIter = events.iterator();
      while (eventsIter.hasNext()) {
        // Reuse one buffer list across sub-batches.
        thriftFlumeEvents.clear();
        for (int i = 0; i < batchSize && eventsIter.hasNext(); i++) {
          Event event = eventsIter.next();
          thriftFlumeEvents.add(new ThriftFlumeEvent(event.getHeaders(),
              ByteBuffer.wrap(event.getBody())));
        }
        if (!thriftFlumeEvents.isEmpty()) {
          doAppendBatch(client, thriftFlumeEvents).get(requestTimeout,
              TimeUnit.MILLISECONDS);
        }
      }
    } catch (Throwable e) {
      if (e instanceof ExecutionException) {
        Throwable cause = e.getCause();
        if (cause instanceof EventDeliveryException) {
          throw (EventDeliveryException) cause;
        } else if (cause instanceof TimeoutException) {
          throw new EventDeliveryException("Append call timeout", cause);
        }
      }
      destroyedClient = true;
      // If destroy throws, we still don't want to reuse the client, so mark it
      // as destroyed before we actually do.
      if (client != null) {
        connectionManager.destroy(client);
      }
      if (e instanceof Error) {
        throw (Error) e;
      } else if (e instanceof RuntimeException) {
        throw (RuntimeException) e;
      }
      throw new EventDeliveryException("Failed to send event. ", e);
    } finally {
      if (client != null && !destroyedClient) {
        connectionManager.checkIn(client);
      }
    }
  }

  // Submits a single-event append to the timeout pool; the returned Future
  // completes exceptionally when the server returns a non-OK status.
  private Future<Void> doAppend(final ClientWrapper client,
      final ThriftFlumeEvent e) throws Exception {
    return callTimeoutPool.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Status status = client.client.append(e);
        if (status != Status.OK) {
          throw new EventDeliveryException("Failed to deliver events. Server " +
              "returned status : " + status.name());
        }
        return null;
      }
    });
  }

  // Batch counterpart of doAppend.
  private Future<Void> doAppendBatch(final ClientWrapper client,
      final List<ThriftFlumeEvent> e) throws Exception {
    return callTimeoutPool.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        Status status = client.client.appendBatch(e);
        if (status != Status.OK) {
          throw new EventDeliveryException("Failed to deliver events. Server " +
              "returned status : " + status.name());
        }
        return null;
      }
    });
  }

  /** Returns true only while the client is configured and not closed. */
  @Override
  public boolean isActive() {
    stateLock.lock();
    try {
      return (connState == State.READY);
    } finally {
      stateLock.unlock();
    }
  }

  /**
   * Closes every pooled connection and shuts the timeout pool down.
   * The client is unusable afterwards.
   */
  @Override
  public void close() throws FlumeException {
    try {
      //Do not release this, because this client is not to be used again
      stateLock.lock();
      connState = State.DEAD;
      connectionManager.closeAll();
      callTimeoutPool.shutdown();
      if (!callTimeoutPool.awaitTermination(5, TimeUnit.SECONDS)) {
        callTimeoutPool.shutdownNow();
      }
    } catch (Throwable ex) {
      if (ex instanceof Error) {
        throw (Error) ex;
      } else if (ex instanceof RuntimeException) {
        throw (RuntimeException) ex;
      }
      throw new FlumeException("Failed to close RPC client. ", ex);
    } finally {
      stateLock.unlock();
    }
  }

  /**
   * Reads host, protocol, timeouts, pool size, and SSL settings from the
   * properties and moves the client to READY. Any failure moves it to DEAD.
   */
  @Override
  protected void configure(Properties properties) throws FlumeException {
    if (isActive()) {
      throw new FlumeException("Attempting to re-configured an already " +
          "configured client!");
    }
    stateLock.lock();
    try {
      // Only the first configured host is used; this client is single-host.
      HostInfo host = HostInfo.getHostInfoList(properties).get(0);
      hostname = host.getHostName();
      port = host.getPortNumber();
      protocol = properties.getProperty(CONFIG_PROTOCOL);
      if (protocol == null) {
        // default is to use the compact protocol.
        protocol = COMPACT_PROTOCOL;
      }
      // check in case that garbage was put in.
      if (!(protocol.equalsIgnoreCase(BINARY_PROTOCOL) ||
            protocol.equalsIgnoreCase(COMPACT_PROTOCOL))) {
        LOGGER.warn("'binary' or 'compact' are the only valid Thrift protocol types to "
            + "choose from. Defaulting to 'compact'.");
        protocol = COMPACT_PROTOCOL;
      }
      batchSize = parseBatchSize(properties);
      requestTimeout = Long.parseLong(properties.getProperty(
          RpcClientConfigurationConstants.CONFIG_REQUEST_TIMEOUT,
          String.valueOf(
              RpcClientConfigurationConstants.DEFAULT_REQUEST_TIMEOUT_MILLIS)));
      // Timeouts under one second are rejected in favor of the default.
      if (requestTimeout < 1000) {
        LOGGER.warn("Request timeout specified less than 1s. " +
            "Using default value instead.");
        requestTimeout =
            RpcClientConfigurationConstants.DEFAULT_REQUEST_TIMEOUT_MILLIS;
      }
      int connectionPoolSize = Integer.parseInt(properties.getProperty(
          RpcClientConfigurationConstants.CONFIG_CONNECTION_POOL_SIZE,
          String.valueOf(RpcClientConfigurationConstants
              .DEFAULT_CONNECTION_POOL_SIZE)));
      if (connectionPoolSize < 1) {
        LOGGER.warn("Connection Pool Size specified is less than 1. " +
            "Using default value instead.");
        connectionPoolSize = RpcClientConfigurationConstants
            .DEFAULT_CONNECTION_POOL_SIZE;
      }
      configureSSL(properties);
      connectionManager = new ConnectionPoolManager(connectionPoolSize);
      connState = State.READY;
    } catch (Throwable ex) {
      //Failed to configure, kill the client.
      connState = State.DEAD;
      if (ex instanceof Error) {
        throw (Error) ex;
      } else if (ex instanceof RuntimeException) {
        throw (RuntimeException) ex;
      }
      throw new FlumeException("Error while configuring RpcClient. ", ex);
    } finally {
      stateLock.unlock();
    }
  }

  // Lifecycle of the client: INIT -> READY (configure) -> DEAD (close/error).
  private static enum State {
    INIT, READY, DEAD
  }

  // Hook allowing subclasses to choose a different framed transport.
  protected TTransport getTransport(TSocket tsocket) throws Exception {
    return new TFastFramedTransport(tsocket);
  }

  /**
   * Wrapper around a client and transport, so we can clean up when this
   * client gets closed.
   */
  private class ClientWrapper {
    public final ThriftSourceProtocol.Client client;
    public final TTransport transport;
    private final int hashCode;

    public ClientWrapper() throws Exception {
      TSocket tsocket;
      if (enableSsl) {
        // JDK6's factory doesn't appear to pass the protocol onto the Socket
        // properly so we have to do some magic to make sure that happens.
        // Not an issue in JDK7 Lifted from thrift-0.9.1 to make the SSLContext
        SSLContext sslContext = createSSLContext(truststore, truststorePassword,
            truststoreType);
        // Create the factory from it
        SSLSocketFactory sslSockFactory = sslContext.getSocketFactory();
        // Create the TSocket from that
        tsocket = createSSLSocket(
            sslSockFactory, hostname, port, 120000, excludeProtocols,
            includeProtocols, excludeCipherSuites, includeCipherSuites);
      } else {
        tsocket = new TSocket(hostname, port);
      }
      transport = getTransport(tsocket);
      // The transport is already open for SSL as part of TSSLTransportFactory.getClientSocket
      if (!transport.isOpen()) {
        transport.open();
      }
      if (protocol.equals(BINARY_PROTOCOL)) {
        LOGGER.info("Using TBinaryProtocol");
        client = new ThriftSourceProtocol.Client(new TBinaryProtocol(transport));
      } else {
        LOGGER.info("Using TCompactProtocol");
        client = new ThriftSourceProtocol.Client(new TCompactProtocol(transport));
      }
      // Not a great hash code, but since this class is immutable and there
      // is at most one instance of the components of this class,
      // this works fine [If the objects are equal, hash code is the same]
      hashCode = random.nextInt();
    }

    public boolean equals(Object o) {
      if (o == null) {
        return false;
      }
      // Since there is only one wrapper with any given client,
      // direct comparison is good enough.
      if (this == o) {
        return true;
      }
      return false;
    }

    public int hashCode() {
      return hashCode;
    }
  }

  /**
   * Simple bounded connection pool. Callers block in checkout() when the pool
   * is exhausted until a connection is checked back in.
   */
  private class ConnectionPoolManager {
    // Idle connections available for checkout.
    private final Queue<ClientWrapper> availableClients;
    // Connections currently handed out to callers.
    private final Set<ClientWrapper> checkedOutClients;
    private final int maxPoolSize;
    private int currentPoolSize;
    private final Lock poolLock;
    // Signaled whenever a connection is returned to availableClients.
    private final Condition availableClientsCondition;

    public ConnectionPoolManager(int poolSize) {
      this.maxPoolSize = poolSize;
      availableClients = new LinkedList<ClientWrapper>();
      checkedOutClients = new HashSet<ClientWrapper>();
      poolLock = new ReentrantLock();
      availableClientsCondition = poolLock.newCondition();
      currentPoolSize = 0;
    }

    // Lazily grows the pool up to maxPoolSize, then blocks for a returned
    // connection.
    public ClientWrapper checkout() throws Exception {
      ClientWrapper ret = null;
      poolLock.lock();
      try {
        if (availableClients.isEmpty() && currentPoolSize < maxPoolSize) {
          ret = new ClientWrapper();
          currentPoolSize++;
          checkedOutClients.add(ret);
          return ret;
        }
        while (availableClients.isEmpty()) {
          availableClientsCondition.await();
        }
        ret = availableClients.poll();
        checkedOutClients.add(ret);
      } finally {
        poolLock.unlock();
      }
      return ret;
    }

    // Returns a healthy connection to the pool and wakes one waiter.
    public void checkIn(ClientWrapper client) {
      poolLock.lock();
      try {
        availableClients.add(client);
        checkedOutClients.remove(client);
        availableClientsCondition.signal();
      } finally {
        poolLock.unlock();
      }
    }

    // Removes a broken connection from the pool; the transport is closed
    // outside the lock.
    public void destroy(ClientWrapper client) {
      poolLock.lock();
      try {
        checkedOutClients.remove(client);
        currentPoolSize--;
      } finally {
        poolLock.unlock();
      }
      client.transport.close();
    }

    public void closeAll() {
      poolLock.lock();
      try {
        for (ClientWrapper c : availableClients) {
          c.transport.close();
          currentPoolSize--;
        }
        // Be cruel and close even the checked out clients. The threads writing
        // using these will now get an exception.
        for (ClientWrapper c : checkedOutClients) {
          c.transport.close();
          currentPoolSize--;
        }
      } finally {
        poolLock.unlock();
      }
    }
  }

  /**
   * Lifted from ACCUMULO-3318 - Lifted from TSSLTransportFactory in Thrift-0.9.1.
   * The method to create a client socket with an SSLContextFactory object is not visible to us.
   * Have to use * SslConnectionParams instead of TSSLTransportParameters because no getters exist
   * on TSSLTransportParameters.
   */
  private static SSLContext createSSLContext(String truststore,
                                             String truststorePassword,
                                             String truststoreType) throws FlumeException {
    SSLContext ctx;
    try {
      ctx = SSLContext.getInstance("TLS");
      TrustManagerFactory tmf;
      tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
      KeyStore ts = null;
      if (truststore != null && truststoreType != null) {
        ts = KeyStore.getInstance(truststoreType);
        ts.load(new FileInputStream(truststore),
            truststorePassword != null ? truststorePassword.toCharArray() : null);
        tmf.init(ts);
      }
      // NOTE(review): when a truststore is configured, tmf.init(ts) has
      // already been called inside the if-block above, making this second
      // call redundant. When ts is null, init(null) falls back to the JVM
      // default trust store.
      tmf.init(ts);
      ctx.init(null, tmf.getTrustManagers(), null);
    } catch (Exception e) {
      throw new FlumeException("Error creating the transport", e);
    }
    return ctx;
  }

  // Builds an SSL socket restricted to the configured protocol and cipher
  // include/exclude lists, then wraps it in a TSocket.
  private static TSocket createSSLSocket(SSLSocketFactory factory, String host,
      int port, int timeout, Set<String> excludeProtocols, Set<String> includeProtocols,
      Set<String> excludeCipherSuites, Set<String> includeCipherSuites)
      throws FlumeException {
    try {
      SSLSocket socket = (SSLSocket) factory.createSocket(host, port);
      socket.setSoTimeout(timeout);
      List<String> enabledProtocols = new ArrayList<String>();
      for (String protocol : socket.getEnabledProtocols()) {
        if ((includeProtocols.isEmpty() || includeProtocols.contains(protocol))
            && !excludeProtocols.contains(protocol)) {
          enabledProtocols.add(protocol);
        }
      }
      socket.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
      List<String> enabledCipherSuites = new ArrayList<String>();
      for (String suite : socket.getEnabledCipherSuites()) {
        if ((includeCipherSuites.isEmpty() || includeCipherSuites.contains(suite))
            && !excludeCipherSuites.contains(suite)) {
          enabledCipherSuites.add(suite);
        }
      }
      socket.setEnabledCipherSuites(enabledCipherSuites.toArray(new String[0]));
      return new TSocket(socket);
    } catch (Exception e) {
      throw new FlumeException("Could not connect to " + host + " on port " + port, e);
    }
  }
}
| 9,677 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/api/LoadBalancingRpcClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.api;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.util.OrderSelector;
import org.apache.flume.util.RandomOrderSelector;
import org.apache.flume.util.RoundRobinOrderSelector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>An implementation of RpcClient interface that uses NettyAvroRpcClient
* instances to load-balance the requests over many different hosts. This
* implementation supports a round-robin scheme or random scheme of doing
* load balancing over the various hosts. To specify round-robin scheme set
* the value of the configuration property <tt>load-balance-type</tt> to
* <tt>round_robin</tt>. Similarly, for random scheme this value should be
* set to <tt>random</tt>, and for a custom scheme the full class name of
* the class that implements the <tt>HostSelector</tt> interface.
* </p>
* <p>
* This implementation also performs basic failover in case the randomly
* selected host is not available for receiving the event.
* </p>
*/
public class LoadBalancingRpcClient extends AbstractRpcClient {

  private static final Logger LOGGER = LoggerFactory
      .getLogger(LoadBalancingRpcClient.class);

  /** Hosts to load-balance over, parsed from the configuration. */
  private List<HostInfo> hosts;
  /** Strategy that decides the order in which hosts are tried. */
  private HostSelector selector;
  /** Lazily created per-host clients, keyed by host reference name. Guarded by {@code this}. */
  private Map<String, RpcClient> clientMap;
  /** Snapshot of the configuration, reused when creating per-host clients. */
  private Properties configurationProperties;
  /** Volatile so close() becomes visible to appenders without locking. */
  private volatile boolean isOpen = false;

  /**
   * Sends a single event to the first host (in selector order) that accepts
   * it. A failing host is reported to the selector so it can be backed off.
   *
   * @throws EventDeliveryException if the client is closed or no host
   *         accepted the event
   */
  @Override
  public void append(Event event) throws EventDeliveryException {
    throwIfClosed();
    boolean eventSent = false;
    Iterator<HostInfo> it = selector.createHostIterator();

    while (it.hasNext()) {
      HostInfo host = it.next();
      try {
        RpcClient client = getClient(host);
        client.append(event);
        eventSent = true;
        break;
      } catch (Exception ex) {
        selector.informFailure(host);
        LOGGER.warn("Failed to send event to host " + host, ex);
      }
    }
    if (!eventSent) {
      throw new EventDeliveryException("Unable to send event to any host");
    }
  }

  /**
   * Sends a batch of events to the first host (in selector order) that
   * accepts it. A failing host is reported to the selector for backoff.
   *
   * @throws EventDeliveryException if the client is closed or no host
   *         accepted the batch
   */
  @Override
  public void appendBatch(List<Event> events) throws EventDeliveryException {
    throwIfClosed();
    boolean batchSent = false;
    Iterator<HostInfo> it = selector.createHostIterator();

    while (it.hasNext()) {
      HostInfo host = it.next();
      try {
        RpcClient client = getClient(host);
        client.appendBatch(events);
        batchSent = true;
        break;
      } catch (Exception ex) {
        selector.informFailure(host);
        LOGGER.warn("Failed to send batch to host " + host, ex);
      }
    }
    if (!batchSent) {
      throw new EventDeliveryException("Unable to send batch to any host");
    }
  }

  @Override
  public boolean isActive() {
    return isOpen;
  }

  private void throwIfClosed() throws EventDeliveryException {
    if (!isOpen) {
      throw new EventDeliveryException("Rpc Client is closed");
    }
  }

  /**
   * Closes all per-host clients. Failures to close an individual client are
   * logged and do not prevent the remaining clients from being closed.
   */
  @Override
  public void close() throws FlumeException {
    isOpen = false;
    synchronized (this) {
      for (Map.Entry<String, RpcClient> entry : clientMap.entrySet()) {
        RpcClient client = entry.getValue();
        if (client != null) {
          try {
            client.close();
          } catch (Exception ex) {
            LOGGER.warn("Failed to close client: " + entry.getKey(), ex);
          }
        }
      }
      clientMap.clear();
    }
  }

  /**
   * Configures the host list and the host-selection policy. At least two
   * hosts are required; the selector defaults to round-robin, and a custom
   * selector may be named by its fully qualified class name.
   *
   * @throws FlumeException on invalid configuration or if a custom selector
   *         cannot be instantiated
   */
  @Override
  protected void configure(Properties properties) throws FlumeException {
    clientMap = new HashMap<String, RpcClient>();
    configurationProperties = new Properties();
    configurationProperties.putAll(properties);
    hosts = HostInfo.getHostInfoList(properties);
    if (hosts.size() < 2) {
      throw new FlumeException("At least two hosts are required to use the "
          + "load balancing RPC client.");
    }
    String lbTypeName = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_HOST_SELECTOR,
        RpcClientConfigurationConstants.HOST_SELECTOR_ROUND_ROBIN);
    // parseBoolean avoids the needless boxing of Boolean.valueOf.
    boolean backoff = Boolean.parseBoolean(properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_BACKOFF,
        String.valueOf(false)));
    String maxBackoffStr = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_MAX_BACKOFF);
    long maxBackoff = 0;
    if (maxBackoffStr != null) {
      maxBackoff = Long.parseLong(maxBackoffStr);
    }
    if (lbTypeName.equalsIgnoreCase(
        RpcClientConfigurationConstants.HOST_SELECTOR_ROUND_ROBIN)) {
      selector = new RoundRobinHostSelector(backoff, maxBackoff);
    } else if (lbTypeName.equalsIgnoreCase(
        RpcClientConfigurationConstants.HOST_SELECTOR_RANDOM)) {
      selector = new RandomOrderHostSelector(backoff, maxBackoff);
    } else {
      try {
        @SuppressWarnings("unchecked")
        Class<? extends HostSelector> klass = (Class<? extends HostSelector>)
            Class.forName(lbTypeName);
        // getDeclaredConstructor().newInstance() replaces the deprecated
        // Class.newInstance(); any reflective failure is wrapped below.
        selector = klass.getDeclaredConstructor().newInstance();
      } catch (Exception ex) {
        throw new FlumeException("Unable to instantiate host selector: "
            + lbTypeName, ex);
      }
    }
    selector.setHosts(hosts);
    batchSize = parseBatchSize(properties);
    isOpen = true;
  }

  /**
   * Returns a working client for the given host, creating one on first use
   * and replacing any client that is no longer active.
   */
  private synchronized RpcClient getClient(HostInfo info)
      throws FlumeException, EventDeliveryException {
    throwIfClosed();
    String name = info.getReferenceName();
    RpcClient client = clientMap.get(name);
    if (client == null) {
      client = createClient(name);
      clientMap.put(name, client);
    } else if (!client.isActive()) {
      // Best-effort close of the stale client before replacing it.
      try {
        client.close();
      } catch (Exception ex) {
        LOGGER.warn("Failed to close client for " + info, ex);
      }
      client = createClient(name);
      clientMap.put(name, client);
    }
    return client;
  }

  private RpcClient createClient(String referenceName) throws FlumeException {
    Properties props = getClientConfigurationProperties(referenceName);
    return RpcClientFactory.getInstance(props);
  }

  /**
   * Derives a single-host client configuration from this client's
   * configuration, targeting only the named host.
   */
  private Properties getClientConfigurationProperties(String referenceName) {
    Properties props = new Properties();
    props.putAll(configurationProperties);
    props.put(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE,
        RpcClientFactory.ClientType.DEFAULT);
    props.put(RpcClientConfigurationConstants.CONFIG_HOSTS, referenceName);
    return props;
  }

  /**
   * Pluggable policy for ordering hosts and tracking host failures.
   * Implementations must be safe for use from multiple appender threads.
   */
  public interface HostSelector {

    void setHosts(List<HostInfo> hosts);

    Iterator<HostInfo> createHostIterator();

    void informFailure(HostInfo failedHost);
  }

  /**
   * A host selector that implements the round-robin host selection policy.
   */
  private static class RoundRobinHostSelector implements HostSelector {

    private OrderSelector<HostInfo> selector;

    RoundRobinHostSelector(boolean backoff, long maxBackoff) {
      selector = new RoundRobinOrderSelector<HostInfo>(backoff);
      if (maxBackoff != 0) {
        selector.setMaxTimeOut(maxBackoff);
      }
    }

    @Override
    public synchronized Iterator<HostInfo> createHostIterator() {
      return selector.createIterator();
    }

    @Override
    public synchronized void setHosts(List<HostInfo> hosts) {
      selector.setObjects(hosts);
    }

    @Override
    public synchronized void informFailure(HostInfo failedHost) {
      selector.informFailure(failedHost);
    }
  }

  /**
   * A host selector that picks hosts in a random order.
   */
  private static class RandomOrderHostSelector implements HostSelector {

    private OrderSelector<HostInfo> selector;

    RandomOrderHostSelector(boolean backoff, long maxBackoff) {
      selector = new RandomOrderSelector<HostInfo>(backoff);
      if (maxBackoff != 0) {
        selector.setMaxTimeOut(maxBackoff);
      }
    }

    @Override
    public synchronized Iterator<HostInfo> createHostIterator() {
      return selector.createIterator();
    }

    @Override
    public synchronized void setHosts(List<HostInfo> hosts) {
      selector.setObjects(hosts);
    }

    // Synchronized for consistency with the other methods of this class and
    // with RoundRobinHostSelector; the underlying selector is shared state.
    @Override
    public synchronized void informFailure(HostInfo failedHost) {
      selector.informFailure(failedHost);
    }
  }
}
| 9,678 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/event/JSONEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.event;
import java.io.UnsupportedEncodingException;
import java.util.Map;
import org.apache.flume.Event;
import org.apache.flume.FlumeException;
/**
*
*/
/**
 * An {@link Event} whose body is held as a String and converted to/from
 * bytes using a configurable charset (UTF-8 by default).
 */
public class JSONEvent implements Event {
  private Map<String, String> headers;
  private String body;
  // Not part of the serialized form; used for both encoding and decoding.
  private transient String charset = "UTF-8";

  @Override
  public Map<String, String> getHeaders() {
    return headers;
  }

  @Override
  public void setHeaders(Map<String, String> headers) {
    this.headers = headers;
  }

  /**
   * Returns the body encoded with the configured charset, or an empty array
   * if no body has been set.
   *
   * @throws FlumeException if the configured charset is not supported
   */
  @Override
  public byte[] getBody() {
    if (body != null) {
      try {
        return body.getBytes(charset);
      } catch (UnsupportedEncodingException ex) {
        throw new FlumeException(String.format("%s encoding not supported", charset), ex);
      }
    } else {
      return new byte[0];
    }
  }

  /**
   * Stores the body, decoding it with the configured charset so that
   * {@link #getBody()} round-trips the same bytes. Previously the platform
   * default charset was used here while getBody() used the configured one,
   * which could corrupt non-ASCII bodies on non-UTF-8 platforms.
   *
   * @throws FlumeException if the configured charset is not supported
   */
  @Override
  public void setBody(byte[] body) {
    if (body != null) {
      try {
        this.body = new String(body, charset);
      } catch (UnsupportedEncodingException ex) {
        throw new FlumeException(String.format("%s encoding not supported", charset), ex);
      }
    } else {
      this.body = "";
    }
  }

  /** Sets the charset used to encode and decode the body. */
  public void setCharset(String charset) {
    this.charset = charset;
  }
}
| 9,679 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/event/EventBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.event;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import org.apache.flume.Event;
/**
 * Static factory methods for constructing {@link SimpleEvent} instances
 * from raw bytes or strings, optionally with headers.
 */
public class EventBuilder {

  /**
   * Creates an event carrying the given body and a defensive copy of the
   * given headers.
   *
   * @param body event payload; {@code null} is treated as an empty body
   * @param headers event headers; ignored when {@code null}
   * @return a newly created event
   */
  public static Event withBody(byte[] body, Map<String, String> headers) {
    Event event = new SimpleEvent();
    event.setBody(body == null ? new byte[0] : body);
    if (headers != null) {
      // Copy so later mutation of the caller's map does not affect the event.
      event.setHeaders(new HashMap<String, String>(headers));
    }
    return event;
  }

  /** Creates an event with the given body and no headers. */
  public static Event withBody(byte[] body) {
    return withBody(body, null);
  }

  /** Creates an event whose body is {@code body} encoded with {@code charset}. */
  public static Event withBody(String body, Charset charset,
                               Map<String, String> headers) {
    return withBody(body.getBytes(charset), headers);
  }

  /** Creates an event whose body is {@code body} encoded with {@code charset}, no headers. */
  public static Event withBody(String body, Charset charset) {
    return withBody(body, charset, null);
  }
}
| 9,680 |
0 | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-sdk/src/main/java/org/apache/flume/event/SimpleEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.event;
import java.util.HashMap;
import java.util.Map;
import org.apache.flume.Event;
/**
 * Basic {@link Event} implementation holding a mutable header map and a
 * byte-array body. A {@code null} body is normalized to an empty array.
 */
public class SimpleEvent implements Event {

  private Map<String, String> headers;
  private byte[] body;

  /** Creates an event with no headers and a zero-length body. */
  public SimpleEvent() {
    this.headers = new HashMap<String, String>();
    this.body = new byte[0];
  }

  @Override
  public Map<String, String> getHeaders() {
    return headers;
  }

  @Override
  public void setHeaders(Map<String, String> headers) {
    this.headers = headers;
  }

  @Override
  public byte[] getBody() {
    return body;
  }

  /** Stores the body; a {@code null} argument is replaced by an empty array. */
  @Override
  public void setBody(byte[] body) {
    this.body = (body == null) ? new byte[0] : body;
  }

  @Override
  public String toString() {
    // body can never be null here (constructor and setBody both normalize),
    // but keep the defensive check so the rendered text stays identical.
    Object bodyLen = (body == null) ? null : Integer.valueOf(body.length);
    return "[Event headers = " + headers + ", body.length = " + bodyLen + " ]";
  }
}
| 9,681 |
0 | Create_ds/flume/.mvn | Create_ds/flume/.mvn/wrapper/MavenWrapperDownloader.java | /*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
/**
 * Bootstrap downloader for the Maven wrapper jar. Reads an optional
 * {@code wrapperUrl} override from maven-wrapper.properties and downloads
 * the jar to .mvn/wrapper/maven-wrapper.jar under the given base directory.
 */
public class MavenWrapperDownloader {

  private static final String WRAPPER_VERSION = "0.5.6";
  /**
   * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
   */
  private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
      + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";

  /**
   * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
   * use instead of the default one.
   */
  private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
      ".mvn/wrapper/maven-wrapper.properties";

  /**
   * Path where the maven-wrapper.jar will be saved to.
   */
  private static final String MAVEN_WRAPPER_JAR_PATH =
      ".mvn/wrapper/maven-wrapper.jar";

  /**
   * Name of the property which should be used to override the default download url for the wrapper.
   */
  private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

  /**
   * Entry point. {@code args[0]} is the project base directory. Exits with
   * status 0 on success and 1 on download failure.
   */
  public static void main(String[] args) {
    System.out.println("- Downloader started");
    File baseDirectory = new File(args[0]);
    System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());

    // If the maven-wrapper.properties exists, read it and check if it contains a custom
    // wrapperUrl parameter.
    File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
    String url = DEFAULT_DOWNLOAD_URL;
    if (mavenWrapperPropertyFile.exists()) {
      // try-with-resources guarantees the stream is closed even on failure,
      // replacing the manual close-in-finally boilerplate.
      try (FileInputStream mavenWrapperPropertyFileInputStream =
               new FileInputStream(mavenWrapperPropertyFile)) {
        Properties mavenWrapperProperties = new Properties();
        mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
        url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
      } catch (IOException e) {
        System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
      }
    }
    System.out.println("- Downloading from: " + url);

    File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
    if (!outputFile.getParentFile().exists()) {
      if (!outputFile.getParentFile().mkdirs()) {
        System.out.println(
            "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
      }
    }
    System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
    try {
      downloadFileFromURL(url, outputFile);
      System.out.println("Done");
      System.exit(0);
    } catch (Throwable e) {
      System.out.println("- Error downloading");
      e.printStackTrace();
      System.exit(1);
    }
  }

  /**
   * Downloads {@code urlString} to {@code destination}, authenticating with
   * the MVNW_USERNAME/MVNW_PASSWORD environment variables when both are set.
   *
   * @throws Exception on any network or I/O failure
   */
  private static void downloadFileFromURL(String urlString, File destination) throws Exception {
    if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
      String username = System.getenv("MVNW_USERNAME");
      char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
      Authenticator.setDefault(new Authenticator() {
        @Override
        protected PasswordAuthentication getPasswordAuthentication() {
          return new PasswordAuthentication(username, password);
        }
      });
    }
    URL website = new URL(urlString);
    // try-with-resources closes both the channel and the output stream even
    // if transferFrom throws; the original leaked them on failure.
    try (ReadableByteChannel rbc = Channels.newChannel(website.openStream());
         FileOutputStream fos = new FileOutputStream(destination)) {
      fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
    }
  }
}
| 9,682 |
0 | Create_ds/flume/flume-ng-channels/flume-spillable-memory-channel/src/test/java/org/apache/flume | Create_ds/flume/flume-ng-channels/flume-spillable-memory-channel/src/test/java/org/apache/flume/channel/TestSpillableMemoryChannel.java | /*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelFullException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.channel.file.FileChannelConfiguration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
public class TestSpillableMemoryChannel {
// Channel under test; recreated for each test by setUp().
private SpillableMemoryChannel channel;

// Fresh temporary directory per test, holding the overflow FileChannel's
// checkpoint and data directories.
@Rule
public TemporaryFolder fileChannelDir = new TemporaryFolder();

/**
 * Builds a Context pointing the overflow FileChannel at this test's temp
 * directories — creating them on first use and reusing them afterwards so
 * a restarted channel can recover previously spilled events — then applies
 * it (plus any overrides) to the channel.
 */
private void configureChannel(Map<String, String> overrides) {
  try {
    Context context = new Context();
    String checkpoint = "checkpoint";
    String data = "data";
    Path rootPath = fileChannelDir.getRoot().toPath();
    File checkPointDir;
    File dataDir;
    if (!Files.exists(rootPath.resolve(checkpoint))) {
      checkPointDir = fileChannelDir.newFolder(checkpoint);
      dataDir = fileChannelDir.newFolder(data);
    } else {
      // Reuse existing dirs so a channel restart sees the spilled events.
      checkPointDir = rootPath.resolve(checkpoint).toFile();
      dataDir = rootPath.resolve(data).toFile();
    }
    context.put(FileChannelConfiguration.CHECKPOINT_DIR, checkPointDir.getAbsolutePath());
    context.put(FileChannelConfiguration.DATA_DIRS, dataDir.getAbsolutePath());
    // Set checkpoint for 5 seconds otherwise test will run out of memory
    context.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "5000");
    if (overrides != null) {
      context.putAll(overrides);
    }
    Configurables.configure(channel, context);
  } catch (IOException iox) {
    throw new UncheckedIOException(iox);
  }
}

// Applies a new configuration to the same channel object and bounces it.
private void reconfigureChannel(Map<String, String> overrides) {
  configureChannel(overrides);
  channel.stop();
  channel.start();
}

// Configures and starts the channel under test.
private void startChannel(Map<String, String> params) {
  configureChannel(params);
  channel.start();
}

// performs a hard restart of the channel... creates a new channel object
private void restartChannel(Map<String, String> params) {
  channel.stop();
  setUp();
  startChannel(params);
}
// Thrown by takeN() when take() unexpectedly returns null; carries the
// sequence number that was expected so the caller can resume from it.
static class NullFound extends RuntimeException {
  // Sequence number of the event that was expected but not available.
  public int expectedValue;

  public NullFound(int expected) {
    super("Expected " + expected + ", but null found");
    expectedValue = expected;
  }
}

// Thrown when a draining thread sees far more null takes than events,
// indicating the channel has stopped making progress.
static class TooManyNulls extends RuntimeException {
  private int nullsFound;

  public TooManyNulls(int count) {
    super("Total nulls found in thread ("
        + Thread.currentThread().getName() + ") : " + count);
    nullsFound = count;
  }
}
// Fresh channel with a unique name per test so FileChannel state (which is
// registered by channel name) never clashes between tests.
@Before
public void setUp() {
  channel = new SpillableMemoryChannel();
  channel.setName("spillChannel-" + UUID.randomUUID());
}

@After
public void tearDown() {
  channel.stop();
}
// Puts `count` events whose bodies are the consecutive integers starting
// at `first`. No transaction here; the caller manages it.
private static void putN(int first, int count, AbstractChannel channel) {
  for (int i = 0; i < count; ++i) {
    channel.put(EventBuilder.withBody(String.valueOf(first++).getBytes()));
  }
}

// Takes one event and discards it; used where the caller expects a null.
private static void takeNull(AbstractChannel channel) {
  channel.take();
}

// Takes `count` events and asserts their bodies are the consecutive
// integers starting at `first`. Throws NullFound on a premature null.
private static void takeN(int first, int count, AbstractChannel channel) {
  int last = first + count;
  for (int i = first; i < last; ++i) {
    Event e = channel.take();
    if (e == null) {
      throw new NullFound(i);
    }
    Event expected = EventBuilder.withBody(String.valueOf(i).getBytes());
    Assert.assertArrayEquals(e.getBody(), expected.getBody());
  }
}

// returns the number of non null events found
private static int takeN_NoCheck(int batchSize, AbstractChannel channel) {
  int i = 0;
  for (; i < batchSize; ++i) {
    Event e = channel.take();
    if (e == null) {
      // Yield briefly before reporting a short batch.
      try {
        Thread.sleep(0);
      } catch (InterruptedException ex) { /* ignore */ }
      return i;
    }
  }
  return i;
}

// putN wrapped in a transaction: commits on success, rolls back on error.
private static void transactionalPutN(int first, int count, AbstractChannel channel) {
  Transaction tx = channel.getTransaction();
  tx.begin();
  try {
    putN(first, count, channel);
    tx.commit();
  } catch (RuntimeException e) {
    tx.rollback();
    throw e;
  } finally {
    tx.close();
  }
}

// takeN wrapped in a transaction. Note: NullFound still COMMITS — the
// events taken so far are consumed — and is then rethrown for the caller.
private static void transactionalTakeN(int first, int count, AbstractChannel channel) {
  Transaction tx = channel.getTransaction();
  tx.begin();
  try {
    takeN(first, count, channel);
    tx.commit();
  } catch (NullFound e) {
    tx.commit();
    throw e;
  } catch (AssertionError e) {
    tx.rollback();
    throw e;
  } catch (RuntimeException e) {
    tx.rollback();
    throw e;
  } finally {
    tx.close();
  }
}

// takeN_NoCheck wrapped in a transaction; returns the count actually taken.
private static int transactionalTakeN_NoCheck(int count, AbstractChannel channel) {
  Transaction tx = channel.getTransaction();
  tx.begin();
  try {
    int eventCount = takeN_NoCheck(count, channel);
    tx.commit();
    return eventCount;
  } catch (RuntimeException e) {
    tx.rollback();
    throw e;
  } finally {
    tx.close();
  }
}

// Performs `count` takes, each expected to return null, in one transaction.
private static void transactionalTakeNull(int count, AbstractChannel channel) {
  Transaction tx = channel.getTransaction();
  tx.begin();
  try {
    for (int i = 0; i < count; ++i) {
      takeNull(channel);
    }
    tx.commit();
  } catch (AssertionError e) {
    tx.rollback();
    throw e;
  } catch (RuntimeException e) {
    tx.rollback();
    throw e;
  } finally {
    tx.close();
  }
}
/**
 * Builds (but does not start) a thread that puts {@code count} sequential
 * events beginning at {@code first}, in transactions of {@code batchSize}.
 * (The unused local {@code maxdepth} from the original has been removed.)
 */
private Thread makePutThread(String threadName, final int first, final int count,
                             final int batchSize, final AbstractChannel channel) {
  return new Thread(threadName) {
    public void run() {
      StopWatch watch = new StopWatch();
      for (int i = first; i < first + count; i = i + batchSize) {
        transactionalPutN(i, batchSize, channel);
      }
      watch.elapsed();
    }
  };
}
/**
 * Builds (but does not start) a thread that takes and verifies
 * {@code count} sequential events beginning at {@code first}, in
 * transactions of {@code batchSize}. When a take comes up empty the thread
 * retries from the sequence number that was expected.
 */
private static Thread makeTakeThread(String threadName, final int first, final int count,
                                     final int batchSize, final AbstractChannel channel) {
  return new Thread(threadName) {
    public void run() {
      StopWatch watch = new StopWatch();
      int next = first;
      while (next < first + count) {
        try {
          transactionalTakeN(next, batchSize, channel);
          next += batchSize;
        } catch (NullFound e) {
          // Resume from the event that was not yet available.
          next = e.expectedValue;
        }
      }
      watch.elapsed();
    }
  };
}
/**
 * Builds a thread that drains `totalEvents` events without checking their
 * contents, sleeping briefly whenever the channel runs dry, and failing
 * with TooManyNulls if it needs more than 3x `totalEvents` attempts.
 */
private static Thread makeTakeThread_noCheck(String threadName, final int totalEvents,
                                             final int batchSize, final AbstractChannel channel) {
  return new Thread(threadName) {
    public void run() {
      int batchSz = batchSize;
      StopWatch watch = new StopWatch();
      int i = 0, attempts = 0;
      while (i < totalEvents) {
        int remaining = totalEvents - i;
        // Shrink the final batch so we never try to take past totalEvents.
        batchSz = (remaining > batchSz) ? batchSz : remaining;
        int takenCount = transactionalTakeN_NoCheck(batchSz, channel);
        if (takenCount < batchSz) {
          // Channel ran dry; back off briefly before retrying.
          try {
            Thread.sleep(20);
          } catch (InterruptedException ex) { /* ignore */ }
        }
        i += takenCount;
        ++attempts;
        if (attempts > totalEvents * 3) {
          throw new TooManyNulls(attempts);
        }
      }
      watch.elapsed(" items = " + i + ", attempts = " + attempts);
    }
  };
}
// Basic sanity: events put within memory capacity come back in order.
@Test
public void testPutTake() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "5");
  params.put("overflowCapacity", "5");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "5");
  startChannel(params);

  Transaction tx = channel.getTransaction();
  tx.begin();
  putN(0, 2, channel);
  tx.commit();
  tx.close();

  tx = channel.getTransaction();
  tx.begin();
  takeN(0, 2, channel);
  tx.commit();
  tx.close();
}

// With overflow disabled, puts beyond memoryCapacity must fail, and the
// channel must be empty once the in-memory events are drained.
@Test
public void testCapacityDisableOverflow() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "2");
  params.put("overflowCapacity", "0"); // overflow is disabled effectively
  params.put("overflowTimeout", "0");
  startChannel(params);

  transactionalPutN(0, 2, channel);

  boolean threw = false;
  try {
    transactionalPutN(2, 1, channel);
  } catch (ChannelException e) {
    // ChannelFullException is a subtype of ChannelException.
    threw = true;
  }
  Assert.assertTrue("Expecting ChannelFullException to be thrown", threw);

  transactionalTakeN(0, 2, channel);

  Transaction tx = channel.getTransaction();
  tx.begin();
  Assert.assertNull(channel.take());
  tx.commit();
  tx.close();
}

// Puts beyond combined memory + overflow capacity must throw; everything
// already accepted must still be retrievable in order.
@Test
public void testCapacityWithOverflow() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "2");
  params.put("overflowCapacity", "4");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "3");
  params.put("overflowTimeout", "0");
  startChannel(params);

  transactionalPutN(1, 2, channel);
  transactionalPutN(3, 2, channel);
  transactionalPutN(5, 2, channel);

  boolean threw = false;
  try {
    transactionalPutN(7, 2, channel); // cannot fit in channel
  } catch (ChannelFullException e) {
    threw = true;
  }
  Assert.assertTrue("Expecting ChannelFullException to be thrown", threw);

  transactionalTakeN(1, 2, channel);
  transactionalTakeN(3, 2, channel);
  transactionalTakeN(5, 2, channel);
}
// After a restart only overflow (FileChannel) events survive; the
// in-memory portion is lost by design.
@Test
public void testRestart() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "2");
  params.put("overflowCapacity", "10");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "4");
  params.put("overflowTimeout", "0");
  startChannel(params);

  transactionalPutN(1, 2, channel);
  transactionalPutN(3, 2, channel); // goes in overflow

  restartChannel(params);

  // from overflow, as in memory stuff should be lost
  transactionalTakeN(3, 2, channel);
}

// Smoke test with huge capacities: all events stay in memory and are
// retrievable in order. (NOTE(review): unlike testOverflow below, the
// memoryCapacity here is 10M, so nothing actually spills to overflow.)
@Test
public void testBasicStart() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "10000000");
  params.put("overflowCapacity", "20000000");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "10");
  params.put("overflowTimeout", "1");
  startChannel(params);

  transactionalPutN(1, 5, channel);
  transactionalPutN(6, 5, channel);
  transactionalPutN(11, 5, channel); // fits in memory; capacity is huge

  transactionalTakeN(1, 10, channel);
  transactionalTakeN(11, 5, channel);
}

// Same sequence as testBasicStart but with memoryCapacity 10, so the
// third batch spills to overflow; order must still be preserved.
@Test
public void testOverflow() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "10");
  params.put("overflowCapacity", "20");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "10");
  params.put("overflowTimeout", "1");
  startChannel(params);

  transactionalPutN(1, 5, channel);
  transactionalPutN(6, 5, channel);
  transactionalPutN(11, 5, channel); // these should go to overflow

  transactionalTakeN(1, 10, channel);
  transactionalTakeN(11, 5, channel);
}
// Events must drain in FIFO order across the memory/overflow boundary,
// even when new puts are interleaved with takes.
@Test
public void testDrainOrder() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "10");
  params.put("overflowCapacity", "10");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "5");
  params.put("overflowTimeout", "1");
  startChannel(params);

  transactionalPutN(1, 5, channel);
  transactionalPutN(6, 5, channel);
  transactionalPutN(11, 5, channel); // into overflow
  transactionalPutN(16, 5, channel); // into overflow

  transactionalTakeN(1, 1, channel);
  transactionalTakeN(2, 5, channel);
  transactionalTakeN(7, 4, channel);

  transactionalPutN(20, 2, channel);
  transactionalPutN(22, 3, channel);

  transactionalTakeN(11, 3, channel); // from overflow
  transactionalTakeN(14, 5, channel); // from overflow
  transactionalTakeN(19, 2, channel); // from overflow
}

// byteCapacity limits the in-memory portion; once memory bytes and the
// overflow are both full, further puts must throw ChannelFullException.
@Test
public void testByteCapacity() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "1000");
  // configure to hold 8 events of 10 bytes each (plus 20% event header space)
  params.put("byteCapacity", "100");
  params.put("avgEventSize", "10");
  params.put("overflowCapacity", "20");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "10");
  params.put("overflowTimeout", "1");
  startChannel(params);

  transactionalPutN(1, 8, channel); // this will max the byteCapacity
  transactionalPutN(9, 10, channel);
  transactionalPutN(19, 10, channel); // this will fill up the overflow

  boolean threw = false;
  try {
    transactionalPutN(11, 1, channel); // into overflow
  } catch (ChannelFullException e) {
    threw = true;
  }
  Assert.assertTrue("byteCapacity did not throw as expected", threw);
}
// A take that crosses the memory/overflow boundary mid-transaction must
// return null at the boundary (events on the far side are not visible
// within the same transaction).
@Test
public void testDrainingOnChannelBoundary() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "5");
  params.put("overflowCapacity", "15");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "10");
  params.put("overflowTimeout", "1");
  startChannel(params);

  transactionalPutN(1, 5, channel);
  transactionalPutN(6, 5, channel); // into overflow
  transactionalPutN(11, 5, channel); // into overflow
  transactionalPutN(16, 5, channel); // into overflow

  transactionalTakeN(1, 3, channel);

  Transaction tx = channel.getTransaction();
  tx.begin();
  takeN(4, 2, channel);
  takeNull(channel); // expect null since next event is in overflow
  tx.commit();
  tx.close();

  transactionalTakeN(6, 5, channel); // from overflow
  transactionalTakeN(11, 5, channel); // from overflow
  transactionalTakeN(16, 2, channel); // from overflow

  transactionalPutN(21, 5, channel);

  tx = channel.getTransaction();
  tx.begin();
  takeN(18, 3, channel); // from overflow
  takeNull(channel); // expect null since next event is in primary
  tx.commit();
  tx.close();

  transactionalTakeN(21, 5, channel);
}

// Rolled-back puts must not become visible and rolled-back takes must
// restore the events; the channel must behave normally afterwards.
@Test
public void testRollBack() {
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "100");
  params.put("overflowCapacity", "900");
  params.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "900");
  params.put("overflowTimeout", "0");
  startChannel(params);

  //1 Rollback for Puts
  transactionalPutN(1, 5, channel);
  Transaction tx = channel.getTransaction();
  tx.begin();
  putN(6, 5, channel);
  tx.rollback();
  tx.close();

  transactionalTakeN(1, 5, channel);
  transactionalTakeNull(2, channel);

  //2. verify things back to normal after put rollback
  transactionalPutN(11, 5, channel);
  transactionalTakeN(11, 5, channel);

  //3 Rollback for Takes
  transactionalPutN(16, 5, channel);
  tx = channel.getTransaction();
  tx.begin();
  takeN(16, 5, channel);
  takeNull(channel);
  tx.rollback();
  tx.close();

  transactionalTakeN_NoCheck(5, channel);

  //4. verify things back to normal after take rollback
  transactionalPutN(21, 5, channel);
  transactionalTakeN(21, 5, channel);
}
@Test
public void testReconfigure() {
  //1) bring up with small capacity (memory only, overflow disabled)
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "10");
  params.put("overflowCapacity", "0");
  params.put("overflowTimeout", "0");
  startChannel(params);
  Assert.assertTrue("overflowTimeout setting did not reconfigure correctly",
      channel.getOverflowTimeout() == 0);
  Assert.assertTrue("memoryCapacity did not reconfigure correctly",
      channel.getMemoryCapacity() == 10);
  Assert.assertTrue("overflowCapacity did not reconfigure correctly",
      channel.isOverflowDisabled());
  transactionalPutN(1, 10, channel);
  boolean threw = false;
  try {
    transactionalPutN(11, 10, channel); // should throw an error
  } catch (ChannelException e) {
    threw = true;
  }
  Assert.assertTrue("Expected the channel to fill up and throw an exception, "
      + "but it did not throw", threw);

  //2) Resize and verify; omitted overflowTimeout must revert to the default
  params = new HashMap<String, String>();
  params.put("memoryCapacity", "20");
  params.put("overflowCapacity", "0");
  reconfigureChannel(params);
  Assert.assertTrue("overflowTimeout setting did not reconfigure correctly",
      channel.getOverflowTimeout() ==
          SpillableMemoryChannel.defaultOverflowTimeout);
  Assert.assertTrue("memoryCapacity did not reconfigure correctly",
      channel.getMemoryCapacity() == 20);
  Assert.assertTrue("overflowCapacity did not reconfigure correctly",
      channel.isOverflowDisabled());
  // pull out the values inserted prior to reconfiguration
  transactionalTakeN(1, 10, channel);
  transactionalPutN(11, 10, channel);
  transactionalPutN(21, 10, channel);
  threw = false;
  try {
    transactionalPutN(31, 10, channel); // should throw an error
  } catch (ChannelException e) {
    threw = true;
  }
  Assert.assertTrue("Expected the channel to fill up and throw an exception, "
      + "but it did not throw", threw);
  transactionalTakeN(11, 10, channel);
  transactionalTakeN(21, 10, channel);

  // 3) Reconfigure with empty config and verify settings revert to default
  params = new HashMap<String, String>();
  reconfigureChannel(params);
  Assert.assertTrue("overflowTimeout setting did not reconfigure correctly",
      channel.getOverflowTimeout() ==
          SpillableMemoryChannel.defaultOverflowTimeout);
  Assert.assertTrue("memoryCapacity did not reconfigure correctly",
      channel.getMemoryCapacity() == SpillableMemoryChannel.defaultMemoryCapacity);
  Assert.assertTrue("overflowCapacity did not reconfigure correctly",
      channel.getOverflowCapacity() ==
          SpillableMemoryChannel.defaultOverflowCapacity);
  Assert.assertFalse("overflowCapacity did not reconfigure correctly",
      channel.isOverflowDisabled());

  // 4) Reconfiguring of overflow: fill both memory (10) and overflow (10)
  params = new HashMap<String, String>();
  params.put("memoryCapacity", "10");
  params.put("overflowCapacity", "10");
  params.put("transactionCapacity", "5");
  params.put("overflowTimeout", "1");
  reconfigureChannel(params);
  transactionalPutN(1, 5, channel);
  transactionalPutN(6, 5, channel);
  transactionalPutN(11, 5, channel);
  transactionalPutN(16, 5, channel);
  threw = false;
  try {
    // should error out as both primary & overflow are full
    transactionalPutN(21, 5, channel);
  } catch (ChannelException e) {
    threw = true;
  }
  Assert.assertTrue("Expected the last insertion to fail, but it didn't.", threw);

  // reconfig the overflow to twice the size
  params = new HashMap<String, String>();
  params.put("memoryCapacity", "10");
  params.put("overflowCapacity", "20");
  params.put("transactionCapacity", "10");
  params.put("overflowTimeout", "1");
  reconfigureChannel(params);
  // should succeed now as we have made room in the overflow
  transactionalPutN(21, 5, channel);
  transactionalTakeN(1, 10, channel);
  transactionalTakeN(11, 5, channel);
  transactionalTakeN(16, 5, channel);
  transactionalTakeN(21, 5, channel);
}
@Test
public void testParallelSingleSourceAndSink() throws InterruptedException {
  // Memory-only channel large enough for all events; one producer and one
  // consumer run concurrently and the test just verifies clean completion.
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "1000020");
  params.put("overflowCapacity", "0");
  params.put("overflowTimeout", "3");
  startChannel(params);

  // run source and sink concurrently
  Thread sourceThd = makePutThread("src", 1, 500000, 100, channel);
  Thread sinkThd = makeTakeThread("sink", 1, 500000, 100, channel);

  StopWatch watch = new StopWatch();

  sinkThd.start();
  sourceThd.start();

  sourceThd.join();
  sinkThd.join();

  watch.elapsed(); // timing printed for manual inspection only
  System.out.println("Max Queue size " + channel.getMaxMemQueueSize());
}
@Test
public void testCounters() throws InterruptedException {
  // Verifies that the channel counter tracks size and put/take
  // attempt/success counts across memory fill, overflow fill, drain,
  // and a concurrent mixed workload.
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "5000");
  params.put("overflowCapacity", "5000");
  params.put("transactionCapacity", "5000");
  params.put("overflowTimeout", "0");
  startChannel(params);
  Assert.assertTrue("channel.channelCounter should have started",
      channel.channelCounter.getStartTime() > 0);

  //1. fill up mem queue
  Thread sourceThd = makePutThread("src", 1, 5000, 2500, channel);
  sourceThd.start();
  sourceThd.join();
  Assert.assertEquals(5000, channel.getTotalStored());
  Assert.assertEquals(5000, channel.channelCounter.getChannelSize());
  Assert.assertEquals(5000, channel.channelCounter.getEventPutAttemptCount());
  Assert.assertEquals(5000, channel.channelCounter.getEventPutSuccessCount());

  //2. empty mem queue
  Thread sinkThd = makeTakeThread("sink", 1, 5000, 1000, channel);
  sinkThd.start();
  sinkThd.join();
  Assert.assertEquals(0, channel.getTotalStored());
  Assert.assertEquals(0, channel.channelCounter.getChannelSize());
  Assert.assertEquals(5000, channel.channelCounter.getEventTakeAttemptCount());
  Assert.assertEquals(5000, channel.channelCounter.getEventTakeSuccessCount());

  //3. fill up mem & overflow
  sourceThd = makePutThread("src", 1, 10000, 1000, channel);
  sourceThd.start();
  sourceThd.join();
  Assert.assertEquals(10000, channel.getTotalStored());
  Assert.assertEquals(10000, channel.channelCounter.getChannelSize());
  Assert.assertEquals(15000, channel.channelCounter.getEventPutAttemptCount());
  Assert.assertEquals(15000, channel.channelCounter.getEventPutSuccessCount());

  //4. empty memory (first 5000 events come from the memory queue)
  sinkThd = makeTakeThread("sink", 1, 5000, 1000, channel);
  sinkThd.start();
  sinkThd.join();
  Assert.assertEquals(5000, channel.getTotalStored());
  Assert.assertEquals(5000, channel.channelCounter.getChannelSize());
  Assert.assertEquals(10000, channel.channelCounter.getEventTakeAttemptCount());
  Assert.assertEquals(10000, channel.channelCounter.getEventTakeSuccessCount());

  //5. empty overflow
  transactionalTakeN(5001, 1000, channel);
  transactionalTakeN(6001, 1000, channel);
  transactionalTakeN(7001, 1000, channel);
  transactionalTakeN(8001, 1000, channel);
  transactionalTakeN(9001, 1000, channel);
  Assert.assertEquals(0, channel.getTotalStored());
  Assert.assertEquals(0, channel.channelCounter.getChannelSize());
  Assert.assertEquals(15000, channel.channelCounter.getEventTakeAttemptCount());
  Assert.assertEquals(15000, channel.channelCounter.getEventTakeSuccessCount());

  //6. now do it concurrently
  sourceThd = makePutThread("src1", 1, 5000, 1000, channel);
  Thread sourceThd2 = makePutThread("src2", 1, 5000, 500, channel);
  sinkThd = makeTakeThread_noCheck("sink1", 5000, 1000, channel);
  sourceThd.start();
  sourceThd2.start();
  sinkThd.start();
  sourceThd.join();
  sourceThd2.join();
  sinkThd.join();
  Assert.assertEquals(5000, channel.getTotalStored());
  Assert.assertEquals(5000, channel.channelCounter.getChannelSize());
  Thread sinkThd2 = makeTakeThread_noCheck("sink2", 2500, 500, channel);
  Thread sinkThd3 = makeTakeThread_noCheck("sink3", 2500, 1000, channel);
  sinkThd2.start();
  sinkThd3.start();
  sinkThd2.join();
  sinkThd3.join();
  Assert.assertEquals(0, channel.getTotalStored());
  Assert.assertEquals(0, channel.channelCounter.getChannelSize());
  Assert.assertEquals(25000, channel.channelCounter.getEventTakeSuccessCount());
  Assert.assertEquals(25000, channel.channelCounter.getEventPutSuccessCount());
  // Attempts may exceed successes (takes on an empty channel still count
  // as attempts), so only assert a lower bound. The failure messages
  // describe the counter being below the bound (i.e. smaller than expected).
  Assert.assertTrue("TakeAttempt channel counter value smaller than expected",
      25000 <= channel.channelCounter.getEventTakeAttemptCount());
  Assert.assertTrue("PutAttempt channel counter value smaller than expected",
      25000 <= channel.channelCounter.getEventPutAttemptCount());
}
/**
 * Builds {@code count} producer threads named src0..src(count-1); each will
 * put an equal share of {@code totalEvents} into the channel in batches of
 * {@code batchSize}. Threads are returned unstarted.
 */
public ArrayList<Thread> createSourceThreads(int count, int totalEvents, int batchSize) {
  final int eventsPerSource = totalEvents / count;
  ArrayList<Thread> putters = new ArrayList<Thread>();
  for (int idx = 0; idx < count; idx++) {
    putters.add(makePutThread("src" + idx, 1, eventsPerSource, batchSize, channel));
  }
  return putters;
}
/**
 * Builds {@code count} consumer threads named sink0..sink(count-1); each will
 * take an equal share of {@code totalEvents} from the channel in batches of
 * {@code batchSize}, without verifying event contents. Threads are returned
 * unstarted.
 */
public ArrayList<Thread> createSinkThreads(int count, int totalEvents, int batchSize) {
  final int eventsPerSink = totalEvents / count;
  ArrayList<Thread> takers = new ArrayList<Thread>(count);
  for (int idx = 0; idx < count; idx++) {
    takers.add(makeTakeThread_noCheck("sink" + idx, eventsPerSink, batchSize, channel));
  }
  return takers;
}
/** Starts every thread in the list, in list order. */
public void startThreads(ArrayList<Thread> threads) {
  threads.forEach(Thread::start);
}
/**
 * Joins every thread in the list. If interrupted while waiting, reports
 * which thread was being joined and rethrows the interruption to the caller.
 */
public void joinThreads(ArrayList<Thread> threads) throws InterruptedException {
  for (Thread worker : threads) {
    try {
      worker.join();
    } catch (InterruptedException e) {
      System.out.println("Interrupted while waiting on " + worker.getName());
      throw e; // caller (the test) decides how to handle it
    }
  }
}
@Test
public void testParallelMultipleSourcesAndSinks() throws InterruptedException {
  // Overflow-only channel (memoryCapacity=0): every event is spilled.
  // 8 producers and 8 consumers race over 1M events.
  int sourceCount = 8;
  int sinkCount = 8;
  int eventCount = 1000000;
  int batchSize = 100;
  Map<String, String> params = new HashMap<String, String>();
  params.put("memoryCapacity", "0");
  params.put("overflowCapacity", String.valueOf(eventCount));
  params.put("overflowTimeout", "3");
  startChannel(params);

  ArrayList<Thread> sinks = createSinkThreads(sinkCount, eventCount, batchSize);
  ArrayList<Thread> sources = createSourceThreads(sourceCount, eventCount, batchSize);

  StopWatch watch = new StopWatch();
  startThreads(sinks);
  startThreads(sources);

  joinThreads(sources);
  joinThreads(sinks);

  watch.elapsed(); // timing printed for manual inspection only
  System.out.println("Max Queue size " + channel.getMaxMemQueueSize());

  // every put must have been recorded and every event consumed
  Assert.assertEquals(eventCount, channel.drainOrder.totalPuts);
  Assert.assertEquals("Channel not fully drained", 0, channel.getTotalStored());
  System.out.println("testParallelMultipleSourcesAndSinks done");
}
/**
 * Tiny wall-clock timer for ad-hoc test timing. Captures the start time at
 * construction and prints the elapsed interval (ms below 10 s, whole seconds
 * above) prefixed with the calling thread's name.
 */
static class StopWatch {
  long startTime;

  public StopWatch() {
    startTime = System.currentTimeMillis();
  }

  public void elapsed() {
    elapsed(null);
  }

  public void elapsed(String suffix) {
    long elapsedMs = System.currentTimeMillis() - startTime;
    String tag = (suffix == null) ? "" : "{ " + suffix + " }";
    String reading = (elapsedMs < 10000)
        ? " : [ " + elapsedMs + " ms ]. "
        : " : [ " + elapsedMs / 1000 + " sec ]. ";
    System.out.println(Thread.currentThread().getName() + reading + tag);
  }
}
}
| 9,683 |
0 | Create_ds/flume/flume-ng-channels/flume-spillable-memory-channel/src/main/java/org/apache/flume | Create_ds/flume/flume-ng-channels/flume-spillable-memory-channel/src/main/java/org/apache/flume/channel/SpillableMemoryChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.flume.ChannelException;
import org.apache.flume.ChannelFullException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.annotations.InterfaceAudience;
import org.apache.flume.annotations.InterfaceStability;
import org.apache.flume.annotations.Recyclable;
import org.apache.flume.channel.file.FileChannel;
import org.apache.flume.instrumentation.ChannelCounter;
import org.apache.flume.lifecycle.LifecycleState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.GuardedBy;
import java.util.ArrayDeque;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
/**
 * <p>
 * SpillableMemoryChannel will use main memory for buffering events until it has reached capacity.
 * Thereafter file channel will be used as overflow.
 * </p>
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
@Recyclable
public class SpillableMemoryChannel extends FileChannel {
  // config settings
  /**
   * Max number of events to be stored in memory
   */
  public static final String MEMORY_CAPACITY = "memoryCapacity";
  /**
   * Seconds to wait before enabling disk overflow when memory fills up
   */
  public static final String OVERFLOW_TIMEOUT = "overflowTimeout";
  /**
   * Internal use only. To remain undocumented in User guide. Determines the
   * percent free space available in mem queue when we stop spilling to overflow
   */
  public static final String OVERFLOW_DEACTIVATION_THRESHOLD
      = "overflowDeactivationThreshold";
  /**
   * percent of buffer between byteCapacity and the estimated event size.
   */
  public static final String BYTE_CAPACITY_BUFFER_PERCENTAGE
      = "byteCapacityBufferPercentage";
  /**
   * max number of bytes used for all events in the queue.
   */
  public static final String BYTE_CAPACITY = "byteCapacity";
  /**
   * max number of events in overflow.
   */
  public static final String OVERFLOW_CAPACITY = "overflowCapacity";
  /**
   * file channel setting that is overriden by Spillable Channel
   */
  public static final String KEEP_ALIVE = "keep-alive";
  /**
   * file channel capacity overridden by Spillable Channel
   */
  public static final String CAPACITY = "capacity";
  /**
   * Estimated average size of events expected to be in the channel
   */
  public static final String AVG_EVENT_SIZE = "avgEventSize";

  // final: the logger is assigned exactly once and shared by all instances
  private static final Logger LOGGER = LoggerFactory.getLogger(SpillableMemoryChannel.class);

  public static final int defaultMemoryCapacity = 10000;
  public static final int defaultOverflowCapacity = 100000000;

  public static final int defaultOverflowTimeout = 3;
  public static final int defaultOverflowDeactivationThreshold = 5; // percent

  // memory consumption control
  private static final int defaultAvgEventSize = 500;
  private static final Long defaultByteCapacity
      = (long) (Runtime.getRuntime().maxMemory() * .80);
  private static final int defaultByteCapacityBufferPercentage = 20;

  private volatile int byteCapacity;
  private volatile double avgEventSize = defaultAvgEventSize;
  private volatile int lastByteCapacity;
  private volatile int byteCapacityBufferPercentage;
  private Semaphore bytesRemaining;

  // for synchronizing access to primary/overflow channels & drain order
  private final Object queueLock = new Object();

  @GuardedBy(value = "queueLock")
  public ArrayDeque<Event> memQueue;

  // This semaphore tracks number of free slots in primary channel (includes
  // all active put lists) .. used to determine if the puts
  // should go into primary or overflow
  private Semaphore memQueRemaining;

  // tracks number of events in both channels. Takes will block on this
  private Semaphore totalStored;

  private int maxMemQueueSize = 0; // max size of memory Queue

  private boolean overflowDisabled; // if true indicates the overflow should not be used at all.
  // indicates if overflow can be used. invariant: false if overflowDisabled is true.
  private boolean overflowActivated = false;

  private int memoryCapacity = -1; // max events that the channel can hold in memory
  private int overflowCapacity;

  private int overflowTimeout;

  // mem full % at which we stop spill to overflow.
  // NOTE: divide by 100.0, not 100 — 'defaultOverflowDeactivationThreshold / 100'
  // is integer division and silently initializes this to 0.0 instead of 0.05.
  private double overflowDeactivationThreshold
      = defaultOverflowDeactivationThreshold / 100.0;

  public SpillableMemoryChannel() {
    super();
  }
  /** Total number of events currently stored in memory + overflow (semaphore permits). */
  protected int getTotalStored() {
    return totalStored.availablePermits();
  }

  /** Max events the in-memory queue may hold. */
  public int getMemoryCapacity() {
    return memoryCapacity;
  }

  /** Seconds to wait on full memory before switching puts to overflow. */
  public int getOverflowTimeout() {
    return overflowTimeout;
  }

  /** High-water mark of the memory queue observed so far. */
  public int getMaxMemQueueSize() {
    return maxMemQueueSize;
  }

  /** Capacity of the file-channel overflow. */
  protected Integer getOverflowCapacity() {
    return overflowCapacity;
  }

  /** True when overflowCapacity < 1, i.e. the channel is memory-only. */
  protected boolean isOverflowDisabled() {
    return overflowDisabled;
  }

  // exposed for tests; created lazily in configure()
  @VisibleForTesting
  protected ChannelCounter channelCounter;

  // records the interleaving of primary/overflow puts so takes drain in order
  public final DrainOrderQueue drainOrder = new DrainOrderQueue();

  /** Current size of the in-memory queue (thread-safe snapshot). */
  public int queueSize() {
    synchronized (queueLock) {
      return memQueue.size();
    }
  }
private static class MutableInteger {
private int value;
public MutableInteger(int val) {
value = val;
}
public void add(int amount) {
value += amount;
}
public int intValue() {
return value;
}
}
  // Tracks the order in which events were put into primary vs overflow, as a
  // run-length encoded deque: a positive entry is a run of events in the
  // primary (memory) queue, a negative entry a run in the overflow channel.
  // pop on an empty queue will throw NoSuchElementException
  // invariant: 0 will never be left in the queue
  public static class DrainOrderQueue {
    public ArrayDeque<MutableInteger> queue = new ArrayDeque<MutableInteger>(1000);

    public int totalPuts = 0; // for debugging only
    private long overflowCounter = 0; // # of items in overflow channel

    /** Debug rendering of the run-length queue, e.g. " [ 5 -3 2 ]". */
    public String dump() {
      StringBuilder sb = new StringBuilder();

      sb.append(" [ ");
      for (MutableInteger i : queue) {
        sb.append(i.intValue());
        sb.append(" ");
      }
      sb.append("]");
      return sb.toString();
    }

    /** Records eventCount puts to primary: extend the trailing primary run or start one. */
    public void putPrimary(Integer eventCount) {
      totalPuts += eventCount;
      if ((queue.peekLast() == null) || queue.getLast().intValue() < 0) {
        queue.addLast(new MutableInteger(eventCount));
      } else {
        queue.getLast().add(eventCount);
      }
    }

    /** Re-inserts a primary run at the head (used on take rollback). */
    public void putFirstPrimary(Integer eventCount) {
      if ((queue.peekFirst() == null) || queue.getFirst().intValue() > 0) {
        queue.addFirst(new MutableInteger(eventCount));
      } else {
        queue.getFirst().add(eventCount);
      }
    }

    /** Records eventCount puts to overflow (stored negated). */
    public void putOverflow(Integer eventCount) {
      totalPuts += eventCount;
      if ((queue.peekLast() == null) || queue.getLast().intValue() > 0) {
        queue.addLast(new MutableInteger(-eventCount));
      } else {
        queue.getLast().add(-eventCount);
      }
      overflowCounter += eventCount;
    }

    /** Re-inserts an overflow run at the head (used on take rollback). */
    public void putFirstOverflow(Integer eventCount) {
      if ((queue.peekFirst() == null) || queue.getFirst().intValue() > 0) {
        queue.addFirst(new MutableInteger(-eventCount));
      } else {
        queue.getFirst().add(-eventCount);
      }
      overflowCounter += eventCount;
    }

    /** Head run: >0 means next takes come from primary, <0 from overflow. */
    public int front() {
      return queue.getFirst().intValue();
    }

    public boolean isEmpty() {
      return queue.isEmpty();
    }

    /** Consumes takeCount events from the head primary run; removes the run at 0. */
    public void takePrimary(int takeCount) {
      MutableInteger headValue = queue.getFirst();

      // this condition is optimization to avoid redundant conversions of
      // int -> Integer -> string in hot path
      if (headValue.intValue() < takeCount) {
        throw new IllegalStateException("Cannot take " + takeCount +
            " from " + headValue.intValue() + " in DrainOrder Queue");
      }

      headValue.add(-takeCount);
      if (headValue.intValue() == 0) {
        queue.removeFirst();
      }
    }

    /** Consumes takeCount events from the head overflow (negative) run. */
    public void takeOverflow(int takeCount) {
      MutableInteger headValue = queue.getFirst();
      if (headValue.intValue() > -takeCount) {
        throw new IllegalStateException("Cannot take " + takeCount + " from "
            + headValue.intValue() + " in DrainOrder Queue head ");
      }

      headValue.add(takeCount);
      if (headValue.intValue() == 0) {
        queue.removeFirst();
      }
      overflowCounter -= takeCount;
    }
  }
private class SpillableMemoryTransaction extends BasicTransactionSemantics {
BasicTransactionSemantics overflowTakeTx = null; // Take-Txn for overflow
BasicTransactionSemantics overflowPutTx = null; // Put-Txn for overflow
boolean useOverflow = false;
boolean putCalled = false; // set on first invocation to put
boolean takeCalled = false; // set on first invocation to take
int largestTakeTxSize = 5000; // not a constraint, just hint for allocation
int largestPutTxSize = 5000; // not a constraint, just hint for allocation
Integer overflowPutCount = 0; // # of puts going to overflow in this Txn
private int putListByteCount = 0;
private int takeListByteCount = 0;
private int takeCount = 0;
ArrayDeque<Event> takeList;
ArrayDeque<Event> putList;
private final ChannelCounter channelCounter;
public SpillableMemoryTransaction(ChannelCounter counter) {
takeList = new ArrayDeque<Event>(largestTakeTxSize);
putList = new ArrayDeque<Event>(largestPutTxSize);
channelCounter = counter;
}
@Override
public void begin() {
super.begin();
}
@Override
public void close() {
if (overflowTakeTx != null) {
overflowTakeTx.close();
}
if (overflowPutTx != null) {
overflowPutTx.close();
}
super.close();
}
@Override
protected void doPut(Event event) throws InterruptedException {
channelCounter.incrementEventPutAttemptCount();
putCalled = true;
int eventByteSize = (int) Math.ceil(estimateEventSize(event) / avgEventSize);
if (!putList.offer(event)) {
throw new ChannelFullException("Put queue in " + getName() +
" channel's Transaction having capacity " + putList.size() +
" full, consider reducing batch size of sources");
}
putListByteCount += eventByteSize;
}
// Take will limit itself to a single channel within a transaction.
// This ensures commits/rollbacks are restricted to a single channel.
@Override
protected Event doTake() throws InterruptedException {
channelCounter.incrementEventTakeAttemptCount();
if (!totalStored.tryAcquire(overflowTimeout, TimeUnit.SECONDS)) {
LOGGER.debug("Take is backing off as channel is empty.");
return null;
}
boolean takeSuceeded = false;
try {
Event event;
synchronized (queueLock) {
int drainOrderTop = drainOrder.front();
if (!takeCalled) {
takeCalled = true;
if (drainOrderTop < 0) {
useOverflow = true;
overflowTakeTx = getOverflowTx();
overflowTakeTx.begin();
}
}
if (useOverflow) {
if (drainOrderTop > 0) {
LOGGER.debug("Take is switching to primary");
return null; // takes should now occur from primary channel
}
event = overflowTakeTx.take();
++takeCount;
drainOrder.takeOverflow(1);
} else {
if (drainOrderTop < 0) {
LOGGER.debug("Take is switching to overflow");
return null; // takes should now occur from overflow channel
}
event = memQueue.poll();
++takeCount;
drainOrder.takePrimary(1);
Preconditions.checkNotNull(event, "Queue.poll returned NULL despite"
+ " semaphore signalling existence of entry");
}
}
int eventByteSize = (int) Math.ceil(estimateEventSize(event) / avgEventSize);
if (!useOverflow) {
// takeList is thd pvt, so no need to do this in synchronized block
takeList.offer(event);
}
takeListByteCount += eventByteSize;
takeSuceeded = true;
return event;
} finally {
if (!takeSuceeded) {
totalStored.release();
}
}
}
@Override
protected void doCommit() throws InterruptedException {
if (putCalled) {
putCommit();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Put Committed. Drain Order Queue state : " + drainOrder.dump());
}
} else if (takeCalled) {
takeCommit();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Take Committed. Drain Order Queue state : " + drainOrder.dump());
}
}
}
private void takeCommit() {
if (takeCount > largestTakeTxSize) {
largestTakeTxSize = takeCount;
}
synchronized (queueLock) {
if (overflowTakeTx != null) {
overflowTakeTx.commit();
}
double memoryPercentFree = (memoryCapacity == 0) ? 0 :
(memoryCapacity - memQueue.size() + takeCount) / (double) memoryCapacity;
if (overflowActivated && memoryPercentFree >= overflowDeactivationThreshold) {
overflowActivated = false;
LOGGER.info("Overflow Deactivated");
}
channelCounter.setChannelSize(getTotalStored());
}
if (!useOverflow) {
memQueRemaining.release(takeCount);
bytesRemaining.release(takeListByteCount);
}
channelCounter.addToEventTakeSuccessCount(takeCount);
}
private void putCommit() throws InterruptedException {
// decide if overflow needs to be used
int timeout = overflowActivated ? 0 : overflowTimeout;
if (memoryCapacity != 0) {
// check for enough event slots(memoryCapacity) for using memory queue
if (!memQueRemaining.tryAcquire(putList.size(), timeout,
TimeUnit.SECONDS)) {
if (overflowDisabled) {
throw new ChannelFullException("Spillable Memory Channel's " +
"memory capacity has been reached and overflow is " +
"disabled. Consider increasing memoryCapacity.");
}
overflowActivated = true;
useOverflow = true;
// check if we have enough byteCapacity for using memory queue
} else if (!bytesRemaining.tryAcquire(putListByteCount,
overflowTimeout, TimeUnit.SECONDS)) {
memQueRemaining.release(putList.size());
if (overflowDisabled) {
throw new ChannelFullException("Spillable Memory Channel's "
+ "memory capacity has been reached. "
+ (bytesRemaining.availablePermits() * (int) avgEventSize)
+ " bytes are free and overflow is disabled. Consider "
+ "increasing byteCapacity or capacity.");
}
overflowActivated = true;
useOverflow = true;
}
} else {
useOverflow = true;
}
if (putList.size() > largestPutTxSize) {
largestPutTxSize = putList.size();
}
if (useOverflow) {
commitPutsToOverflow();
} else {
commitPutsToPrimary();
}
}
private void commitPutsToOverflow() throws InterruptedException {
overflowPutTx = getOverflowTx();
overflowPutTx.begin();
for (Event event : putList) {
overflowPutTx.put(event);
}
commitPutsToOverflow_core(overflowPutTx);
totalStored.release(putList.size());
overflowPutCount += putList.size();
channelCounter.addToEventPutSuccessCount(putList.size());
}
private void commitPutsToOverflow_core(Transaction overflowPutTx)
throws InterruptedException {
// reattempt only once if overflow is full first time around
for (int i = 0; i < 2; ++i) {
try {
synchronized (queueLock) {
overflowPutTx.commit();
drainOrder.putOverflow(putList.size());
channelCounter.setChannelSize(memQueue.size()
+ drainOrder.overflowCounter);
break;
}
} catch (ChannelFullException e) { // drop lock & reattempt
if (i == 0) {
Thread.sleep(overflowTimeout * 1000L);
} else {
throw e;
}
}
}
}
private void commitPutsToPrimary() {
synchronized (queueLock) {
for (Event e : putList) {
if (!memQueue.offer(e)) {
throw new ChannelException("Unable to insert event into memory " +
"queue in spite of spare capacity, this is very unexpected");
}
}
drainOrder.putPrimary(putList.size());
maxMemQueueSize = (memQueue.size() > maxMemQueueSize) ? memQueue.size()
: maxMemQueueSize;
channelCounter.setChannelSize(memQueue.size()
+ drainOrder.overflowCounter);
}
// update counters and semaphores
totalStored.release(putList.size());
channelCounter.addToEventPutSuccessCount(putList.size());
}
@Override
protected void doRollback() {
LOGGER.debug("Rollback() of " +
(takeCalled ? " Take Tx" : (putCalled ? " Put Tx" : "Empty Tx")));
if (putCalled) {
if (overflowPutTx != null) {
overflowPutTx.rollback();
}
if (!useOverflow) {
bytesRemaining.release(putListByteCount);
putList.clear();
}
putListByteCount = 0;
} else if (takeCalled) {
synchronized (queueLock) {
if (overflowTakeTx != null) {
overflowTakeTx.rollback();
}
if (useOverflow) {
drainOrder.putFirstOverflow(takeCount);
} else {
int remainingCapacity = memoryCapacity - memQueue.size();
Preconditions.checkState(remainingCapacity >= takeCount,
"Not enough space in memory queue to rollback takes. This" +
" should never happen, please report");
while (!takeList.isEmpty()) {
memQueue.addFirst(takeList.removeLast());
}
drainOrder.putFirstPrimary(takeCount);
}
}
totalStored.release(takeCount);
} else {
overflowTakeTx.rollback();
}
channelCounter.setChannelSize(memQueue.size() + drainOrder.overflowCounter);
}
} // Transaction
/**
* Read parameters from context
* <li>memoryCapacity = total number of events allowed at one time in the memory queue.
* <li>overflowCapacity = total number of events allowed at one time in the overflow file channel.
* <li>byteCapacity = the max number of bytes used for events in the memory queue.
* <li>byteCapacityBufferPercentage = type int. Defines the percent of buffer between byteCapacity
* and the estimated event size.
* <li>overflowTimeout = type int. Number of seconds to wait on a full memory before deciding to
* enable overflow
*/
@Override
public void configure(Context context) {
if (getLifecycleState() == LifecycleState.START || // does not support reconfig when running
getLifecycleState() == LifecycleState.ERROR) {
stop();
}
if (totalStored == null) {
totalStored = new Semaphore(0);
}
if (channelCounter == null) {
channelCounter = new ChannelCounter(getName());
}
// 1) Memory Capacity
Integer newMemoryCapacity;
try {
newMemoryCapacity = context.getInteger(MEMORY_CAPACITY, defaultMemoryCapacity);
if (newMemoryCapacity == null) {
newMemoryCapacity = defaultMemoryCapacity;
}
if (newMemoryCapacity < 0) {
throw new NumberFormatException(MEMORY_CAPACITY + " must be >= 0");
}
} catch (NumberFormatException e) {
newMemoryCapacity = defaultMemoryCapacity;
LOGGER.warn("Invalid " + MEMORY_CAPACITY + " specified, initializing " +
getName() + " channel to default value of {}", defaultMemoryCapacity);
}
try {
resizePrimaryQueue(newMemoryCapacity);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
// overflowTimeout - wait time before switching to overflow when mem is full
try {
Integer newOverflowTimeout =
context.getInteger(OVERFLOW_TIMEOUT, defaultOverflowTimeout);
overflowTimeout = (newOverflowTimeout != null) ? newOverflowTimeout
: defaultOverflowTimeout;
} catch (NumberFormatException e) {
LOGGER.warn("Incorrect value for " + getName() + "'s " + OVERFLOW_TIMEOUT
+ " setting. Using default value {}", defaultOverflowTimeout);
overflowTimeout = defaultOverflowTimeout;
}
try {
Integer newThreshold = context.getInteger(OVERFLOW_DEACTIVATION_THRESHOLD);
overflowDeactivationThreshold = (newThreshold != null) ?
newThreshold / 100.0
: defaultOverflowDeactivationThreshold / 100.0;
} catch (NumberFormatException e) {
LOGGER.warn("Incorrect value for " + getName() + "'s " +
OVERFLOW_DEACTIVATION_THRESHOLD + ". Using default value {} %",
defaultOverflowDeactivationThreshold);
overflowDeactivationThreshold = defaultOverflowDeactivationThreshold / 100.0;
}
// 3) Memory consumption control
try {
byteCapacityBufferPercentage =
context.getInteger(BYTE_CAPACITY_BUFFER_PERCENTAGE, defaultByteCapacityBufferPercentage);
} catch (NumberFormatException e) {
LOGGER.warn("Error parsing " + BYTE_CAPACITY_BUFFER_PERCENTAGE + " for "
+ getName() + ". Using default="
+ defaultByteCapacityBufferPercentage + ". " + e.getMessage());
byteCapacityBufferPercentage = defaultByteCapacityBufferPercentage;
}
try {
avgEventSize = context.getInteger(AVG_EVENT_SIZE, defaultAvgEventSize);
} catch (NumberFormatException e) {
LOGGER.warn("Error parsing " + AVG_EVENT_SIZE + " for " + getName()
+ ". Using default = " + defaultAvgEventSize + ". "
+ e.getMessage());
avgEventSize = defaultAvgEventSize;
}
try {
byteCapacity = (int) ((context.getLong(BYTE_CAPACITY, defaultByteCapacity) *
(1 - byteCapacityBufferPercentage * .01)) / avgEventSize);
if (byteCapacity < 1) {
byteCapacity = Integer.MAX_VALUE;
}
} catch (NumberFormatException e) {
LOGGER.warn("Error parsing " + BYTE_CAPACITY + " setting for " + getName()
+ ". Using default = " + defaultByteCapacity + ". "
+ e.getMessage());
byteCapacity = (int)
((defaultByteCapacity * (1 - byteCapacityBufferPercentage * .01))
/ avgEventSize);
}
if (bytesRemaining == null) {
bytesRemaining = new Semaphore(byteCapacity);
lastByteCapacity = byteCapacity;
} else {
if (byteCapacity > lastByteCapacity) {
bytesRemaining.release(byteCapacity - lastByteCapacity);
lastByteCapacity = byteCapacity;
} else {
try {
if (!bytesRemaining.tryAcquire(lastByteCapacity - byteCapacity,
overflowTimeout, TimeUnit.SECONDS)) {
LOGGER.warn("Couldn't acquire permits to downsize the byte capacity, " +
"resizing has been aborted");
} else {
lastByteCapacity = byteCapacity;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
try {
// file channel capacity
overflowCapacity = context.getInteger(OVERFLOW_CAPACITY, defaultOverflowCapacity);
// Determine if File channel needs to be disabled
if (memoryCapacity < 1 && overflowCapacity < 1) {
LOGGER.warn("For channel " + getName() + OVERFLOW_CAPACITY +
" cannot be set to 0 if " + MEMORY_CAPACITY + " is also 0. " +
"Using default value " + OVERFLOW_CAPACITY + " = " +
defaultOverflowCapacity);
overflowCapacity = defaultOverflowCapacity;
}
overflowDisabled = (overflowCapacity < 1);
if (overflowDisabled) {
overflowActivated = false;
}
} catch (NumberFormatException e) {
overflowCapacity = defaultOverflowCapacity;
}
// Configure File channel
context.put(KEEP_ALIVE, "0"); // override keep-alive for File channel
context.put(CAPACITY, Integer.toString(overflowCapacity)); // file channel capacity
super.configure(context);
}
  // Resizes the in-memory queue and its free-slot semaphore. Downsizing first
  // acquires the removed slots (bounded wait) so events are never dropped;
  // upsizing simply releases the extra permits.
  private void resizePrimaryQueue(int newMemoryCapacity) throws InterruptedException {
    if (memQueue != null && memoryCapacity == newMemoryCapacity) {
      return; // no change
    }

    if (memoryCapacity > newMemoryCapacity) {
      // shrinking: reserve the slots being removed before swapping queues
      int diff = memoryCapacity - newMemoryCapacity;
      if (!memQueRemaining.tryAcquire(diff, overflowTimeout, TimeUnit.SECONDS)) {
        LOGGER.warn("Memory buffer currently contains more events than the new size. " +
            "Downsizing has been aborted.");
        return;
      }
      synchronized (queueLock) {
        ArrayDeque<Event> newQueue = new ArrayDeque<Event>(newMemoryCapacity);
        newQueue.addAll(memQueue);
        memQueue = newQueue;
        memoryCapacity = newMemoryCapacity;
      }
    } else { // if (memoryCapacity <= newMemoryCapacity)
      synchronized (queueLock) {
        ArrayDeque<Event> newQueue = new ArrayDeque<Event>(newMemoryCapacity);
        if (memQueue != null) {
          newQueue.addAll(memQueue);
        }
        memQueue = newQueue;
        if (memQueRemaining == null) {
          // first-time configuration: memoryCapacity is still -1 here
          memQueRemaining = new Semaphore(newMemoryCapacity);
        } else {
          int diff = newMemoryCapacity - memoryCapacity;
          memQueRemaining.release(diff);
        }
        memoryCapacity = newMemoryCapacity;
      }
    }
  }
@Override
public synchronized void start() {
super.start();
int overFlowCount = super.getDepth();
if (drainOrder.isEmpty()) {
drainOrder.putOverflow(overFlowCount);
totalStored.release(overFlowCount);
}
channelCounter.start();
int totalCount = overFlowCount + memQueue.size();
channelCounter.setChannelCapacity(memoryCapacity + getOverflowCapacity());
channelCounter.setChannelSize(totalCount);
}
@Override
public synchronized void stop() {
if (getLifecycleState() == LifecycleState.STOP) {
return;
}
channelCounter.setChannelSize(memQueue.size() + drainOrder.overflowCounter);
channelCounter.stop();
super.stop();
}
  /**
   * Creates a transaction that spans both the in-memory queue and the
   * overflow File channel.
   */
  @Override
  protected BasicTransactionSemantics createTransaction() {
    return new SpillableMemoryTransaction(channelCounter);
  }
  // Obtains a transaction from the underlying File channel (the overflow),
  // bypassing this class's createTransaction() override.
  private BasicTransactionSemantics getOverflowTx() {
    return super.createTransaction();
  }
private long estimateEventSize(Event event) {
byte[] body = event.getBody();
if (body != null && body.length != 0) {
return body.length;
}
//Each event occupies at least 1 slot, so return 1.
return 1;
}
}
| 9,684 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFlumeEventQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import java.io.RandomAccessFile;
/**
 * Tests for {@code FlumeEventQueue}. Each test runs twice via the JUnit
 * {@code Parameterized} runner: once against the V2 checkpoint backing store
 * and once against V3.
 *
 * Fixes relative to the original:
 * - the inflight-takes/inflight-puts temp-file names were swapped
 *   (harmless, since the files are only referenced via the getters, but
 *   misleading when inspecting the test directory);
 * - the {@code finally} blocks in the corrupt-file tests could throw a
 *   NullPointerException (masking the real failure) when construction
 *   failed before {@code inflight} was assigned.
 */
@RunWith(value = Parameterized.class)
public class TestFlumeEventQueue {
  FlumeEventPointer pointer1 = new FlumeEventPointer(1, 1);
  FlumeEventPointer pointer2 = new FlumeEventPointer(2, 2);
  FlumeEventPointer pointer3 = new FlumeEventPointer(3, 3);
  FlumeEventQueue queue;
  EventQueueBackingStoreSupplier backingStoreSupplier;
  EventQueueBackingStore backingStore;

  /** Supplies a fresh backing store plus the on-disk files the queue needs. */
  private abstract static class EventQueueBackingStoreSupplier {
    File baseDir;
    File checkpoint;
    File inflightTakes;
    File inflightPuts;
    File queueSetDir;

    EventQueueBackingStoreSupplier() {
      baseDir = Files.createTempDir();
      checkpoint = new File(baseDir, "checkpoint");
      // Note: the original assigned "inflightputs" to the takes file and
      // "inflighttakes" to the puts file; the names are now consistent.
      inflightTakes = new File(baseDir, "inflighttakes");
      inflightPuts = new File(baseDir, "inflightputs");
      queueSetDir = new File(baseDir, "queueset");
    }

    File getCheckpoint() {
      return checkpoint;
    }

    File getInflightPuts() {
      return inflightPuts;
    }

    File getInflightTakes() {
      return inflightTakes;
    }

    File getQueueSetDir() {
      return queueSetDir;
    }

    void delete() {
      FileUtils.deleteQuietly(baseDir);
    }

    abstract EventQueueBackingStore get() throws Exception;
  }

  /** One supplier per backing-store version (V2 and V3). */
  @Parameters
  public static Collection<Object[]> data() throws Exception {
    Object[][] data = new Object[][] {
        {
            new EventQueueBackingStoreSupplier() {
              @Override
              public EventQueueBackingStore get() throws Exception {
                Assert.assertTrue(baseDir.isDirectory() || baseDir.mkdirs());
                return new EventQueueBackingStoreFileV2(getCheckpoint(), 1000,
                    "test", new FileChannelCounter("test"));
              }
            }
        },
        {
            new EventQueueBackingStoreSupplier() {
              @Override
              public EventQueueBackingStore get() throws Exception {
                Assert.assertTrue(baseDir.isDirectory() || baseDir.mkdirs());
                return new EventQueueBackingStoreFileV3(
                    getCheckpoint(), 1000, "test", new FileChannelCounter("test")
                );
              }
            }
        }
    };
    return Arrays.asList(data);
  }

  public TestFlumeEventQueue(EventQueueBackingStoreSupplier backingStoreSupplier) {
    this.backingStoreSupplier = backingStoreSupplier;
  }

  @Before
  public void setup() throws Exception {
    backingStore = backingStoreSupplier.get();
  }

  @After
  public void cleanup() throws IOException {
    if (backingStore != null) {
      backingStore.close();
    }
    backingStoreSupplier.delete();
  }

  /** A queue of capacity 1 accepts exactly one event. */
  @Test
  public void testCapacity() throws Exception {
    backingStore.close();
    File checkpoint = backingStoreSupplier.getCheckpoint();
    Assert.assertTrue(checkpoint.delete());
    backingStore = new EventQueueBackingStoreFileV2(
        checkpoint, 1, "test", new FileChannelCounter("test")
    );
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addTail(pointer1));
    Assert.assertFalse(queue.addTail(pointer2));
  }

  @Test(expected = IllegalArgumentException.class)
  public void testInvalidCapacityZero() throws Exception {
    backingStore.close();
    File checkpoint = backingStoreSupplier.getCheckpoint();
    Assert.assertTrue(checkpoint.delete());
    backingStore = new EventQueueBackingStoreFileV2(
        checkpoint, 0, "test", new FileChannelCounter("test")
    );
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
  }

  @Test(expected = IllegalArgumentException.class)
  public void testInvalidCapacityNegative() throws Exception {
    backingStore.close();
    File checkpoint = backingStoreSupplier.getCheckpoint();
    Assert.assertTrue(checkpoint.delete());
    backingStore = new EventQueueBackingStoreFileV2(
        checkpoint, -1, "test", new FileChannelCounter("test")
    );
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
  }

  @Test
  public void testQueueIsEmptyAfterCreation() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertNull(queue.removeHead(0L));
  }

  @Test
  public void addTail1() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addTail(pointer1));
    Assert.assertEquals(pointer1, queue.removeHead(0));
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
  }

  @Test
  public void addTail2() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addTail(pointer1));
    Assert.assertTrue(queue.addTail(pointer2));
    Assert.assertEquals(Sets.newHashSet(1, 2), queue.getFileIDs());
    Assert.assertEquals(pointer1, queue.removeHead(0));
    Assert.assertEquals(Sets.newHashSet(2), queue.getFileIDs());
  }

  /** FIFO behavior and file-ID tracking over many addTail/removeHead cycles. */
  @Test
  public void addTailLarge() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    int size = 500;
    Set<Integer> fileIDs = Sets.newHashSet();
    for (int i = 1; i <= size; i++) {
      Assert.assertTrue(queue.addTail(new FlumeEventPointer(i, i)));
      fileIDs.add(i);
      Assert.assertEquals(fileIDs, queue.getFileIDs());
    }
    for (int i = 1; i <= size; i++) {
      Assert.assertEquals(new FlumeEventPointer(i, i), queue.removeHead(0));
      fileIDs.remove(i);
      Assert.assertEquals(fileIDs, queue.getFileIDs());
    }
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
  }

  @Test
  public void addHead1() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addHead(pointer1));
    Assert.assertEquals(Sets.newHashSet(1), queue.getFileIDs());
    Assert.assertEquals(pointer1, queue.removeHead(0));
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
  }

  @Test
  public void addHead2() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    queue.replayComplete();
    Assert.assertTrue(queue.addHead(pointer1));
    Assert.assertTrue(queue.addHead(pointer2));
    Assert.assertEquals(Sets.newHashSet(1, 2), queue.getFileIDs());
    // addHead is LIFO: the most recently added pointer comes out first.
    Assert.assertEquals(pointer2, queue.removeHead(0));
    Assert.assertEquals(Sets.newHashSet(1), queue.getFileIDs());
  }

  /** LIFO behavior and file-ID tracking over many addHead/removeHead cycles. */
  @Test
  public void addHeadLarge() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    queue.replayComplete();
    int size = 500;
    Set<Integer> fileIDs = Sets.newHashSet();
    for (int i = 1; i <= size; i++) {
      Assert.assertTrue(queue.addHead(new FlumeEventPointer(i, i)));
      fileIDs.add(i);
      Assert.assertEquals(fileIDs, queue.getFileIDs());
    }
    for (int i = size; i > 0; i--) {
      Assert.assertEquals(new FlumeEventPointer(i, i), queue.removeHead(0));
      fileIDs.remove(i);
      Assert.assertEquals(fileIDs, queue.getFileIDs());
    }
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
  }

  @Test
  public void addTailRemove1() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addTail(pointer1));
    Assert.assertEquals(Sets.newHashSet(1), queue.getFileIDs());
    Assert.assertTrue(queue.remove(pointer1));
    queue.replayComplete();
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
    Assert.assertNull(queue.removeHead(0));
    Assert.assertEquals(Sets.newHashSet(), queue.getFileIDs());
  }

  @Test
  public void addTailRemove2() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addTail(pointer1));
    Assert.assertTrue(queue.addTail(pointer2));
    Assert.assertTrue(queue.remove(pointer1));
    queue.replayComplete();
    Assert.assertEquals(pointer2, queue.removeHead(0));
  }

  @Test
  public void addHeadRemove1() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    queue.addHead(pointer1);
    Assert.assertTrue(queue.remove(pointer1));
    Assert.assertNull(queue.removeHead(0));
  }

  @Test
  public void addHeadRemove2() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addHead(pointer1));
    Assert.assertTrue(queue.addHead(pointer2));
    Assert.assertTrue(queue.remove(pointer1));
    queue.replayComplete();
    Assert.assertEquals(pointer2, queue.removeHead(0));
  }

  /** Removing an unknown pointer must not trigger extra linear searches. */
  @Test
  public void testUnknownPointerDoesNotCauseSearch() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    Assert.assertTrue(queue.addHead(pointer1));
    Assert.assertTrue(queue.addHead(pointer2));
    Assert.assertFalse(queue.remove(pointer3)); // does search
    Assert.assertTrue(queue.remove(pointer1));
    Assert.assertTrue(queue.remove(pointer2));
    queue.replayComplete();
    Assert.assertEquals(2, queue.getSearchCount());
  }

  /** remove() is a replay-only operation; after replayComplete() it fails. */
  @Test(expected = IllegalStateException.class)
  public void testRemoveAfterReplayComplete() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    queue.replayComplete();
    queue.remove(pointer1);
  }

  /**
   * Regression test for index wrap-around: fill the queue, drain half, and
   * fill again. A bad version of FlumeEventQueue.convert would throw an
   * IndexOutOfBoundsException on the second fill.
   */
  @Test
  public void testWrappingCorrectly() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    int size = Integer.MAX_VALUE;
    for (int i = 1; i <= size; i++) {
      if (!queue.addHead(new FlumeEventPointer(i, i))) {
        break;
      }
    }
    for (int i = queue.getSize() / 2; i > 0; i--) {
      Assert.assertNotNull(queue.removeHead(0));
    }
    for (int i = 1; i <= size; i++) {
      if (!queue.addHead(new FlumeEventPointer(i, i))) {
        break;
      }
    }
  }

  /** Uncommitted puts must survive a checkpoint and deserialize by txn id. */
  @Test
  public void testInflightPuts() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
    long txnID2 = txnID1 + 1;
    queue.addWithoutCommit(new FlumeEventPointer(1, 1), txnID1);
    queue.addWithoutCommit(new FlumeEventPointer(2, 1), txnID1);
    queue.addWithoutCommit(new FlumeEventPointer(2, 2), txnID2);
    queue.checkpoint(true);
    TimeUnit.SECONDS.sleep(3L);
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightPuts();
    Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
    Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(2, 1).toLong()));
    Assert.assertTrue(deserializedMap.get(txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
  }

  /** Uncommitted takes must survive a checkpoint and deserialize by txn id. */
  @Test
  public void testInflightTakes() throws Exception {
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
    long txnID2 = txnID1 + 1;
    queue.addTail(new FlumeEventPointer(1, 1));
    queue.addTail(new FlumeEventPointer(2, 1));
    queue.addTail(new FlumeEventPointer(2, 2));
    queue.removeHead(txnID1);
    queue.removeHead(txnID2);
    queue.removeHead(txnID2);
    queue.checkpoint(true);
    TimeUnit.SECONDS.sleep(3L);
    queue = new FlumeEventQueue(backingStore,
        backingStoreSupplier.getInflightTakes(),
        backingStoreSupplier.getInflightPuts(),
        backingStoreSupplier.getQueueSetDir());
    SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightTakes();
    Assert.assertTrue(deserializedMap.get(
        txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
    Assert.assertTrue(deserializedMap.get(
        txnID2).contains(new FlumeEventPointer(2, 1).toLong()));
    Assert.assertTrue(deserializedMap.get(
        txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
  }

  /**
   * Overwriting the start of the inflight-puts file with random bytes must
   * surface as a BadCheckpointException when the queue is re-created.
   */
  @Test(expected = BadCheckpointException.class)
  public void testCorruptInflightPuts() throws Exception {
    RandomAccessFile inflight = null;
    try {
      queue = new FlumeEventQueue(backingStore,
          backingStoreSupplier.getInflightTakes(),
          backingStoreSupplier.getInflightPuts(),
          backingStoreSupplier.getQueueSetDir());
      long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
      long txnID2 = txnID1 + 1;
      queue.addWithoutCommit(new FlumeEventPointer(1, 1), txnID1);
      queue.addWithoutCommit(new FlumeEventPointer(2, 1), txnID1);
      queue.addWithoutCommit(new FlumeEventPointer(2, 2), txnID2);
      queue.checkpoint(true);
      TimeUnit.SECONDS.sleep(3L);
      inflight = new RandomAccessFile(
          backingStoreSupplier.getInflightPuts(), "rw");
      inflight.seek(0);
      inflight.writeInt(new Random().nextInt());
      queue = new FlumeEventQueue(backingStore,
          backingStoreSupplier.getInflightTakes(),
          backingStoreSupplier.getInflightPuts(),
          backingStoreSupplier.getQueueSetDir());
      SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightPuts();
      Assert.assertTrue(deserializedMap.get(
          txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
      Assert.assertTrue(deserializedMap.get(
          txnID1).contains(new FlumeEventPointer(2, 1).toLong()));
      Assert.assertTrue(deserializedMap.get(
          txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
    } finally {
      // Guard against NPE masking the real failure if construction threw
      // before 'inflight' was assigned.
      if (inflight != null) {
        inflight.close();
      }
    }
  }

  /**
   * Overwriting the start of the inflight-takes file with random bytes must
   * surface as a BadCheckpointException when the queue is re-created.
   */
  @Test(expected = BadCheckpointException.class)
  public void testCorruptInflightTakes() throws Exception {
    RandomAccessFile inflight = null;
    try {
      queue = new FlumeEventQueue(backingStore,
          backingStoreSupplier.getInflightTakes(),
          backingStoreSupplier.getInflightPuts(),
          backingStoreSupplier.getQueueSetDir());
      long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
      long txnID2 = txnID1 + 1;
      queue.addWithoutCommit(new FlumeEventPointer(1, 1), txnID1);
      queue.addWithoutCommit(new FlumeEventPointer(2, 1), txnID1);
      queue.addWithoutCommit(new FlumeEventPointer(2, 2), txnID2);
      queue.checkpoint(true);
      TimeUnit.SECONDS.sleep(3L);
      inflight = new RandomAccessFile(
          backingStoreSupplier.getInflightTakes(), "rw");
      inflight.seek(0);
      inflight.writeInt(new Random().nextInt());
      queue = new FlumeEventQueue(backingStore,
          backingStoreSupplier.getInflightTakes(),
          backingStoreSupplier.getInflightPuts(),
          backingStoreSupplier.getQueueSetDir());
      SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightTakes();
      Assert.assertTrue(deserializedMap.get(
          txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
      Assert.assertTrue(deserializedMap.get(
          txnID1).contains(new FlumeEventPointer(2, 1).toLong()));
      Assert.assertTrue(deserializedMap.get(
          txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
    } finally {
      // Guard against NPE masking the real failure if construction threw
      // before 'inflight' was assigned.
      if (inflight != null) {
        inflight.close();
      }
    }
  }
}
| 9,685 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Charsets;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.flume.ChannelException;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.file.FileChannel.FileBackedTransaction;
import org.apache.flume.channel.file.FlumeEventQueue.InflightEventWrapper;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.apache.flume.channel.file.TestUtils.compareInputAndOut;
import static org.apache.flume.channel.file.TestUtils.consumeChannel;
import static org.apache.flume.channel.file.TestUtils.fillChannel;
import static org.apache.flume.channel.file.TestUtils.forceCheckpoint;
import static org.apache.flume.channel.file.TestUtils.putEvents;
import static org.apache.flume.channel.file.TestUtils.putWithoutCommit;
import static org.apache.flume.channel.file.TestUtils.takeEvents;
import static org.apache.flume.channel.file.TestUtils.takeWithoutCommit;
import static org.fest.reflect.core.Reflection.field;
public class TestFileChannel extends TestFileChannelBase {
private static final Logger LOG = LoggerFactory
.getLogger(TestFileChannel.class);
public static final String TEST_KEY = "test_key";
  // Delegates to the base class, which prepares the channel fixture before
  // each test.
  @Before
  public void setup() throws Exception {
    super.setup();
  }
  // Delegates to the base class, which cleans up the channel fixture after
  // each test.
  @After
  public void teardown() {
    super.teardown();
  }
  /**
   * Negative values for capacity, transactionCapacity and checkpointInterval
   * must be replaced by positive defaults at configure time. The private
   * fields are read back via fest-reflect.
   */
  @Test
  public void testNegativeCapacities() {
    Map<String, String> parms = Maps.newHashMap();
    parms.put(FileChannelConfiguration.CAPACITY, "-3");
    parms.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "-1");
    parms.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "-2");
    FileChannel channel = createFileChannel(parms);
    Assert.assertTrue(field("capacity")
        .ofType(Integer.class)
        .in(channel).get() > 0);
    Assert.assertTrue(field("transactionCapacity")
        .ofType(Integer.class)
        .in(channel).get() > 0);
    Assert.assertTrue(field("checkpointInterval")
        .ofType(Long.class)
        .in(channel).get() > 0);
  }
@Test
public void testFailAfterTakeBeforeCommit() throws Throwable {
final FileChannel channel = createFileChannel();
channel.start();
final Set<String> eventSet =
putEvents(channel, "testTakeFailBeforeCommit", 5, 5);
Transaction tx = channel.getTransaction();
takeWithoutCommit(channel, tx, 2);
//Simulate multiple sources, so separate thread - txns are thread local,
//so a new txn wont be created here unless it is in a different thread.
Executors.newSingleThreadExecutor().submit(new Runnable() {
@Override
public void run() {
Transaction tx = channel.getTransaction();
takeWithoutCommit(channel, tx, 3);
}
}).get();
forceCheckpoint(channel);
channel.stop();
//Simulate a sink, so separate thread.
try {
Executors.newSingleThreadExecutor().submit(new Runnable() {
@Override
public void run() {
FileChannel channel = createFileChannel();
channel.start();
Set<String> output = null;
try {
output = takeEvents(channel, 5);
} catch (Exception e) {
Throwables.propagate(e);
}
compareInputAndOut(eventSet, output);
channel.stop();
}
}).get();
} catch (ExecutionException e) {
throw e.getCause();
}
}
  /**
   * Puts events in two transactions, forces a checkpoint *between* the puts
   * and their commits, then restarts the channel and verifies all six events
   * are recovered - i.e. commits that land after a checkpoint are replayed.
   *
   * NOTE(review): the two single-thread executors created here are never
   * shut down - thread leak; confirm whether the surrounding test harness
   * tolerates this.
   */
  @Test
  public void testFailAfterPutCheckpointCommit() throws Throwable {
    final Map<String, String> overrides = Maps.newHashMap();
    // Long interval so only the forced checkpoint occurs during the test.
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
    final FileChannel channel = createFileChannel(overrides);
    channel.start();
    Transaction tx = channel.getTransaction();
    final Set<String> input = putWithoutCommit(channel, tx, "failAfterPut", 3);
    //Simulate multiple sources, so separate thread - txns are thread local,
    //so a new txn wont be created here unless it is in a different thread.
    final CountDownLatch latch = new CountDownLatch(1);
    Executors.newSingleThreadExecutor().submit(new Runnable() {
      @Override
      public void run() {
        Transaction tx = channel.getTransaction();
        input.addAll(putWithoutCommit(channel, tx, "failAfterPut", 3));
        try {
          // Hold the second commit until after the checkpoint below.
          latch.await();
          tx.commit();
        } catch (InterruptedException e) {
          tx.rollback();
          Throwables.propagate(e);
        } finally {
          tx.close();
        }
      }
    });
    forceCheckpoint(channel);
    tx.commit();
    tx.close();
    latch.countDown();
    // Give the background commit time to complete before stopping.
    Thread.sleep(2000);
    channel.stop();
    final Set<String> out = Sets.newHashSet();
    //Simulate a sink, so separate thread.
    try {
      Executors.newSingleThreadExecutor().submit(new Runnable() {
        @Override
        public void run() {
          try {
            FileChannel channel = createFileChannel();
            channel.start();
            out.addAll(takeEvents(channel, 6));
            channel.stop();
          } catch (Exception ex) {
            Throwables.propagate(ex);
          }
        }
      }).get();
    } catch (ExecutionException e) {
      // Unwrap so the test reports the real assertion/exception.
      throw e.getCause();
    }
  }
  /**
   * Fills the channel to capacity (expecting the documented ChannelException
   * message), reconfigures it in place, and verifies every event put before
   * the reconfigure can still be taken afterwards.
   */
  @Test
  public void testReconfigure() throws Exception {
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = Sets.newHashSet();
    try {
      // Put until the channel rejects; the exception ends the loop.
      while (true) {
        in.addAll(putEvents(channel, "reconfig", 1, 1));
      }
    } catch (ChannelException e) {
      // Pin the exact capacity-exceeded message.
      Assert.assertEquals("The channel has reached it's capacity. "
          + "This might be the result of a sink on the channel having too "
          + "low of batch size, a downstream system running slower than "
          + "normal, or that the channel capacity is just too low. [channel="
          + channel.getName() + "]", e.getMessage());
    }
    Configurables.configure(channel, createContext());
    Set<String> out = takeEvents(channel, 1, Integer.MAX_VALUE);
    compareInputAndOut(in, out);
  }
@Test
public void testPut() throws Exception {
channel.start();
Assert.assertTrue(channel.isOpen());
// should find no items
int found = takeEvents(channel, 1, 5).size();
Assert.assertEquals(0, found);
Set<String> expected = Sets.newHashSet();
expected.addAll(putEvents(channel, "unbatched", 1, 5));
expected.addAll(putEvents(channel, "batched", 5, 5));
Set<String> actual = takeEvents(channel, 1);
compareInputAndOut(expected, actual);
}
@Test
public void testPutConvertsNullValueToEmptyStrInHeader() throws Exception {
channel.start();
Event event = EventBuilder.withBody("test body".getBytes(Charsets.UTF_8),
Collections.<String, String>singletonMap(TEST_KEY, null));
Transaction txPut = channel.getTransaction();
txPut.begin();
channel.put(event);
txPut.commit();
txPut.close();
Transaction txTake = channel.getTransaction();
txTake.begin();
Event eventTaken = channel.take();
Assert.assertArrayEquals(event.getBody(), eventTaken.getBody());
Assert.assertEquals("", eventTaken.getHeaders().get(TEST_KEY));
txTake.commit();
txTake.close();
}
@Test
public void testCommitAfterNoPutTake() throws Exception {
channel.start();
Assert.assertTrue(channel.isOpen());
Transaction transaction;
transaction = channel.getTransaction();
transaction.begin();
transaction.commit();
transaction.close();
// ensure we can reopen log with no error
channel.stop();
channel = createFileChannel();
channel.start();
Assert.assertTrue(channel.isOpen());
transaction = channel.getTransaction();
transaction.begin();
Assert.assertNull(channel.take());
transaction.commit();
transaction.close();
}
  /**
   * With capacity 5, fills the channel and verifies that a rolled-back take
   * does not free up capacity: a subsequent put still fails, and all five
   * events remain takeable.
   */
  @Test
  public void testCapacity() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(5));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
        String.valueOf(5));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    fillChannel(channel, "fillup");
    // take an event, roll it back, and
    // then make sure a put fails
    Transaction transaction;
    transaction = channel.getTransaction();
    transaction.begin();
    Event event = channel.take();
    Assert.assertNotNull(event);
    transaction.rollback();
    transaction.close();
    // ensure the take the didn't change the state of the capacity
    Assert.assertEquals(0, fillChannel(channel, "capacity").size());
    // ensure we the events back
    Assert.assertEquals(5, takeEvents(channel, 1, 5).size());
  }
  /**
   * This test is here to make sure we can replay a full queue
   * when we have a PUT with a lower txid than the take which
   * made that PUT possible. Here we fill up the queue so
   * puts will block. Start the put (which assigns a txid)
   * and while it's blocking initiate a take. That will
   * allow the PUT to take place but at a lower txid
   * than the take and additionally with pre-FLUME-1432 with
   * the same timestamp. After FLUME-1432 the PUT will have a
   * lower txid but a higher write order id and we can see
   * which event occurred first.
   */
  @Test
  public void testRaceFoundInFLUME1432() throws Exception {
    // the idea here is we will fill up the channel
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.KEEP_ALIVE, String.valueOf(10L));
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10L));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, String.valueOf(10L));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    fillChannel(channel, "fillup");
    // then do a put which will block but it will be assigned a tx id
    // NOTE(review): this executor is never shut down - thread leak.
    Future<String> put = Executors.newSingleThreadExecutor().submit(new Callable<String>() {
      @Override
      public String call() throws Exception {
        Set<String> result = putEvents(channel, "blocked-put", 1, 1);
        Assert.assertTrue(result.toString(), result.size() == 1);
        Iterator<String> iter = result.iterator();
        return iter.next();
      }
    });
    Thread.sleep(1000L); // ensure the put has started and is blocked
    // after which we do a take, will have a tx id after the put
    Set<String> result = takeEvents(channel, 1, 1);
    Assert.assertTrue(result.toString(), result.size() == 1);
    String putmsg = put.get();
    Assert.assertNotNull(putmsg);
    String takemsg = result.iterator().next();
    Assert.assertNotNull(takemsg);
    LOG.info("Got: put " + putmsg + ", take " + takemsg);
    channel.stop();
    channel = createFileChannel(overrides);
    // now when we replay, the transaction the put will be ordered
    // before the take when we used the txid as an order of operations
    channel.start();
    Assert.assertTrue(channel.isOpen());
  }
  /**
   * Stress test: ten producer threads (alternating batch sizes 1 and 5) and
   * ten consumer threads run concurrently. Consumers keep taking until all
   * producers finish and everything put has been taken; the test then checks
   * no thread raised an exception and the taken set equals the put set.
   */
  @Test
  public void testThreaded() throws IOException, InterruptedException {
    channel.start();
    Assert.assertTrue(channel.isOpen());
    int numThreads = 10;
    final CountDownLatch producerStopLatch = new CountDownLatch(numThreads);
    final CountDownLatch consumerStopLatch = new CountDownLatch(numThreads);
    final List<Exception> errors = Collections.synchronizedList(new ArrayList<Exception>());
    final Set<String> expected = Collections.synchronizedSet(new HashSet<String>());
    final Set<String> actual = Collections.synchronizedSet(new HashSet<String>());
    // Producers: even ids put one-at-a-time, odd ids put in batches of 5.
    for (int i = 0; i < numThreads; i++) {
      final int id = i;
      Thread t = new Thread() {
        @Override
        public void run() {
          try {
            if (id % 2 == 0) {
              expected.addAll(putEvents(channel, Integer.toString(id), 1, 5));
            } else {
              expected.addAll(putEvents(channel, Integer.toString(id), 5, 5));
            }
            LOG.info("Completed some puts " + expected.size());
          } catch (Exception e) {
            LOG.error("Error doing puts", e);
            errors.add(e);
          } finally {
            producerStopLatch.countDown();
          }
        }
      };
      t.setDaemon(true);
      t.start();
    }
    // Consumers: keep draining until producers are done and counts match.
    for (int i = 0; i < numThreads; i++) {
      final int id = i;
      Thread t = new Thread() {
        @Override
        public void run() {
          try {
            while (!producerStopLatch.await(1, TimeUnit.SECONDS) ||
                expected.size() > actual.size()) {
              if (id % 2 == 0) {
                actual.addAll(takeEvents(channel, 1, Integer.MAX_VALUE));
              } else {
                actual.addAll(takeEvents(channel, 5, Integer.MAX_VALUE));
              }
            }
            if (actual.isEmpty()) {
              LOG.error("Found nothing!");
            } else {
              LOG.info("Completed some takes " + actual.size());
            }
          } catch (Exception e) {
            LOG.error("Error doing takes", e);
            errors.add(e);
          } finally {
            consumerStopLatch.countDown();
          }
        }
      };
      t.setDaemon(true);
      t.start();
    }
    Assert.assertTrue("Timed out waiting for producers",
        producerStopLatch.await(30, TimeUnit.SECONDS));
    Assert.assertTrue("Timed out waiting for consumer",
        consumerStopLatch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(Collections.EMPTY_LIST, errors);
    compareInputAndOut(expected, actual);
  }
@Test
public void testLocking() throws IOException {
channel.start();
Assert.assertTrue(channel.isOpen());
FileChannel fileChannel = createFileChannel();
fileChannel.start();
Assert.assertTrue(!fileChannel.isOpen());
}
  /**
   * Test contributed by Brock Noland during code review.
   *
   * Verifies that a take whose transaction spans a checkpoint (take before,
   * commit after) is not replayed after a restart.
   * @throws Exception
   */
  @Test
  public void testTakeTransactionCrossingCheckpoint() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = fillChannel(channel, "restart");
    Set<String> out = Sets.newHashSet();
    // now take one item off the channel
    Transaction tx = channel.getTransaction();
    out.addAll(takeWithoutCommit(channel, tx, 1));
    // force a checkpoint so the take is before and the commit is after it
    forceCheckpoint(channel);
    tx.commit();
    tx.close();
    channel.stop();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    // we should not get the item we took off the queue above
    Set<String> out2 = takeEvents(channel, 1, Integer.MAX_VALUE);
    channel.stop();
    in.removeAll(out);
    compareInputAndOut(in, out2);
  }
  /**
   * Verifies that a put whose transaction spans a checkpoint (put before,
   * commit after) is still recovered on replay after a restart.
   */
  @Test
  public void testPutForceCheckpointCommitReplay() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(2));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
        String.valueOf(2));
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
    FileChannel channel = createFileChannel(overrides);
    channel.start();
    // Put without committing, checkpoint, then commit: the commit record
    // lands after the checkpoint and must be replayed on restart.
    Transaction tx = channel.getTransaction();
    Set<String> in = putWithoutCommit(channel, tx, "putWithoutCommit", 1);
    forceCheckpoint(channel);
    tx.commit();
    tx.close();
    channel.stop();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = takeEvents(channel, 1);
    compareInputAndOut(in, out);
    channel.stop();
  }
  /**
   * Like {@code testPutForceCheckpointCommitReplay}, but a second checkpoint
   * is written after the commit; the event must still survive the restart.
   */
  @Test
  public void testPutCheckpointCommitCheckpointReplay() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(2));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
        String.valueOf(2));
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
    FileChannel channel = createFileChannel(overrides);
    channel.start();
    // Checkpoint between the put and its commit, then checkpoint again
    // after the commit before shutting down.
    Transaction tx = channel.getTransaction();
    Set<String> in = putWithoutCommit(channel, tx, "doubleCheckpoint", 1);
    forceCheckpoint(channel);
    tx.commit();
    tx.close();
    forceCheckpoint(channel);
    channel.stop();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = takeEvents(channel, 5);
    compareInputAndOut(in, out);
    channel.stop();
  }
@Test
public void testReferenceCounts() throws Exception {
Map<String, String> overrides = Maps.newHashMap();
overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
overrides.put(FileChannelConfiguration.MAX_FILE_SIZE, "150");
final FileChannel channel = createFileChannel(overrides);
channel.start();
putEvents(channel, "testing-reference-counting", 1, 15);
Transaction tx = channel.getTransaction();
takeWithoutCommit(channel, tx, 10);
forceCheckpoint(channel);
tx.rollback();
//Since we did not commit the original transaction. now we should get 15
//events back.
final Set<String> takenEvents = Sets.newHashSet();
Executors.newSingleThreadExecutor().submit(new Runnable() {
@Override
public void run() {
try {
takenEvents.addAll(takeEvents(channel, 15));
} catch (Exception ex) {
Throwables.propagate(ex);
}
}
}).get();
Assert.assertEquals(15, takenEvents.size());
}
  // This test will fail without FLUME-1606.
  /**
   * Verifies that rolling back an uncommitted transaction clears the
   * in-flight put/take bookkeeping (file IDs and event pointers) that the
   * channel tracks via its FlumeEventQueue. Internal state is inspected
   * through fest-reflect field access.
   */
  @Test
  public void testRollbackIncompleteTransaction() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL,
        String.valueOf(Integer.MAX_VALUE));
    final FileChannel channel = createFileChannel(overrides);
    channel.start();
    FileBackedTransaction tx = (FileBackedTransaction) channel.getTransaction();
    // Reach into the transaction's queue to watch the in-flight put tracker.
    InflightEventWrapper inflightPuts =
        field("inflightPuts").ofType(InflightEventWrapper.class).in(
            field("queue").ofType(FlumeEventQueue.class).in(tx).get()).get();
    tx.begin();
    for (int i = 0; i < 100; i++) {
      channel.put(EventBuilder.withBody("TestEvent".getBytes()));
    }
    Assert.assertFalse(inflightPuts.getFileIDs().isEmpty());
    Assert.assertFalse(inflightPuts.getInFlightPointers().isEmpty());
    tx.rollback();
    tx.close();
    // Rollback must empty the tracker and leave the channel depth at zero.
    Assert.assertTrue(inflightPuts.getFileIDs().isEmpty());
    Assert.assertTrue(inflightPuts.getInFlightPointers().isEmpty());
    Assert.assertTrue(channel.getDepth() == 0);
    Set<String> in = putEvents(channel, "testing-rollbacks", 100, 100);
    tx = (FileBackedTransaction) channel.getTransaction();
    // Same check for the in-flight take tracker.
    InflightEventWrapper inflightTakes =
        field("inflightTakes").ofType(InflightEventWrapper.class).in(
            field("queue").ofType(FlumeEventQueue.class).in(tx).get()).get();
    tx.begin();
    for (int i = 0; i < 100; i++) {
      channel.take();
    }
    Assert.assertFalse(inflightTakes.getFileIDs().isEmpty());
    Assert.assertFalse(inflightTakes.getInFlightPointers().isEmpty());
    tx.rollback();
    tx.close();
    Assert.assertTrue(inflightTakes.getFileIDs().isEmpty());
    Assert.assertTrue(inflightTakes.getInFlightPointers().isEmpty());
    // Rolled-back takes return everything to the queue.
    Assert.assertTrue(channel.getDepth() == in.size());
  }
  // With fsync-per-transaction enabled a corrupt event must kill the channel.
  @Test (expected = IllegalStateException.class)
  public void testChannelDiesOnCorruptEventFsync() throws Exception {
    testChannelDiesOnCorruptEvent(true);
  }
  // Without fsync-per-transaction the corrupt event is skipped instead.
  @Test
  public void testChannelDiesOnCorruptEventNoFsync() throws Exception {
    testChannelDiesOnCorruptEvent(false);
  }
private void testChannelDiesOnCorruptEvent(boolean fsyncPerTxn) throws Exception {
Map<String, String> overrides = new HashMap<String, String>();
overrides.put(FileChannelConfiguration.FSYNC_PER_TXN, String.valueOf(fsyncPerTxn));
final FileChannel channel = createFileChannel(overrides);
channel.start();
putEvents(channel,"test-corrupt-event",100,100);
for (File dataDir : dataDirs) {
File[] files = dataDir.listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (!name.endsWith("meta") && !name.contains("lock")) {
return true;
}
return false;
}
});
if (files != null && files.length > 0) {
for (int j = 0; j < files.length; j++) {
RandomAccessFile fileToCorrupt = new RandomAccessFile(files[0], "rw");
fileToCorrupt.seek(50);
fileToCorrupt.writeByte(234);
fileToCorrupt.close();
}
}
}
Set<String> events;
try {
events = consumeChannel(channel, true);
} catch (IllegalStateException ex) {
// The rollback call in takeEvents() in TestUtils will cause an
// IllegalArgumentException - and this should be tested to verify the
// channel is completely stopped.
Assert.assertTrue(ex.getMessage().contains("Log is closed"));
throw ex;
}
if (fsyncPerTxn) {
Assert.fail();
} else {
// The corrupt event must be missing, the rest should be
// returned
Assert.assertEquals(99, events.size());
}
}
@Test
public void testFileChannelCounterIsOpen() {
FileChannel channel = createFileChannel();
FileChannelCounter counter = channel.getChannelCounter();
Assert.assertEquals(counter.isOpen(), false);
channel.start();
Assert.assertEquals(counter.isOpen(), true);
channel.stop();
Assert.assertEquals(counter.isOpen(), false);
}
}
| 9,686 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.junit.Assert;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.zip.GZIPInputStream;
import static org.fest.reflect.core.Reflection.field;
import static org.fest.reflect.core.Reflection.method;
/**
 * Static helpers shared by the file-channel test suite: event factories,
 * channel fill/drain utilities, forced-checkpoint hooks (via reflection),
 * and {@link FileChannel} construction helpers.
 */
public class TestUtils {

  /** Creates an event whose body and "timestamp" header are the creation time. */
  public static FlumeEvent newPersistableEvent() {
    Map<String, String> headers = Maps.newHashMap();
    String timestamp = String.valueOf(System.currentTimeMillis());
    headers.put("timestamp", timestamp);
    FlumeEvent event = new FlumeEvent(headers,
        timestamp.getBytes());
    return event;
  }

  /** Creates an event with a {@code size}-byte body (every byte is 54). */
  public static FlumeEvent newPersistableEvent(int size) {
    Map<String, String> headers = Maps.newHashMap();
    String timestamp = String.valueOf(System.currentTimeMillis());
    headers.put("timestamp", timestamp);
    byte[] data = new byte[size];
    Arrays.fill(data, (byte) 54);
    FlumeEvent event = new FlumeEvent(headers, data);
    return event;
  }

  /** Serializes {@code writable} and returns a DataInput over the bytes. */
  public static DataInput toDataInput(Writable writable) throws IOException {
    ByteArrayOutputStream byteOutput = new ByteArrayOutputStream();
    DataOutputStream dataOutput = new DataOutputStream(byteOutput);
    writable.write(dataOutput);
    ByteArrayInputStream byteInput = new ByteArrayInputStream(byteOutput.toByteArray());
    DataInputStream dataInput = new DataInputStream(byteInput);
    return dataInput;
  }

  /** Asserts the two event-body sets are non-null and equal. */
  public static void compareInputAndOut(Set<String> in, Set<String> out) {
    Assert.assertNotNull(in);
    Assert.assertNotNull(out);
    Assert.assertEquals(in.size(), out.size());
    Assert.assertTrue(in.equals(out));
  }

  /**
   * Begins {@code tx} and puts {@code number} events without committing;
   * returns the event bodies. The caller owns commit/rollback/close.
   */
  public static Set<String> putWithoutCommit(Channel channel, Transaction tx,
                                             String prefix, int number) {
    Set<String> events = Sets.newHashSet();
    tx.begin();
    for (int i = 0; i < number; i++) {
      String eventData = (prefix + UUID.randomUUID()).toString();
      Event event = EventBuilder.withBody(eventData.getBytes());
      channel.put(event);
      events.add(eventData);
    }
    return events;
  }

  /**
   * Begins {@code tx} and takes up to {@code number} events without
   * committing; returns the event bodies. Stops early if the channel is
   * empty. The caller owns commit/rollback/close.
   */
  public static Set<String> takeWithoutCommit(Channel channel, Transaction tx,
                                              int number) {
    Set<String> events = Sets.newHashSet();
    tx.begin();
    for (int i = 0; i < number; i++) {
      Event e = channel.take();
      if (e == null) {
        break;
      }
      events.add(new String(e.getBody()));
    }
    return events;
  }

  /** Collects every log file found in each of the given data directories. */
  public static List<File> getAllLogs(File[] dataDirs) {
    List<File> result = Lists.newArrayList();
    for (File dataDir : dataDirs) {
      result.addAll(LogUtils.getLogs(dataDir));
    }
    return result;
  }

  /**
   * Forces a checkpoint, retrying up to 10 times (with a 200 ms sleep) while
   * a previous checkpoint backup is still in progress.
   */
  public static void forceCheckpoint(FileChannel channel) {
    // The compiler doesn't know that an IOException can be thrown here so
    // won't let us catch it or even check for it normally, hence the
    // class-name comparison below.
    String ioeClass = IOException.class.getName();
    for (int i = 0; i < 10; ++i) {
      try {
        doForcedCheckpoint(channel);
        break;
      } catch (Throwable ioe) {
        Throwable cause = ioe.getCause();
        if (cause != null && cause.getClass().getName().equals(ioeClass)) {
          String message = cause.getMessage();
          if (message != null && message.startsWith("Previous backup")) {
            try {
              Thread.sleep(200);
            } catch (InterruptedException ex) {
              // Ignore it.
            }
            continue;
          }
        }
        throw ioe;
      }
    }
  }

  /** Invokes the channel's private Log.writeCheckpoint(true) via reflection. */
  public static void doForcedCheckpoint(FileChannel channel) {
    Log log = field("log")
        .ofType(Log.class)
        .in(channel)
        .get();
    Assert.assertTrue("writeCheckpoint returned false",
        method("writeCheckpoint")
            .withReturnType(Boolean.class)
            .withParameterTypes(Boolean.class)
            .in(log)
            .invoke(true));
  }

  public static Set<String> takeEvents(Channel channel, int batchSize) throws Exception {
    return takeEvents(channel, batchSize, false);
  }

  public static Set<String> takeEvents(Channel channel, int batchSize, boolean checkForCorruption)
      throws Exception {
    return takeEvents(channel, batchSize, Integer.MAX_VALUE, checkForCorruption);
  }

  public static Set<String> takeEvents(Channel channel, int batchSize, int numEvents)
      throws Exception {
    return takeEvents(channel, batchSize, numEvents, false);
  }

  /**
   * Takes up to {@code numEvents} events in committed batches of
   * {@code batchSize}, returning their bodies. When
   * {@code checkForCorruption} is set, a corruption-flavored ChannelException
   * is asserted on and rethrown; otherwise a capacity-flavored one ends the
   * drain gracefully.
   */
  public static Set<String> takeEvents(Channel channel, int batchSize, int numEvents,
                                       boolean checkForCorruption) throws Exception {
    Set<String> result = Sets.newHashSet();
    for (int i = 0; i < numEvents; i += batchSize) {
      Transaction transaction = channel.getTransaction();
      try {
        transaction.begin();
        for (int j = 0; j < batchSize; j++) {
          Event event;
          try {
            event = channel.take();
          } catch (ChannelException ex) {
            Throwable th = ex;
            String msg;
            if (checkForCorruption) {
              msg = "Corrupt event found. Please run File Channel";
              th = ex.getCause();
            } else {
              msg = "Take list for FileBackedTransaction, capacity";
            }
            Assert.assertTrue(th.getMessage().startsWith(msg));
            if (checkForCorruption) {
              throw (Exception) th;
            }
            transaction.commit();
            return result;
          }
          if (event == null) {
            // Channel drained: commit what we have and stop.
            transaction.commit();
            return result;
          }
          result.add(new String(event.getBody(), Charsets.UTF_8));
        }
        transaction.commit();
      } catch (Throwable ex) {
        transaction.rollback();
        throw new RuntimeException(ex);
      } finally {
        transaction.close();
      }
    }
    return result;
  }

  public static Set<String> consumeChannel(Channel channel) throws Exception {
    return consumeChannel(channel, false);
  }

  /**
   * Drains the channel completely, using successively smaller batch sizes
   * (1000, 100, 10, 1) so the tail is picked up; returns all event bodies.
   */
  public static Set<String> consumeChannel(Channel channel, boolean checkForCorruption)
      throws Exception {
    Set<String> result = Sets.newHashSet();
    int[] batchSizes = new int[] {
        1000, 100, 10, 1
    };
    for (int i = 0; i < batchSizes.length; i++) {
      while (true) {
        Set<String> batch = takeEvents(channel, batchSizes[i], checkForCorruption);
        if (batch.isEmpty()) {
          break;
        }
        result.addAll(batch);
      }
    }
    return result;
  }

  /**
   * Puts events until the channel reports it is full, using successively
   * smaller batch sizes; returns the bodies of every event accepted.
   */
  public static Set<String> fillChannel(Channel channel, String prefix) throws Exception {
    Set<String> result = Sets.newHashSet();
    int[] batchSizes = new int[] {
        1000, 100, 10, 1
    };
    for (int i = 0; i < batchSizes.length; i++) {
      try {
        while (true) {
          Set<String> batch = putEvents(channel, prefix, batchSizes[i], Integer.MAX_VALUE, true);
          if (batch.isEmpty()) {
            break;
          }
          result.addAll(batch);
        }
      } catch (ChannelException e) {
        // Only the two expected capacity messages are tolerated; the
        // message text must match FileChannel's exactly (including "it's").
        Assert.assertTrue(("The channel has reached it's capacity. This might "
            + "be the result of a sink on the channel having too low of batch "
            + "size, a downstream system running slower than normal, or that "
            + "the channel capacity is just too low. [channel="
            + channel.getName() + "]").equals(e.getMessage())
            || e.getMessage().startsWith("Put queue for FileBackedTransaction of capacity "));
      }
    }
    return result;
  }

  public static Set<String> putEvents(Channel channel, String prefix, int batchSize, int numEvents)
      throws Exception {
    return putEvents(channel, prefix, batchSize, numEvents, false);
  }

  /**
   * Puts {@code numEvents} events in committed batches of {@code batchSize};
   * returns the bodies of committed events. When
   * {@code untilCapacityIsReached} is set, a channel-full ChannelException
   * ends the loop instead of propagating.
   */
  public static Set<String> putEvents(Channel channel, String prefix, int batchSize, int numEvents,
                                      boolean untilCapacityIsReached) throws Exception {
    Set<String> result = Sets.newHashSet();
    for (int i = 0; i < numEvents; i += batchSize) {
      Transaction transaction = channel.getTransaction();
      transaction.begin();
      try {
        Set<String> batch = Sets.newHashSet();
        for (int j = 0; j < batchSize; j++) {
          String s = prefix + "-" + i + "-" + j + "-" + UUID.randomUUID();
          Event event = EventBuilder.withBody(s.getBytes(Charsets.UTF_8));
          channel.put(event);
          batch.add(s);
        }
        transaction.commit();
        result.addAll(batch);
      } catch (Exception ex) {
        transaction.rollback();
        if (untilCapacityIsReached && ex instanceof ChannelException &&
            ("The channel has reached it's capacity. "
                + "This might be the result of a sink on the channel having too "
                + "low of batch size, a downstream system running slower than "
                + "normal, or that the channel capacity is just too low. "
                + "[channel=" + channel.getName() + "]").equals(ex.getMessage())) {
          break;
        }
        throw ex;
      } finally {
        transaction.close();
      }
    }
    return result;
  }

  /**
   * Gunzips the named classpath resource into {@code output}. Streams are
   * now closed on all paths (the original leaked them on copy failure).
   */
  public static void copyDecompressed(String resource, File output)
      throws IOException {
    URL input = Resources.getResource(resource);
    FileOutputStream fos = new FileOutputStream(output);
    try {
      GZIPInputStream gzis = new GZIPInputStream(input.openStream());
      try {
        ByteStreams.copy(gzis, fos);
      } finally {
        gzis.close();
      }
    } finally {
      fos.close();
    }
  }

  /** Builds a file-channel Context with test defaults plus {@code overrides}. */
  public static Context createFileChannelContext(String checkpointDir, String dataDir,
                                                 String backupDir, Map<String, String> overrides) {
    Context context = new Context();
    context.put(FileChannelConfiguration.CHECKPOINT_DIR, checkpointDir);
    if (backupDir != null) {
      context.put(FileChannelConfiguration.BACKUP_CHECKPOINT_DIR, backupDir);
    }
    context.put(FileChannelConfiguration.DATA_DIRS, dataDir);
    context.put(FileChannelConfiguration.KEEP_ALIVE, String.valueOf(1));
    context.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
    context.putAll(overrides);
    return context;
  }

  public static FileChannel createFileChannel(String checkpointDir, String dataDir,
                                              Map<String, String> overrides) {
    return createFileChannel(checkpointDir, dataDir, null, overrides);
  }

  /** Creates and configures (but does not start) a uniquely-named FileChannel. */
  public static FileChannel createFileChannel(String checkpointDir, String dataDir,
                                              String backupDir, Map<String, String> overrides) {
    FileChannel channel = new FileChannel();
    channel.setName("FileChannel-" + UUID.randomUUID());
    Context context = createFileChannelContext(checkpointDir, dataDir, backupDir, overrides);
    Configurables.configure(channel, context);
    return channel;
  }

  /** Writes {@code text} as UTF-8 to a new file {@code name} under {@code baseDir}. */
  public static File writeStringToFile(File baseDir, String name, String text) throws IOException {
    File passwordFile = new File(baseDir, name);
    Files.write(text, passwordFile, Charsets.UTF_8);
    return passwordFile;
  }
}
| 9,687 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFlumeEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import junit.framework.Assert;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
public class TestFlumeEvent {

  /** Headers and body passed to the constructor are returned unchanged. */
  @Test
  public void testBasics() {
    Map<String, String> expectedHeaders = Maps.newHashMap();
    expectedHeaders.put("key", "value");
    byte[] expectedBody = "flume".getBytes(Charsets.UTF_8);

    FlumeEvent event = new FlumeEvent(expectedHeaders, expectedBody);

    Assert.assertEquals(expectedHeaders, event.getHeaders());
    Assert.assertTrue(Arrays.equals(expectedBody, event.getBody()));
  }

  /** Round-trips an event through Writable serialization. */
  @Test
  public void testSerialization() throws IOException {
    Map<String, String> headers = Maps.newHashMap();
    headers.put("key", "value");
    byte[] body = "flume".getBytes(Charsets.UTF_8);

    FlumeEvent original = new FlumeEvent(headers, body);
    FlumeEvent restored = FlumeEvent.from(TestUtils.toDataInput(original));
    Assert.assertEquals(headers, restored.getHeaders());
    Assert.assertTrue(Arrays.equals(body, restored.getBody()));

    // Null headers deserialize as an empty map; a null body stays null.
    original.setHeaders(null);
    original.setBody(null);
    restored = FlumeEvent.from(TestUtils.toDataInput(original));
    Assert.assertEquals(Maps.newHashMap(), restored.getHeaders());
    Assert.assertNull(restored.getBody());
  }
}
| 9,688 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannelRestart.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Transaction;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.fest.reflect.exception.ReflectionError;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Executors;
import static org.apache.flume.channel.file.TestUtils.compareInputAndOut;
import static org.apache.flume.channel.file.TestUtils.consumeChannel;
import static org.apache.flume.channel.file.TestUtils.fillChannel;
import static org.apache.flume.channel.file.TestUtils.forceCheckpoint;
import static org.apache.flume.channel.file.TestUtils.doForcedCheckpoint;
import static org.apache.flume.channel.file.TestUtils.putEvents;
import static org.apache.flume.channel.file.TestUtils.putWithoutCommit;
import static org.apache.flume.channel.file.TestUtils.takeEvents;
import static org.apache.flume.channel.file.TestUtils.takeWithoutCommit;
import static org.fest.reflect.core.Reflection.*;
public class TestFileChannelRestart extends TestFileChannelBase {
protected static final Logger LOG = LoggerFactory.getLogger(TestFileChannelRestart.class);
  @Before
  public void setup() throws Exception {
    super.setup();
  }

  @After
  public void teardown() {
    super.teardown();
  }

  /**
   * Overrides channel creation to pin the checkpoint interval very high so
   * the scheduled checkpoint never fires during a test (FLUME-2482); only
   * explicitly forced checkpoints occur.
   */
  @Override
  protected FileChannel createFileChannel(Map<String, String> overrides) {
    // FLUME-2482, making sure scheduled checkpoint never gets called
    overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "6000000");
    return TestUtils.createFileChannel(checkpointDir.getAbsolutePath(), dataDir,
        backupDir.getAbsolutePath(), overrides);
  }
  // The wrappers below exercise doTestRestart over every combination of
  // (useLogReplayV1, forceCheckpoint, deleteCheckpoint, useFastReplay).

  // v1 replay, no checkpoint written: full log replay.
  @Test
  public void testRestartLogReplayV1() throws Exception {
    doTestRestart(true, false, false, false);
  }

  // v2 replay, no checkpoint written: full log replay.
  @Test
  public void testRestartLogReplayV2() throws Exception {
    doTestRestart(false, false, false, false);
  }

  // Checkpoint written then deleted: fast replay path, v1.
  @Test
  public void testFastReplayV1() throws Exception {
    doTestRestart(true, true, true, true);
  }

  // Checkpoint written then deleted: fast replay path, v2.
  @Test
  public void testFastReplayV2() throws Exception {
    doTestRestart(false, true, true, true);
  }

  // Fast replay enabled but checkpoint present, v1: must still recover.
  @Test
  public void testFastReplayNegativeTestV1() throws Exception {
    doTestRestart(true, true, false, true);
  }

  // Fast replay enabled but checkpoint present, v2: must still recover.
  @Test
  public void testFastReplayNegativeTestV2() throws Exception {
    doTestRestart(false, true, false, true);
  }

  // Checkpoint deleted, fast replay off, v1: normal replay.
  @Test
  public void testNormalReplayV1() throws Exception {
    doTestRestart(true, true, true, false);
  }

  // Checkpoint deleted, fast replay off, v2: normal replay.
  @Test
  public void testNormalReplayV2() throws Exception {
    doTestRestart(false, true, true, false);
  }
  /**
   * Fills the channel, restarts it, and verifies every event survives.
   *
   * @param useLogReplayV1 replay with the legacy v1 log replay logic
   * @param forceCheckpoint write a checkpoint before shutting down
   * @param deleteCheckpoint delete the checkpoint and its metadata after
   *        shutdown, so the restart must replay the data logs
   * @param useFastReplay enable fast replay on the restarted channel
   */
  public void doTestRestart(boolean useLogReplayV1,
                            boolean forceCheckpoint, boolean deleteCheckpoint,
                            boolean useFastReplay) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_LOG_REPLAY_V1,
        String.valueOf(useLogReplayV1));
    overrides.put(
        FileChannelConfiguration.USE_FAST_REPLAY,
        String.valueOf(useFastReplay));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = fillChannel(channel, "restart");
    if (forceCheckpoint) {
      forceCheckpoint(channel);
    }
    channel.stop();
    if (deleteCheckpoint) {
      File checkpoint = new File(checkpointDir, "checkpoint");
      Assert.assertTrue(checkpoint.delete());
      File checkpointMetaData = Serialization.getMetaDataFile(checkpoint);
      Assert.assertTrue(checkpointMetaData.delete());
    }
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
  }
  // Checkpoint file missing but its metadata present, without/with a backup
  // checkpoint directory.
  @Test
  public void testRestartWhenMetaDataExistsButCheckpointDoesNot() throws Exception {
    doTestRestartWhenMetaDataExistsButCheckpointDoesNot(false);
  }
  @Test
  public void testRestartWhenMetaDataExistsButCheckpointDoesNotWithBackup() throws Exception {
    doTestRestartWhenMetaDataExistsButCheckpointDoesNot(true);
  }
  /**
   * Deletes the checkpoint file (leaving its metadata), restarts, and
   * verifies both files are recreated and no events are lost. With
   * {@code backup} the restore must come from the backup checkpoint.
   */
  private void doTestRestartWhenMetaDataExistsButCheckpointDoesNot(boolean backup)
      throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    if (backup) {
      // Give the backup-checkpoint thread time to copy the checkpoint.
      Thread.sleep(2000);
    }
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    Assert.assertTrue(checkpoint.delete());
    File checkpointMetaData = Serialization.getMetaDataFile(checkpoint);
    Assert.assertTrue(checkpointMetaData.exists());
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(checkpoint.exists());
    Assert.assertTrue(checkpointMetaData.exists());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
  }
  // Checkpoint metadata missing but the checkpoint file present,
  // without/with a backup checkpoint directory.
  @Test
  public void testRestartWhenCheckpointExistsButMetaDoesNot() throws Exception {
    doTestRestartWhenCheckpointExistsButMetaDoesNot(false);
  }
  @Test
  public void testRestartWhenCheckpointExistsButMetaDoesNotWithBackup() throws Exception {
    doTestRestartWhenCheckpointExistsButMetaDoesNot(true);
  }
  /**
   * Deletes the checkpoint metadata file (leaving the checkpoint), restarts,
   * and verifies both files are recreated and no events are lost.
   */
  private void doTestRestartWhenCheckpointExistsButMetaDoesNot(boolean backup) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    if (backup) {
      // Give the backup-checkpoint thread time to copy the checkpoint.
      Thread.sleep(2000);
    }
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    File checkpointMetaData = Serialization.getMetaDataFile(checkpoint);
    Assert.assertTrue(checkpointMetaData.delete());
    Assert.assertTrue(checkpoint.exists());
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(checkpoint.exists());
    Assert.assertTrue(checkpointMetaData.exists());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
  }
  // Both checkpoint and metadata deleted, without/with a backup checkpoint.
  @Test
  public void testRestartWhenNoCheckpointExists() throws Exception {
    doTestRestartWhenNoCheckpointExists(false);
  }
  @Test
  public void testRestartWhenNoCheckpointExistsWithBackup() throws Exception {
    doTestRestartWhenNoCheckpointExists(true);
  }
  /**
   * Deletes both the checkpoint and its metadata, restarts, and verifies
   * both are recreated and no events are lost.
   */
  private void doTestRestartWhenNoCheckpointExists(boolean backup) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    if (backup) {
      // Give the backup-checkpoint thread time to copy the checkpoint.
      Thread.sleep(2000);
    }
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    File checkpointMetaData = Serialization.getMetaDataFile(checkpoint);
    Assert.assertTrue(checkpointMetaData.delete());
    Assert.assertTrue(checkpoint.delete());
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(checkpoint.exists());
    Assert.assertTrue(checkpointMetaData.exists());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
  }
  // Corrupt version field inside the checkpoint file, without/with backup.
  @Test
  public void testBadCheckpointVersion() throws Exception {
    doTestBadCheckpointVersion(false);
  }
  @Test
  public void testBadCheckpointVersionWithBackup() throws Exception {
    doTestBadCheckpointVersion(true);
  }
private void doTestBadCheckpointVersion(boolean backup) throws Exception {
Map<String, String> overrides = Maps.newHashMap();
overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> in = putEvents(channel, "restart", 10, 100);
Assert.assertEquals(100, in.size());
forceCheckpoint(channel);
if (backup) {
Thread.sleep(2000);
}
channel.stop();
File checkpoint = new File(checkpointDir, "checkpoint");
RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
writer.seek(EventQueueBackingStoreFile.INDEX_VERSION *
Serialization.SIZE_OF_LONG);
writer.writeLong(2L);
writer.getFD().sync();
writer.close();
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Assert.assertTrue(!backup || channel.checkpointBackupRestored());
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
  // Corrupt version field inside the checkpoint metadata, without/with backup.
  @Test
  public void testBadCheckpointMetaVersion() throws Exception {
    doTestBadCheckpointMetaVersion(false);
  }
  @Test
  public void testBadCheckpointMetaVersionWithBackup() throws Exception {
    doTestBadCheckpointMetaVersion(true);
  }
private void doTestBadCheckpointMetaVersion(boolean backup) throws Exception {
Map<String, String> overrides = Maps.newHashMap();
overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Set<String> in = putEvents(channel, "restart", 10, 100);
Assert.assertEquals(100, in.size());
forceCheckpoint(channel);
if (backup) {
Thread.sleep(2000);
}
channel.stop();
File checkpoint = new File(checkpointDir, "checkpoint");
FileInputStream is = new FileInputStream(Serialization.getMetaDataFile(checkpoint));
ProtosFactory.Checkpoint meta = ProtosFactory.Checkpoint.parseDelimitedFrom(is);
Assert.assertNotNull(meta);
is.close();
FileOutputStream os = new FileOutputStream(
Serialization.getMetaDataFile(checkpoint));
meta.toBuilder().setVersion(2).build().writeDelimitedTo(os);
os.flush();
channel = createFileChannel(overrides);
channel.start();
Assert.assertTrue(channel.isOpen());
Assert.assertTrue(!backup || channel.checkpointBackupRestored());
Set<String> out = consumeChannel(channel);
compareInputAndOut(in, out);
}
/** Write-order ID mismatch between checkpoint and metadata, no backup. */
@Test
public void testDifferingOrderIDCheckpointAndMetaVersion() throws Exception {
  doTestDifferingOrderIDCheckpointAndMetaVersion(false);
}
/** Write-order ID mismatch between checkpoint and metadata, backup enabled. */
@Test
public void testDifferingOrderIDCheckpointAndMetaVersionWithBackup() throws Exception {
  doTestDifferingOrderIDCheckpointAndMetaVersion(true);
}
/**
 * Writes events, forces a checkpoint, then rewrites the checkpoint metadata
 * with a write-order ID that disagrees with the checkpoint file, and verifies
 * every event is still recovered on restart (backup checkpoint or replay).
 *
 * @param backup whether dual (backup) checkpoints are enabled
 */
private void doTestDifferingOrderIDCheckpointAndMetaVersion(boolean backup) throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> in = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, in.size());
  forceCheckpoint(channel);
  if (backup) {
    // Give the asynchronous backup-checkpoint write time to complete.
    Thread.sleep(2000);
  }
  channel.stop();
  File checkpoint = new File(checkpointDir, "checkpoint");
  FileInputStream is = new FileInputStream(Serialization.getMetaDataFile(checkpoint));
  ProtosFactory.Checkpoint meta = ProtosFactory.Checkpoint.parseDelimitedFrom(is);
  Assert.assertNotNull(meta);
  is.close();
  FileOutputStream os = new FileOutputStream(
      Serialization.getMetaDataFile(checkpoint));
  try {
    // Make the metadata's write-order ID disagree with the checkpoint's.
    meta.toBuilder().setWriteOrderID(12).build().writeDelimitedTo(os);
    os.flush();
  } finally {
    // Fixed: the stream was previously never closed (resource leak).
    os.close();
  }
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Assert.assertTrue(!backup || channel.checkpointBackupRestored());
  Set<String> out = consumeChannel(channel);
  compareInputAndOut(in, out);
}
/** Incomplete-checkpoint marker, single checkpoint (no backup). */
@Test
public void testIncompleteCheckpoint() throws Exception {
  doTestIncompleteCheckpoint(false);
}
// NOTE(review): despite the "WithCheckpoint" suffix, this variant enables the
// dual (backup) checkpoint — the name likely meant "WithBackup".
@Test
public void testIncompleteCheckpointWithCheckpoint() throws Exception {
  doTestIncompleteCheckpoint(true);
}
/**
 * Verifies recovery when the checkpoint's completion marker indicates an
 * interrupted checkpoint write. All events must survive the restart either
 * via the backup checkpoint or a replay.
 *
 * @param backup true to enable dual (backup) checkpoints
 */
private void doTestIncompleteCheckpoint(boolean backup) throws Exception {
  Map<String, String> config = Maps.newHashMap();
  config.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> putIds = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, putIds.size());
  forceCheckpoint(channel);
  if (backup) {
    // Let the asynchronous backup of the checkpoint complete.
    Thread.sleep(2000);
  }
  channel.stop();
  // Flip the completion marker so the checkpoint looks half-written.
  File checkpointFile = new File(checkpointDir, "checkpoint");
  RandomAccessFile raf = new RandomAccessFile(checkpointFile, "rw");
  long markerOffset =
      EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG;
  raf.seek(markerOffset);
  raf.writeLong(EventQueueBackingStoreFile.CHECKPOINT_INCOMPLETE);
  raf.getFD().sync();
  raf.close();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Assert.assertTrue(!backup || channel.checkpointBackupRestored());
  Set<String> takenIds = consumeChannel(channel);
  compareInputAndOut(putIds, takenIds);
}
/** Corrupt inflight-puts tracking file, no backup checkpoint. */
@Test
public void testCorruptInflightPuts() throws Exception {
  doTestCorruptInflights("inflightputs", false);
}
/** Corrupt inflight-puts tracking file with backup checkpoint enabled. */
@Test
public void testCorruptInflightPutsWithBackup() throws Exception {
  doTestCorruptInflights("inflightputs", true);
}
/** Corrupt inflight-takes tracking file, no backup checkpoint. */
@Test
public void testCorruptInflightTakes() throws Exception {
  doTestCorruptInflights("inflighttakes", false);
}
/** Corrupt inflight-takes tracking file with backup checkpoint enabled. */
@Test
public void testCorruptInflightTakesWithBackup() throws Exception {
  doTestCorruptInflights("inflighttakes", true);
}
/** Fast replay enabled, checkpoint intact — fast replay should NOT trigger. */
@Test
public void testFastReplayWithCheckpoint() throws Exception {
  testFastReplay(false, true);
}
/** Fast replay enabled, checkpoint corrupted — fast replay should trigger. */
@Test
public void testFastReplayWithBadCheckpoint() throws Exception {
  testFastReplay(true, true);
}
/** Fast replay disabled, checkpoint intact — normal startup path. */
@Test
public void testNoFastReplayWithCheckpoint() throws Exception {
  testFastReplay(false, false);
}
/** Fast replay disabled, checkpoint corrupted — must fall back to full replay. */
@Test
public void testNoFastReplayWithBadCheckpoint() throws Exception {
  testFastReplay(true, false);
}
/**
 * Restarts the channel, optionally corrupting the checkpoint metadata first,
 * and checks that the fast-replay path is taken exactly when fast replay is
 * enabled AND the checkpoint is bad. All events must be recovered either way.
 *
 * @param shouldCorruptCheckpoint corrupt the checkpoint metadata before restart
 * @param useFastReplay           enable the fast-replay configuration option
 */
private void testFastReplay(boolean shouldCorruptCheckpoint, boolean useFastReplay)
    throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_FAST_REPLAY,
      String.valueOf(useFastReplay));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> in = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, in.size());
  forceCheckpoint(channel);
  channel.stop();
  if (shouldCorruptCheckpoint) {
    File checkpoint = new File(checkpointDir, "checkpoint");
    // Scribble a random long into the metadata file to invalidate it.
    RandomAccessFile writer = new RandomAccessFile(
        Serialization.getMetaDataFile(checkpoint),
        "rw");
    writer.seek(10);
    writer.writeLong(new Random().nextLong());
    writer.getFD().sync();
    writer.close();
  }
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> out = consumeChannel(channel);
  // Fast replay is only expected when it is enabled and the checkpoint is bad.
  if (useFastReplay && shouldCorruptCheckpoint) {
    Assert.assertTrue(channel.didFastReplay());
  } else {
    Assert.assertFalse(channel.didFastReplay());
  }
  compareInputAndOut(in, out);
}
/**
 * Corrupts an inflight puts/takes tracking file while a transaction spans a
 * checkpoint, then verifies every committed event is still recovered after
 * restart.
 *
 * @param name   inflight file to corrupt ("inflightputs" or "inflighttakes")
 * @param backup whether dual (backup) checkpoints are enabled
 */
private void doTestCorruptInflights(String name, boolean backup) throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  final Set<String> in1 = putEvents(channel, "restart-", 10, 100);
  Assert.assertEquals(100, in1.size());
  // Take (without committing) on another thread so the takes are still in
  // flight when the checkpoint below is forced. Fully qualified to avoid
  // needing a new import.
  final java.util.concurrent.ExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();
  executor.submit(new Runnable() {
    @Override
    public void run() {
      Transaction tx = channel.getTransaction();
      Set<String> out1 = takeWithoutCommit(channel, tx, 100);
      // NOTE(review): a failure here occurs on a background thread and is not
      // propagated to the test; compareInputAndOut() below is the real check.
      Assert.assertEquals(100, out1.size());
    }
  });
  Transaction tx = channel.getTransaction();
  Set<String> in2 = putWithoutCommit(channel, tx, "restart", 100);
  Assert.assertEquals(100, in2.size());
  forceCheckpoint(channel);
  if (backup) {
    // Let the asynchronous backup of the checkpoint complete.
    Thread.sleep(2000);
  }
  tx.commit();
  tx.close();
  // Fixed: the executor was previously never shut down, leaking a thread
  // per invocation.
  executor.shutdown();
  channel.stop();
  // Clobber the head of the inflight file with garbage.
  File inflight = new File(checkpointDir, name);
  RandomAccessFile writer = new RandomAccessFile(inflight, "rw");
  writer.write(new Random().nextInt());
  writer.close();
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Assert.assertTrue(!backup || channel.checkpointBackupRestored());
  Set<String> out = consumeChannel(channel);
  in1.addAll(in2);
  compareInputAndOut(in1, out);
}
/** Truncated checkpoint metadata file, no backup checkpoint. */
@Test
public void testTruncatedCheckpointMeta() throws Exception {
  doTestTruncatedCheckpointMeta(false);
}
/** Truncated checkpoint metadata file with backup checkpoint enabled. */
@Test
public void testTruncatedCheckpointMetaWithBackup() throws Exception {
  doTestTruncatedCheckpointMeta(true);
}
/**
 * Truncates the checkpoint metadata file to zero length and verifies the
 * channel still recovers every event on restart (backup checkpoint or
 * replay).
 *
 * @param backup true to enable dual (backup) checkpoints
 */
private void doTestTruncatedCheckpointMeta(boolean backup) throws Exception {
  Map<String, String> config = Maps.newHashMap();
  config.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> putIds = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, putIds.size());
  forceCheckpoint(channel);
  if (backup) {
    // Let the asynchronous backup of the checkpoint complete.
    Thread.sleep(2000);
  }
  channel.stop();
  // Wipe the metadata file entirely.
  File checkpointFile = new File(checkpointDir, "checkpoint");
  RandomAccessFile raf =
      new RandomAccessFile(Serialization.getMetaDataFile(checkpointFile), "rw");
  raf.setLength(0);
  raf.getFD().sync();
  raf.close();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Assert.assertTrue(!backup || channel.checkpointBackupRestored());
  Set<String> takenIds = consumeChannel(channel);
  compareInputAndOut(putIds, takenIds);
}
/** Corrupted checkpoint metadata file, no backup checkpoint. */
@Test
public void testCorruptCheckpointMeta() throws Exception {
  doTestCorruptCheckpointMeta(false);
}
/** Corrupted checkpoint metadata file with backup checkpoint enabled. */
@Test
public void testCorruptCheckpointMetaWithBackup() throws Exception {
  doTestCorruptCheckpointMeta(true);
}
/**
 * Overwrites part of the checkpoint metadata file with random bytes and
 * verifies the channel still recovers every event on restart (backup
 * checkpoint or replay).
 *
 * @param backup true to enable dual (backup) checkpoints
 */
private void doTestCorruptCheckpointMeta(boolean backup) throws Exception {
  Map<String, String> config = Maps.newHashMap();
  config.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> putIds = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, putIds.size());
  forceCheckpoint(channel);
  if (backup) {
    // Let the asynchronous backup of the checkpoint complete.
    Thread.sleep(2000);
  }
  channel.stop();
  // Scribble a random long into the metadata file to invalidate it.
  File checkpointFile = new File(checkpointDir, "checkpoint");
  RandomAccessFile raf =
      new RandomAccessFile(Serialization.getMetaDataFile(checkpointFile), "rw");
  raf.seek(10);
  raf.writeLong(new Random().nextLong());
  raf.getFD().sync();
  raf.close();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Assert.assertTrue(!backup || channel.checkpointBackupRestored());
  Set<String> takenIds = consumeChannel(channel);
  compareInputAndOut(putIds, takenIds);
}
/**
 * Asserts that the channel restored from the backup checkpoint exactly when
 * a backup was expected to have been used.
 */
private void checkIfBackupUsed(boolean backup) {
  Assert.assertEquals(backup, channel.checkpointBackupRestored());
}
// This test will fail without FLUME-1893
/**
 * Corrupts the high-order bytes of the checkpoint's version slot and verifies
 * the channel detects the bad checkpoint, falls back to a full replay, and
 * loses no events.
 */
@Test
public void testCorruptCheckpointVersionMostSignificant4Bytes() throws Exception {
  Map<String, String> config = Maps.newHashMap();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> putIds = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, putIds.size());
  forceCheckpoint(channel);
  channel.stop();
  // Stomp on the most significant bytes of the version field.
  RandomAccessFile raf =
      new RandomAccessFile(new File(checkpointDir, "checkpoint"), "rw");
  raf.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
  raf.write(new byte[] { (byte) 1, (byte) 5 });
  raf.getFD().sync();
  raf.close();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> takenIds = consumeChannel(channel);
  Assert.assertTrue(channel.didFullReplayDueToBadCheckpointException());
  compareInputAndOut(putIds, takenIds);
}
// This test will fail without FLUME-1893
/**
 * Corrupts the high-order bytes of the checkpoint's completion-marker slot
 * and verifies the channel detects the bad checkpoint, falls back to a full
 * replay, and loses no events.
 */
@Test
public void testCorruptCheckpointCompleteMarkerMostSignificant4Bytes() throws Exception {
  Map<String, String> config = Maps.newHashMap();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> putIds = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, putIds.size());
  forceCheckpoint(channel);
  channel.stop();
  // Stomp on the most significant bytes of the completion-marker field.
  RandomAccessFile raf =
      new RandomAccessFile(new File(checkpointDir, "checkpoint"), "rw");
  raf.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
  raf.write(new byte[] { (byte) 1, (byte) 5 });
  raf.getFD().sync();
  raf.close();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> takenIds = consumeChannel(channel);
  Assert.assertTrue(channel.didFullReplayDueToBadCheckpointException());
  compareInputAndOut(putIds, takenIds);
}
/**
 * Ensures that stray empty log files (and orphan metadata temp files) left in
 * the data directories do not break replay: all committed events must still
 * be readable after a restart.
 */
@Test
public void testWithExtraLogs() throws Exception {
  Map<String, String> config = Maps.newHashMap();
  config.put(FileChannelConfiguration.CAPACITY, "10");
  config.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "10");
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> expected = fillChannel(channel, "extralogs");
  // Drop an empty log file plus an orphan metadata temp file into each data dir.
  int dirIndex = 0;
  for (File dataDir : dataDirs) {
    File extraLog = new File(dataDir, Log.PREFIX + (1000 + dirIndex));
    dirIndex++;
    Assert.assertTrue(extraLog.createNewFile());
    Assert.assertTrue(extraLog.length() == 0);
    File metaFile = Serialization.getMetaDataFile(extraLog);
    Assert.assertTrue(Serialization.getMetaDataTempFile(metaFile).createNewFile());
  }
  channel.stop();
  channel = createFileChannel(config);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> actual = consumeChannel(channel);
  compareInputAndOut(expected, actual);
}
// Make sure the entire channel was not replayed, only the events from the
// backup.
@Test
public void testBackupUsedEnsureNoFullReplayWithoutCompression() throws Exception {
  testBackupUsedEnsureNoFullReplay(false);
}
/** Same as the uncompressed variant, but with a compressed backup checkpoint. */
@Test
public void testBackupUsedEnsureNoFullReplayWithCompression() throws Exception {
  testBackupUsedEnsureNoFullReplay(true);
}
/**
 * Forces the channel to restore from an older backup checkpoint (by deleting
 * the primary checkpoint and restoring a stashed copy of an earlier backup)
 * and then checks the log's operation counters to prove only the events from
 * the backup were replayed, not the entire channel.
 *
 * @param compressedBackup whether the backup checkpoint is compressed
 */
private void testBackupUsedEnsureNoFullReplay(boolean compressedBackup)
    throws Exception {
  File dataDir = Files.createTempDir();
  File tempBackup = Files.createTempDir();
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.DATA_DIRS, dataDir.getAbsolutePath());
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, "true");
  overrides.put(FileChannelConfiguration.COMPRESS_BACKUP_CHECKPOINT,
      String.valueOf(compressedBackup));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> in = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, in.size());
  // Sleeps bracket the checkpoint so the asynchronous backup settles.
  Thread.sleep(5000);
  forceCheckpoint(channel);
  Thread.sleep(5000);
  in = putEvents(channel, "restart", 10, 100);
  takeEvents(channel, 10, 100);
  Assert.assertEquals(100, in.size());
  // Stash a copy of the current backup before the next checkpoint replaces it.
  for (File file : backupDir.listFiles()) {
    if (file.getName().equals(Log.FILE_LOCK)) {
      continue;
    }
    Files.copy(file, new File(tempBackup, file.getName()));
  }
  forceCheckpoint(channel);
  channel.stop();
  Serialization.deleteAllFiles(checkpointDir, Log.EXCLUDES);
  // The last checkpoint may have been already backed up (it did while I
  // was running this test, since the checkpoint itself is tiny in unit
  // tests), so throw away the backup and force the use of an older backup by
  // bringing in the copy of the last backup before the checkpoint.
  Serialization.deleteAllFiles(backupDir, Log.EXCLUDES);
  for (File file : tempBackup.listFiles()) {
    if (file.getName().equals(Log.FILE_LOCK)) {
      continue;
    }
    Files.copy(file, new File(backupDir, file.getName()));
  }
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  checkIfBackupUsed(true);
  // Counters prove only the backup's events were replayed.
  Assert.assertEquals(100, channel.getLog().getPutCount());
  Assert.assertEquals(20, channel.getLog().getCommittedCount());
  Assert.assertEquals(100, channel.getLog().getTakeCount());
  Assert.assertEquals(0, channel.getLog().getRollbackCount());
  //Read Count = 100 puts + 10 commits + 100 takes + 10 commits
  Assert.assertEquals(220, channel.getLog().getReadCount());
  consumeChannel(channel);
  FileUtils.deleteQuietly(dataDir);
  FileUtils.deleteQuietly(tempBackup);
}
//Make sure data files required by the backup checkpoint are not deleted.
/**
 * After a second checkpoint, data files referenced only by the older (backup)
 * checkpoint must still exist and must not have had their metadata rewritten.
 * Only once a further checkpoint no longer needs them may they disappear.
 */
@Test
public void testDataFilesRequiredByBackupNotDeleted() throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, "true");
  overrides.put(FileChannelConfiguration.MAX_FILE_SIZE, "1000");
  channel = createFileChannel(overrides);
  channel.start();
  // Long prefix forces events to roll over multiple small data files.
  String prefix = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz";
  Assert.assertTrue(channel.isOpen());
  putEvents(channel, prefix, 10, 100);
  Set<String> origFiles = Sets.newHashSet();
  for (File dir : dataDirs) {
    origFiles.addAll(Lists.newArrayList(dir.list()));
  }
  forceCheckpoint(channel);
  takeEvents(channel, 10, 50);
  long beforeSecondCheckpoint = System.currentTimeMillis();
  forceCheckpoint(channel);
  Set<String> newFiles = Sets.newHashSet();
  int olderThanCheckpoint = 0;
  int totalMetaFiles = 0;
  for (File dir : dataDirs) {
    File[] metadataFiles = dir.listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        return name.endsWith(".meta");
      }
    });
    // Fixed: accumulate across all data dirs; previously '=' overwrote the
    // count, keeping only the last directory's total.
    totalMetaFiles += metadataFiles.length;
    for (File metadataFile : metadataFiles) {
      if (metadataFile.lastModified() < beforeSecondCheckpoint) {
        olderThanCheckpoint++;
      }
    }
    newFiles.addAll(Lists.newArrayList(dir.list()));
  }
  /*
   * Files which are not required by the new checkpoint should not have been
   * modified by the checkpoint.
   */
  Assert.assertTrue(olderThanCheckpoint > 0);
  Assert.assertTrue(totalMetaFiles != olderThanCheckpoint);
  /*
   * All files needed by original checkpoint should still be there.
   */
  Assert.assertTrue(newFiles.containsAll(origFiles));
  takeEvents(channel, 10, 50);
  forceCheckpoint(channel);
  newFiles = Sets.newHashSet();
  for (File dir : dataDirs) {
    newFiles.addAll(Lists.newArrayList(dir.list()));
  }
  // Once nothing references the old files they may be cleaned up.
  Assert.assertTrue(!newFiles.containsAll(origFiles));
}
/**
 * With the backup artificially slowed via {@link #slowdownBackup}, forcing a
 * second checkpoint while the first backup is still in progress must fail;
 * the IOException is unwrapped from the reflective forced-checkpoint call.
 */
@Test(expected = IOException.class)
public void testSlowBackup() throws Throwable {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, "true");
  overrides.put(FileChannelConfiguration.MAX_FILE_SIZE, "1000");
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> in = putEvents(channel, "restart", 10, 100);
  Assert.assertEquals(100, in.size());
  slowdownBackup(channel);
  doForcedCheckpoint(channel);
  in = putEvents(channel, "restart", 10, 100);
  takeEvents(channel, 10, 100);
  Assert.assertEquals(100, in.size());
  try {
    // Second checkpoint while the slow backup is still running.
    doForcedCheckpoint(channel);
  } catch (ReflectionError ex) {
    // Re-throw the real cause so @Test(expected = IOException.class) sees it.
    throw ex.getCause();
  } finally {
    channel.stop();
  }
}
/**
 * Verifies that with COMPRESS_BACKUP_CHECKPOINT enabled the backup checkpoint
 * is written compressed and, once decompressed, is byte-identical to the
 * primary checkpoint file.
 */
@Test
public void testCompressBackup() throws Throwable {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS,
      "true");
  overrides.put(FileChannelConfiguration.MAX_FILE_SIZE, "1000");
  overrides.put(FileChannelConfiguration.COMPRESS_BACKUP_CHECKPOINT,
      "true");
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  putEvents(channel, "restart", 10, 100);
  forceCheckpoint(channel);
  //Wait for the backup checkpoint
  Thread.sleep(2000);
  Assert.assertTrue(compressedBackupCheckpoint.exists());
  Serialization.decompressFile(compressedBackupCheckpoint, uncompressedBackupCheckpoint);
  File checkpoint = new File(checkpointDir, "checkpoint");
  // The decompressed backup must match the live checkpoint exactly.
  Assert.assertTrue(FileUtils.contentEquals(checkpoint, uncompressedBackupCheckpoint));
  channel.stop();
}
/** Restart with backup compression toggled compressed -> uncompressed. */
@Test
public void testToggleCheckpointCompressionFromTrueToFalse()
    throws Exception {
  restartToggleCompression(true);
}
/** Restart with backup compression toggled uncompressed -> compressed. */
@Test
public void testToggleCheckpointCompressionFromFalseToTrue()
    throws Exception {
  restartToggleCompression(false);
}
/**
 * Fills the channel, checkpoints with one backup-compression setting, deletes
 * the primary checkpoint, restarts with the opposite setting, and verifies
 * (a) all events survive the restart and (b) the next checkpoint writes the
 * backup in the newly configured (flipped) format.
 *
 * @param originalCheckpointCompressed compression setting for the first run
 */
public void restartToggleCompression(boolean originalCheckpointCompressed)
    throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS,
      "true");
  overrides.put(FileChannelConfiguration.MAX_FILE_SIZE, "1000");
  overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "1000");
  overrides.put(FileChannelConfiguration.CAPACITY, "1000");
  overrides.put(FileChannelConfiguration.COMPRESS_BACKUP_CHECKPOINT,
      String.valueOf(originalCheckpointCompressed));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> in = fillChannel(channel, "restart");
  forceCheckpoint(channel);
  // Wait for the asynchronous backup checkpoint to be written.
  Thread.sleep(2000);
  Assert.assertEquals(compressedBackupCheckpoint.exists(),
      originalCheckpointCompressed);
  Assert.assertEquals(uncompressedBackupCheckpoint.exists(),
      !originalCheckpointCompressed);
  channel.stop();
  // Delete the primary checkpoint so the restart must use the backup.
  File checkpoint = new File(checkpointDir, "checkpoint");
  Assert.assertTrue(checkpoint.delete());
  File checkpointMetaData = Serialization.getMetaDataFile(
      checkpoint);
  Assert.assertTrue(checkpointMetaData.delete());
  overrides.put(FileChannelConfiguration.COMPRESS_BACKUP_CHECKPOINT,
      String.valueOf(!originalCheckpointCompressed));
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> out = consumeChannel(channel);
  compareInputAndOut(in, out);
  forceCheckpoint(channel);
  Thread.sleep(2000);
  // The backup format must now reflect the flipped compression setting.
  Assert.assertEquals(compressedBackupCheckpoint.exists(),
      !originalCheckpointCompressed);
  Assert.assertEquals(uncompressedBackupCheckpoint.exists(),
      originalCheckpointCompressed);
}
/**
 * Uses reflection (fest-reflect) to dig through the channel's Log and event
 * queue to the backing store and set its test-only "slowdownBackup" flag,
 * making backup-checkpoint writes artificially slow.
 */
private static void slowdownBackup(FileChannel channel) {
  Log channelLog = field("log").ofType(Log.class).in(channel).get();
  FlumeEventQueue eventQueue =
      field("queue").ofType(FlumeEventQueue.class).in(channelLog).get();
  EventQueueBackingStore store =
      field("backingStore").ofType(EventQueueBackingStore.class).in(eventQueue).get();
  field("slowdownBackup").ofType(Boolean.class).in(store).set(true);
}
}
| 9,689 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannelErrorMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.flume.ChannelException;
import org.apache.flume.Transaction;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.apache.flume.event.EventBuilder;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static junit.framework.Assert.assertEquals;
import static junit.framework.Assert.assertFalse;
import static junit.framework.Assert.assertNotNull;
import static junit.framework.Assert.assertTrue;
/**
 * Tests for the FileChannel's error/health counters
 * ({@link FileChannelCounter}): put/take error counts, checkpoint and
 * checkpoint-backup write error counts, and the unhealthy/closed flags.
 */
public class TestFileChannelErrorMetrics extends TestFileChannelBase {
  public TestFileChannelErrorMetrics() {
    // use only 1 data directory in order to make it simpler to edit the data files
    // in testCorruptEventTaken() and testUnhealthy() methods
    super(1);
  }

  /**
   * This tests multiple successful and failed put and take operations
   * and checks the values of the channel's counters.
   */
  @Test
  public void testEventTakePutErrorCount() throws Exception {
    final long usableSpaceRefreshInterval = 1;
    // Spy the channel so the log builder can be given a tiny usable-space
    // refresh interval; the channel then notices the deleted directory fast.
    FileChannel channel = Mockito.spy(createFileChannel());
    Mockito.when(channel.createLogBuilder()).then(new Answer<Log.Builder>() {
      @Override
      public Log.Builder answer(InvocationOnMock invocation) throws Throwable {
        Log.Builder ret = (Log.Builder) invocation.callRealMethod();
        ret.setUsableSpaceRefreshInterval(usableSpaceRefreshInterval);
        return ret;
      }
    });
    channel.start();
    FileChannelCounter channelCounter = channel.getChannelCounter();

    // Two successful puts.
    Transaction tx = channel.getTransaction();
    tx.begin();
    channel.put(EventBuilder.withBody("test1".getBytes()));
    channel.put(EventBuilder.withBody("test2".getBytes()));
    tx.commit();
    tx.close();
    assertEquals(2, channelCounter.getEventPutAttemptCount());
    assertEquals(2, channelCounter.getEventPutSuccessCount());
    assertEquals(0, channelCounter.getEventPutErrorCount());

    // One successful take.
    tx = channel.getTransaction();
    tx.begin();
    channel.take();
    tx.commit();
    tx.close();
    assertEquals(1, channelCounter.getEventTakeAttemptCount());
    assertEquals(1, channelCounter.getEventTakeSuccessCount());
    assertEquals(0, channelCounter.getEventTakeErrorCount());

    // Delete the channel's directories so subsequent operations fail with
    // IOException-backed ChannelExceptions.
    FileUtils.deleteDirectory(baseDir);
    Thread.sleep(2 * usableSpaceRefreshInterval);
    tx = channel.getTransaction();
    tx.begin();
    ChannelException putException = null;
    try {
      channel.put(EventBuilder.withBody("test".getBytes()));
    } catch (ChannelException ex) {
      putException = ex;
    }
    assertNotNull(putException);
    assertTrue(putException.getCause() instanceof IOException);
    assertEquals(3, channelCounter.getEventPutAttemptCount());
    assertEquals(2, channelCounter.getEventPutSuccessCount());
    assertEquals(1, channelCounter.getEventPutErrorCount());

    ChannelException takeException = null;
    try {
      channel.take(); // This is guaranteed to throw an error if the above put() threw an error.
    } catch (ChannelException ex) {
      takeException = ex;
    }
    assertNotNull(takeException);
    assertTrue(takeException.getCause() instanceof IOException);
    assertEquals(2, channelCounter.getEventTakeAttemptCount());
    assertEquals(1, channelCounter.getEventTakeSuccessCount());
    assertEquals(1, channelCounter.getEventTakeErrorCount());
  }

  /**
   * Test the FileChannelCounter.eventTakeErrorCount value if the data file
   * contains an invalid record thus CorruptEventException is thrown during
   * the take() operation.
   * The first byte of the record (= the first byte of the file in this case)
   * is the operation byte, changing it to an unexpected value will cause the
   * CorruptEventException to be thrown.
   */
  @Test
  public void testCorruptEventTaken() throws Exception {
    FileChannel channel = createFileChannel(
        Collections.singletonMap(FileChannelConfiguration.FSYNC_PER_TXN, "false"));
    channel.start();
    FileChannelCounter channelCounter = channel.getChannelCounter();
    Transaction tx = channel.getTransaction();
    tx.begin();
    channel.put(EventBuilder.withBody("test".getBytes()));
    tx.commit();
    tx.close();
    byte[] data = FileUtils.readFileToByteArray(new File(dataDirs[0], "log-1"));
    data[0] = LogFile.OP_EOF; // change the first (operation) byte to unexpected value
    FileUtils.writeByteArrayToFile(new File(dataDirs[0], "log-1"), data);
    tx = channel.getTransaction();
    tx.begin();
    try {
      channel.take();
    } catch (Throwable t) {
      // If fsyncPerTransaction is false then Log.get throws the CorruptEventException
      // without wrapping it to IOException (which is the case when fsyncPerTransaciton is true)
      // but in this case it is swallowed in FileBackedTransaction.doTake()
      // The eventTakeErrorCount should be increased regardless of this.
      Assert.fail("No exception should be thrown as fsyncPerTransaction is false");
    }
    assertEquals(1, channelCounter.getEventTakeAttemptCount());
    assertEquals(0, channelCounter.getEventTakeSuccessCount());
    assertEquals(1, channelCounter.getEventTakeErrorCount());
  }

  /**
   * Verifies checkpointWriteErrorCount stays 0 while checkpoints succeed and
   * becomes 1 after the channel's directories are deleted out from under it.
   */
  @Test
  public void testCheckpointWriteErrorCount() throws Exception {
    int checkpointInterval = 1500;
    final FileChannel channel = createFileChannel(Collections.singletonMap(
        FileChannelConfiguration.CHECKPOINT_INTERVAL, String.valueOf(checkpointInterval)));
    channel.start();
    Transaction tx = channel.getTransaction();
    tx.begin();
    channel.put(EventBuilder.withBody("test".getBytes()));
    tx.commit();
    tx.close();
    final long beforeCheckpointWrite = System.currentTimeMillis();
    // first checkpoint should be written successfully -> the counter should remain 0
    assertEventuallyTrue("checkpoint should have been written", new BooleanPredicate() {
      @Override
      public boolean get() {
        return new File(checkpointDir, "checkpoint").lastModified() > beforeCheckpointWrite;
      }
    }, checkpointInterval * 3);
    assertEquals(0, channel.getChannelCounter().getCheckpointWriteErrorCount());
    FileUtils.deleteDirectory(baseDir);
    // the channel's directory has been deleted so the checkpoint write should have been failed
    assertEventuallyTrue("checkpointWriterErrorCount should be 1", new BooleanPredicate() {
      @Override
      public boolean get() {
        return channel.getChannelCounter().getCheckpointWriteErrorCount() == 1;
      }
    }, checkpointInterval * 3);
  }

  /**
   * Test the value of the FileChannelCounter.unhealthy flag after normal startup.
   * It is expected to be 0
   */
  @Test
  public void testHealthy() throws Exception {
    FileChannel channel = createFileChannel();
    assertEquals(0, channel.getChannelCounter().getUnhealthy());
    assertEquals(1, channel.getChannelCounter().getClosed());
    assertFalse(channel.getChannelCounter().isOpen());
    channel.start();
    assertEquals(0, channel.getChannelCounter().getUnhealthy());
    assertEquals(0, channel.getChannelCounter().getClosed());
    assertTrue(channel.getChannelCounter().isOpen());
  }

  /**
   * Test the value of the FileChannelCounter.unhealthy flag after a failed startup.
   * It is expected to be 1
   */
  @Test
  public void testUnhealthy() throws Exception {
    FileChannel channel = createFileChannel();
    assertEquals(0, channel.getChannelCounter().getUnhealthy());
    assertEquals(1, channel.getChannelCounter().getClosed());
    assertFalse(channel.getChannelCounter().isOpen());
    // Garbage in the data file makes replay (and therefore start) fail.
    FileUtils.write(new File(dataDirs[0], "log-1"), "invalid data file content");
    channel.start();
    assertEquals(1, channel.getChannelCounter().getUnhealthy());
    assertEquals(1, channel.getChannelCounter().getClosed());
    assertFalse(channel.getChannelCounter().isOpen());
  }

  /**
   * Drives the backing store directly: calling checkpoint() without the
   * expected preceding state transition makes the asynchronous backup write
   * fail, which must bump checkpointBackupWriteErrorCount.
   */
  @Test
  public void testCheckpointBackupWriteErrorShouldIncreaseCounter()
      throws IOException, InterruptedException {
    final FileChannelCounter fileChannelCounter = new FileChannelCounter("test");
    File checkpointFile = File.createTempFile("checkpoint", ".tmp");
    File backupDir = Files.createTempDirectory("checkpoint").toFile();
    backupDir.deleteOnExit();
    checkpointFile.deleteOnExit();
    EventQueueBackingStoreFileV3 backingStoreFileV3 = new EventQueueBackingStoreFileV3(
        checkpointFile, 1, "test", fileChannelCounter, backupDir, true, false
    );
    // Exception will be thrown by state check if beforeCheckpoint is not called
    backingStoreFileV3.checkpoint();
    // wait for other thread to reach the error state
    assertEventuallyTrue("checkpoint backup write failure should increase counter to 1",
        new BooleanPredicate() {
          @Override
          public boolean get() {
            return fileChannelCounter.getCheckpointBackupWriteErrorCount() == 1;
          }
        },
        100
    );
  }

  /**
   * End-to-end variant: deletes the backup directory under a running channel
   * and verifies checkpointBackupWriteErrorCount increases when the next
   * backup checkpoint write fails.
   */
  @Test
  public void testCheckpointBackupWriteErrorShouldIncreaseCounter2()
      throws Exception {
    int checkpointInterval = 1500;
    // Fixed: use parameterized types instead of raw Map/HashMap.
    Map<String, String> config = new HashMap<>();
    config.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, String.valueOf(checkpointInterval));
    config.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, "true");
    final FileChannel channel = createFileChannel(Collections.unmodifiableMap(config));
    channel.start();
    Transaction tx = channel.getTransaction();
    tx.begin();
    channel.put(EventBuilder.withBody("test".getBytes()));
    tx.commit();
    tx.close();
    final long beforeCheckpointWrite = System.currentTimeMillis();
    // first checkpoint should be written successfully -> the counter should remain 0
    assertEventuallyTrue("checkpoint backup should have been written", new BooleanPredicate() {
      @Override
      public boolean get() {
        return new File(backupDir, "checkpoint").lastModified() > beforeCheckpointWrite;
      }
    }, checkpointInterval * 3);
    assertEquals(0, channel.getChannelCounter().getCheckpointBackupWriteErrorCount());
    FileUtils.deleteDirectory(backupDir);
    tx = channel.getTransaction();
    tx.begin();
    channel.put(EventBuilder.withBody("test2".getBytes()));
    tx.commit();
    tx.close();
    // the backup directory has been deleted so the backup checkpoint write should have been failed
    assertEventuallyTrue("checkpointBackupWriteErrorCount should be 1", new BooleanPredicate() {
      @Override
      public boolean get() {
        return channel.getChannelCounter().getCheckpointBackupWriteErrorCount() >= 1;
      }
    }, checkpointInterval * 3);
  }

  /** Lazily evaluated condition used by {@link #assertEventuallyTrue}. */
  private interface BooleanPredicate {
    boolean get();
  }

  /**
   * Polls {@code expression} until it becomes true or {@code timeoutMillis}
   * elapses, then asserts it one final time.
   */
  private static void assertEventuallyTrue(String description, BooleanPredicate expression,
                                           long timeoutMillis)
      throws InterruptedException {
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() < start + timeoutMillis) {
      if (expression.get()) break;
      Thread.sleep(timeoutMillis / 10);
    }
    assertTrue(description, expression.get());
  }
}
| 9,690 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestEventUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.apache.flume.Event;
import org.junit.Test;
public class TestEventUtils {

  /** A {@code Put} record wrapping a real event must yield that event back. */
  @Test
  public void testPutEvent() {
    FlumeEvent wrapped = new FlumeEvent(null, new byte[5]);
    Put putRecord = new Put(1L, 1L, wrapped);
    Event extracted = EventUtils.getEventFromTransactionEvent(putRecord);
    Assert.assertNotNull(extracted);
    Assert.assertEquals(5, extracted.getBody().length);
  }

  /** Non-Put transaction records (e.g. a Take) carry no event and map to null. */
  @Test
  public void testInvalidEvent() {
    Take takeRecord = new Take(1L, 1L);
    Assert.assertNull(EventUtils.getEventFromTransactionEvent(takeRecord));
  }
}
| 9,691 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestWriteOrderOracle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.junit.Test;
public class TestWriteOrderOracle {

  /**
   * Seeding the oracle far ahead of its current value must make subsequent
   * write-order IDs jump past the wall-clock time, proving the seed took effect.
   */
  @Test
  public void testSetSeed() {
    long seed = WriteOrderOracle.next() + Integer.MAX_VALUE;
    WriteOrderOracle.setSeed(seed);
    Assert.assertTrue(WriteOrderOracle.next() > System.currentTimeMillis());
  }
}
| 9,692 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestIntegration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.flume.Context;
import org.apache.flume.conf.Configurables;
import org.apache.flume.sink.NullSink;
import org.apache.flume.source.SequenceGeneratorSource;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
public class TestIntegration {

  private static final Logger LOG = LoggerFactory
      .getLogger(TestIntegration.class);

  private FileChannel channel;
  private File baseDir;
  private File checkpointDir;
  private File[] dataDirs;
  // Comma-separated list of the data directory paths, in the format the
  // channel configuration expects.
  private String dataDir;

  @Before
  public void setup() {
    baseDir = Files.createTempDir();
    checkpointDir = new File(baseDir, "chkpt");
    Assert.assertTrue(checkpointDir.mkdirs() || checkpointDir.isDirectory());
    dataDirs = new File[3];
    dataDir = "";
    for (int i = 0; i < dataDirs.length; i++) {
      dataDirs[i] = new File(baseDir, "data" + (i + 1));
      Assert.assertTrue(dataDirs[i].mkdirs() || dataDirs[i].isDirectory());
      dataDir += dataDirs[i].getAbsolutePath() + ",";
    }
    // Drop the trailing comma.
    dataDir = dataDir.substring(0, dataDir.length() - 1);
  }

  @After
  public void teardown() {
    if (channel != null && channel.isOpen()) {
      channel.stop();
    }
    FileUtils.deleteQuietly(baseDir);
  }

  /**
   * End-to-end smoke test: a sequence-generator source feeds the file channel
   * while a null sink drains it. A short checkpoint interval and a small max
   * file size ensure checkpoints and log rolls occur during the test.
   */
  @Test
  public void testIntegration() throws IOException, InterruptedException {
    Context context = new Context();
    context.put(FileChannelConfiguration.CHECKPOINT_DIR,
        checkpointDir.getAbsolutePath());
    context.put(FileChannelConfiguration.DATA_DIRS, dataDir);
    context.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
    // Checkpoint every 5 seconds, otherwise the test runs out of memory.
    context.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "5000");
    context.put(FileChannelConfiguration.MAX_FILE_SIZE,
        String.valueOf(1024 * 1024 * 5));
    channel = new FileChannel();
    channel.setName("FileChannel-" + UUID.randomUUID());
    Configurables.configure(channel, context);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    SequenceGeneratorSource source = new SequenceGeneratorSource();
    CountingSourceRunner sourceRunner = new CountingSourceRunner(source, channel);
    source.configure(context);
    source.start();
    NullSink sink = new NullSink();
    sink.setChannel(channel);
    CountingSinkRunner sinkRunner = new CountingSinkRunner(sink);
    sinkRunner.start();
    sourceRunner.start();
    TimeUnit.SECONDS.sleep(30);
    // Shut down the source and wait for its thread to exit.
    sourceRunner.shutdown();
    while (sourceRunner.isAlive()) {
      Thread.sleep(10L);
    }
    // Wait for the sink to drain the channel completely.
    while (channel.getDepth() > 0) {
      Thread.sleep(10L);
    }
    // Shut down the sink, then allow in-flight work to settle.
    sinkRunner.shutdown();
    TimeUnit.SECONDS.sleep(5);
    List<File> logs = Lists.newArrayList();
    for (int i = 0; i < dataDirs.length; i++) {
      logs.addAll(LogUtils.getLogs(dataDirs[i]));
    }
    LOG.info("Total Number of Logs = " + logs.size());
    for (File logFile : logs) {
      LOG.info("LogFile = " + logFile);
    }
    // Log each runner with its own count (the labels were previously swapped).
    LOG.info("Source processed " + sourceRunner.getCount());
    LOG.info("Sink processed " + sinkRunner.getCount());
    for (Exception ex : sourceRunner.getErrors()) {
      LOG.warn("Source had error", ex);
    }
    for (Exception ex : sinkRunner.getErrors()) {
      LOG.warn("Sink had error", ex);
    }
    // Once the channel is drained, everything the source produced must have
    // been seen by the sink. (Previously this compared sinkRunner's count
    // with itself, which always passed.)
    Assert.assertEquals(sourceRunner.getCount(), sinkRunner.getCount());
    Assert.assertEquals(Collections.EMPTY_LIST, sinkRunner.getErrors());
    Assert.assertEquals(Collections.EMPTY_LIST, sourceRunner.getErrors());
  }
}
| 9,693 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestLogFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.flume.channel.file.proto.ProtosFactory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Tests for {@link LogFile} writers and readers: event round-trips (random and
 * sequential), metadata file recovery, corrupt/noop record handling, the
 * operation-record updater, and group commit.
 */
public class TestLogFile {
  private int fileID;
  private long transactionID;
  private LogFile.Writer logFileWriter;
  private File dataDir;
  private File dataFile;

  @Before
  public void setup() throws IOException {
    // Each test writes to a fresh temp data file named after the file id.
    fileID = 1;
    transactionID = 1L;
    dataDir = Files.createTempDir();
    dataFile = new File(dataDir, String.valueOf(fileID));
    Assert.assertTrue(dataDir.isDirectory());
    logFileWriter = LogFileFactory.getWriter(dataFile, fileID,
        Integer.MAX_VALUE, null, null, null, Long.MAX_VALUE, true, 0);
  }

  @After
  public void cleanup() throws IOException {
    // Close the writer before deleting the directory so handles are released.
    try {
      if (logFileWriter != null) {
        logFileWriter.close();
      }
    } finally {
      FileUtils.deleteQuietly(dataDir);
    }
  }

  /** The factory must refuse to create a writer over an existing file. */
  @Test
  public void testWriterRefusesToOverwriteFile() throws IOException {
    Assert.assertTrue(dataFile.isFile() || dataFile.createNewFile());
    try {
      LogFileFactory.getWriter(dataFile, fileID, Integer.MAX_VALUE, null, null,
          null, Long.MAX_VALUE, true, 0);
      Assert.fail();
    } catch (IllegalStateException e) {
      Assert.assertEquals("File already exists " + dataFile.getAbsolutePath(), e.getMessage());
    }
  }

  /** The factory must also reject a data-file path that is a directory. */
  @Test
  public void testWriterFailsWithDirectory() throws IOException {
    FileUtils.deleteQuietly(dataFile);
    Assert.assertFalse(dataFile.exists());
    Assert.assertTrue(dataFile.mkdirs());
    try {
      LogFileFactory.getWriter(dataFile, fileID, Integer.MAX_VALUE, null, null,
          null, Long.MAX_VALUE, true, 0);
      Assert.fail();
    } catch (IllegalStateException e) {
      Assert.assertEquals("File already exists " + dataFile.getAbsolutePath(), e.getMessage());
    }
  }

  /**
   * Writes 1000 events and reads each back concurrently through a shared
   * RandomReader; worker-thread failures are collected and rethrown so they
   * fail the test on the main thread.
   */
  @Test
  public void testPutGet() throws InterruptedException, IOException {
    final List<Throwable> errors =
        Collections.synchronizedList(new ArrayList<Throwable>());
    ExecutorService executorService = Executors.newFixedThreadPool(10);
    CompletionService<Void> completionService = new ExecutorCompletionService
        <Void>(executorService);
    final LogFile.RandomReader logFileReader = LogFileFactory.getRandomReader(dataFile, null, true);
    for (int i = 0; i < 1000; i++) {
      // First throw assertion failures, then any other errors, so JUnit
      // reports them as failures rather than errors where possible.
      synchronized (errors) {
        for (Throwable throwable : errors) {
          Throwables.propagateIfInstanceOf(throwable, AssertionError.class);
        }
        for (Throwable throwable : errors) {
          Throwables.propagate(throwable);
        }
      }
      final FlumeEvent eventIn = TestUtils.newPersistableEvent();
      final Put put = new Put(++transactionID, WriteOrderOracle.next(),
          eventIn);
      ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
      FlumeEventPointer ptr = logFileWriter.put(bytes);
      final int offset = ptr.getOffset();
      completionService.submit(new Runnable() {
        @Override
        public void run() {
          try {
            FlumeEvent eventOut = logFileReader.get(offset);
            Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
            Assert.assertTrue(Arrays.equals(eventIn.getBody(), eventOut.getBody()));
          } catch (Throwable throwable) {
            synchronized (errors) {
              errors.add(throwable);
            }
          }
        }
      }, null);
    }
    // Wait for all 1000 read tasks to complete.
    for (int i = 0; i < 1000; i++) {
      completionService.take();
    }
    // Final sweep: assertion failures first, then errors.
    for (Throwable throwable : errors) {
      Throwables.propagateIfInstanceOf(throwable, AssertionError.class);
    }
    for (Throwable throwable : errors) {
      Throwables.propagate(throwable);
    }
    // NOTE(review): executorService is never shut down; consider
    // executorService.shutdown() to release the worker threads promptly.
  }

  /** Sequential read must return every put at the offset it was written to. */
  @Test
  public void testReader() throws InterruptedException, IOException,
      CorruptEventException {
    Map<Integer, Put> puts = Maps.newHashMap();
    for (int i = 0; i < 1000; i++) {
      FlumeEvent eventIn = TestUtils.newPersistableEvent();
      Put put = new Put(++transactionID, WriteOrderOracle.next(), eventIn);
      ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
      FlumeEventPointer ptr = logFileWriter.put(bytes);
      puts.put(ptr.getOffset(), put);
    }
    LogFile.SequentialReader reader =
        LogFileFactory.getSequentialReader(dataFile, null, true);
    LogRecord entry;
    while ((entry = reader.next()) != null) {
      Integer offset = entry.getOffset();
      TransactionEventRecord record = entry.getEvent();
      Put put = puts.get(offset);
      FlumeEvent eventIn = put.getEvent();
      Assert.assertEquals(put.getTransactionID(), record.getTransactionID());
      Assert.assertTrue(record instanceof Put);
      FlumeEvent eventOut = ((Put) record).getEvent();
      Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
      Assert.assertTrue(Arrays.equals(eventIn.getBody(), eventOut.getBody()));
    }
  }

  /**
   * If only the old metadata file ("meta.old") exists, opening a sequential
   * reader must regenerate the current metadata file and still read all puts.
   */
  @Test
  public void testReaderOldMetaFile() throws InterruptedException,
      IOException, CorruptEventException {
    Map<Integer, Put> puts = Maps.newHashMap();
    for (int i = 0; i < 1000; i++) {
      FlumeEvent eventIn = TestUtils.newPersistableEvent();
      Put put = new Put(++transactionID, WriteOrderOracle.next(),
          eventIn);
      ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
      FlumeEventPointer ptr = logFileWriter.put(bytes);
      puts.put(ptr.getOffset(), put);
    }
    // Rename the meta file to meta.old to simulate an interrupted update.
    File metadataFile = Serialization.getMetaDataFile(dataFile);
    File oldMetadataFile = Serialization.getOldMetaDataFile(dataFile);
    if (!metadataFile.renameTo(oldMetadataFile)) {
      Assert.fail("Renaming to meta.old failed");
    }
    LogFile.SequentialReader reader =
        LogFileFactory.getSequentialReader(dataFile, null, true);
    Assert.assertTrue(metadataFile.exists());
    Assert.assertFalse(oldMetadataFile.exists());
    LogRecord entry;
    while ((entry = reader.next()) != null) {
      Integer offset = entry.getOffset();
      TransactionEventRecord record = entry.getEvent();
      Put put = puts.get(offset);
      FlumeEvent eventIn = put.getEvent();
      Assert.assertEquals(put.getTransactionID(), record.getTransactionID());
      Assert.assertTrue(record instanceof Put);
      FlumeEvent eventOut = ((Put) record).getEvent();
      Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
      Assert.assertTrue(Arrays.equals(eventIn.getBody(), eventOut.getBody()));
    }
  }

  /**
   * If both a temp metadata file and a meta.old file exist, the temp file must
   * be promoted to the current metadata file and both leftovers removed.
   */
  @Test
  public void testReaderTempMetaFile()
      throws InterruptedException, IOException, CorruptEventException {
    Map<Integer, Put> puts = Maps.newHashMap();
    for (int i = 0; i < 1000; i++) {
      FlumeEvent eventIn = TestUtils.newPersistableEvent();
      Put put = new Put(++transactionID, WriteOrderOracle.next(),
          eventIn);
      ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
      FlumeEventPointer ptr = logFileWriter.put(bytes);
      puts.put(ptr.getOffset(), put);
    }
    // Rename the meta file to meta.temp to simulate an interrupted update.
    File metadataFile = Serialization.getMetaDataFile(dataFile);
    File tempMetadataFile = Serialization.getMetaDataTempFile(dataFile);
    File oldMetadataFile = Serialization.getOldMetaDataFile(dataFile);
    oldMetadataFile.createNewFile(); //Make sure temp file is picked up.
    if (!metadataFile.renameTo(tempMetadataFile)) {
      Assert.fail("Renaming to meta.temp failed");
    }
    LogFile.SequentialReader reader =
        LogFileFactory.getSequentialReader(dataFile, null, true);
    Assert.assertTrue(metadataFile.exists());
    Assert.assertFalse(tempMetadataFile.exists());
    Assert.assertFalse(oldMetadataFile.exists());
    LogRecord entry;
    while ((entry = reader.next()) != null) {
      Integer offset = entry.getOffset();
      TransactionEventRecord record = entry.getEvent();
      Put put = puts.get(offset);
      FlumeEvent eventIn = put.getEvent();
      Assert.assertEquals(put.getTransactionID(), record.getTransactionID());
      Assert.assertTrue(record instanceof Put);
      FlumeEvent eventOut = ((Put) record).getEvent();
      Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
      Assert.assertTrue(Arrays.equals(eventIn.getBody(), eventOut.getBody()));
    }
  }

  /** Round-trips protobuf log-file metadata through writeDelimitedTo. */
  @Test
  public void testWriteDelimitedTo() throws IOException {
    if (dataFile.isFile()) {
      Assert.assertTrue(dataFile.delete());
    }
    Assert.assertTrue(dataFile.createNewFile());
    ProtosFactory.LogFileMetaData.Builder metaDataBuilder =
        ProtosFactory.LogFileMetaData.newBuilder();
    metaDataBuilder.setVersion(1);
    metaDataBuilder.setLogFileID(2);
    metaDataBuilder.setCheckpointPosition(3);
    metaDataBuilder.setCheckpointWriteOrderID(4);
    LogFileV3.writeDelimitedTo(metaDataBuilder.build(), dataFile);
    // NOTE(review): this FileInputStream is never closed; consider
    // try-with-resources.
    ProtosFactory.LogFileMetaData metaData =
        ProtosFactory.LogFileMetaData.parseDelimitedFrom(new FileInputStream(dataFile));
    Assert.assertEquals(1, metaData.getVersion());
    Assert.assertEquals(2, metaData.getLogFileID());
    Assert.assertEquals(3, metaData.getCheckpointPosition());
    Assert.assertEquals(4, metaData.getCheckpointWriteOrderID());
  }

  /** Overwriting bytes inside a committed record must surface as corruption. */
  @Test(expected = CorruptEventException.class)
  public void testPutGetCorruptEvent() throws Exception {
    final LogFile.RandomReader logFileReader =
        LogFileFactory.getRandomReader(dataFile, null, true);
    final FlumeEvent eventIn = TestUtils.newPersistableEvent(2500);
    final Put put = new Put(++transactionID, WriteOrderOracle.next(), eventIn);
    ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
    FlumeEventPointer ptr = logFileWriter.put(bytes);
    logFileWriter.commit(TransactionEventRecord.toByteBuffer(
        new Commit(transactionID, WriteOrderOracle.next())));
    logFileWriter.sync();
    final int offset = ptr.getOffset();
    // Corrupt two bytes in the middle of the persisted event body.
    // NOTE(review): this RandomAccessFile is never closed.
    RandomAccessFile writer = new RandomAccessFile(dataFile, "rw");
    writer.seek(offset + 1500);
    writer.write((byte) 45);
    writer.write((byte) 12);
    writer.getFD().sync();
    logFileReader.get(offset);
    // Should have thrown an exception by now.
    Assert.fail();
  }

  /** A record marked as a noop must raise NoopRecordException on read. */
  @Test(expected = NoopRecordException.class)
  public void testPutGetNoopEvent() throws Exception {
    final LogFile.RandomReader logFileReader =
        LogFileFactory.getRandomReader(dataFile, null, true);
    final FlumeEvent eventIn = TestUtils.newPersistableEvent(2500);
    final Put put = new Put(++transactionID, WriteOrderOracle.next(), eventIn);
    ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
    FlumeEventPointer ptr = logFileWriter.put(bytes);
    logFileWriter.commit(TransactionEventRecord.toByteBuffer(
        new Commit(transactionID, WriteOrderOracle.next())));
    logFileWriter.sync();
    final int offset = ptr.getOffset();
    LogFile.OperationRecordUpdater updater =
        new LogFile.OperationRecordUpdater(dataFile);
    updater.markRecordAsNoop(offset);
    logFileReader.get(offset);
    // Should have thrown an exception by now.
    Assert.fail();
  }

  /** The updater must flip exactly the targeted offsets from OP_RECORD to OP_NOOP. */
  @Test
  public void testOperationRecordUpdater() throws Exception {
    // NOTE(review): tempDir is never deleted and tempFile never closed;
    // consider cleaning both up.
    File tempDir = Files.createTempDir();
    File temp = new File(tempDir, "temp");
    final RandomAccessFile tempFile = new RandomAccessFile(temp, "rw");
    for (int i = 0; i < 5000; i++) {
      tempFile.write(LogFile.OP_RECORD);
    }
    tempFile.seek(0);
    LogFile.OperationRecordUpdater recordUpdater = new LogFile
        .OperationRecordUpdater(temp);
    //Convert every 10th byte into a noop byte
    for (int i = 0; i < 5000; i += 10) {
      recordUpdater.markRecordAsNoop(i);
    }
    recordUpdater.close();
    tempFile.seek(0);
    // Verify every 10th byte is actually a NOOP
    for (int i = 0; i < 5000; i += 10) {
      tempFile.seek(i);
      Assert.assertEquals(LogFile.OP_NOOP, tempFile.readByte());
    }
  }

  /** Marking a real persisted event as noop must rewrite its operation byte. */
  @Test
  public void testOpRecordUpdaterWithFlumeEvents() throws Exception {
    final FlumeEvent eventIn = TestUtils.newPersistableEvent(2500);
    final Put put = new Put(++transactionID, WriteOrderOracle.next(), eventIn);
    ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
    FlumeEventPointer ptr = logFileWriter.put(bytes);
    logFileWriter.commit(TransactionEventRecord.toByteBuffer(
        new Commit(transactionID, WriteOrderOracle.next())));
    logFileWriter.sync();
    final int offset = ptr.getOffset();
    LogFile.OperationRecordUpdater updater =
        new LogFile.OperationRecordUpdater(dataFile);
    updater.markRecordAsNoop(offset);
    // Reads the first byte of the file; the put above is the first record.
    // NOTE(review): fileReader is never closed.
    RandomAccessFile fileReader = new RandomAccessFile(dataFile, "rw");
    Assert.assertEquals(LogFile.OP_NOOP, fileReader.readByte());
  }

  /**
   * 20 threads commit and then sync at a barrier simultaneously; group commit
   * should coalesce all of them into a single physical sync.
   */
  @Test
  public void testGroupCommit() throws Exception {
    final FlumeEvent eventIn = TestUtils.newPersistableEvent(250);
    final CyclicBarrier barrier = new CyclicBarrier(20);
    ExecutorService executorService = Executors.newFixedThreadPool(20);
    ExecutorCompletionService<Void> completionService = new
        ExecutorCompletionService<Void>(executorService);
    final LogFile.Writer writer = logFileWriter;
    final AtomicLong txnId = new AtomicLong(++transactionID);
    for (int i = 0; i < 20; i++) {
      completionService.submit(new Callable<Void>() {
        @Override
        public Void call() {
          try {
            Put put = new Put(txnId.incrementAndGet(),
                WriteOrderOracle.next(), eventIn);
            ByteBuffer bytes = TransactionEventRecord.toByteBuffer(put);
            writer.put(bytes);
            writer.commit(TransactionEventRecord.toByteBuffer(
                new Commit(txnId.get(), WriteOrderOracle.next())));
            // All threads rendezvous here before syncing together.
            barrier.await();
            writer.sync();
          } catch (Exception ex) {
            Throwables.propagate(ex);
          }
          return null;
        }
      });
    }
    for (int i = 0; i < 20; i++) {
      completionService.take().get();
    }
    // At least 250*20, but can be higher due to serialization overhead
    Assert.assertTrue(logFileWriter.position() >= 5000);
    Assert.assertEquals(1, writer.getSyncCount());
    Assert.assertTrue(logFileWriter.getLastCommitPosition() == logFileWriter.getLastSyncPosition());
    executorService.shutdown();
  }
}
| 9,694 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestFileChannelFormatRegression.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Maps;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static org.apache.flume.channel.file.TestUtils.compareInputAndOut;
import static org.apache.flume.channel.file.TestUtils.takeEvents;
public class TestFileChannelFormatRegression extends TestFileChannelBase {
  protected static final Logger LOG = LoggerFactory
      .getLogger(TestFileChannelFormatRegression.class);

  @Before
  public void setup() throws Exception {
    super.setup();
  }

  @After
  public void teardown() {
    super.teardown();
  }

  /**
   * Regression check against checkpoint/log fixtures produced by a file
   * channel that already contained the FLUME-1432 patch.
   */
  @Test
  public void testFileFormatV2postFLUME1432()
      throws Exception {
    TestUtils.copyDecompressed("fileformat-v2-checkpoint.gz",
        new File(checkpointDir, "checkpoint"));
    for (int dirIndex = 0; dirIndex < dataDirs.length; dirIndex++) {
      int fileIndex = dirIndex + 1;
      TestUtils.copyDecompressed("fileformat-v2-log-" + fileIndex + ".gz",
          new File(dataDirs[dirIndex], "log-" + fileIndex));
    }
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY,
        String.valueOf(10));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> events = takeEvents(channel, 1);
    Set<String> expected = new HashSet<String>(Arrays.asList(
        "2684", "2685", "2686", "2687", "2688", "2689", "2690", "2691"));
    compareInputAndOut(expected, events);
  }

  /**
   * Regression checks against fixtures produced by a file channel that
   * predates the FLUME-1432 patch, exercising both replay implementations.
   */
  @Test
  public void testFileFormatV2PreFLUME1432LogReplayV1()
      throws Exception {
    doTestFileFormatV2PreFLUME1432(true);
  }

  @Test
  public void testFileFormatV2PreFLUME1432LogReplayV2()
      throws Exception {
    doTestFileFormatV2PreFLUME1432(false);
  }

  public void doTestFileFormatV2PreFLUME1432(boolean useLogReplayV1)
      throws Exception {
    TestUtils.copyDecompressed("fileformat-v2-pre-FLUME-1432-checkpoint.gz",
        new File(checkpointDir, "checkpoint"));
    for (int dirIndex = 0; dirIndex < dataDirs.length; dirIndex++) {
      int fileIndex = dirIndex + 1;
      TestUtils.copyDecompressed("fileformat-v2-pre-FLUME-1432-log-" + fileIndex
          + ".gz", new File(dataDirs[dirIndex], "log-" + fileIndex));
    }
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
    overrides.put(FileChannelConfiguration.USE_LOG_REPLAY_V1,
        String.valueOf(useLogReplayV1));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> events = takeEvents(channel, 1);
    Assert.assertEquals(50, events.size());
  }
}
| 9,695 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestTransactionIDOracle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.junit.Test;
public class TestTransactionIDOracle {

  /**
   * Seeding the oracle far ahead of its current value must make subsequent
   * transaction IDs jump past the wall-clock time, proving the seed took effect.
   */
  @Test
  public void testSetSeed() {
    long seed = TransactionIDOracle.next() + Integer.MAX_VALUE;
    TransactionIDOracle.setSeed(seed);
    Assert.assertTrue(TransactionIDOracle.next() > System.currentTimeMillis());
  }
}
| 9,696 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestLog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.List;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestLog {
private static final Logger LOGGER = LoggerFactory.getLogger(TestLog.class);
private static final long MAX_FILE_SIZE = 1000;
private static final int CAPACITY = 10000;
private Log log;
private File checkpointDir;
private File[] dataDirs;
private long transactionID;
  @Before
  public void setup() throws IOException {
    // Each test starts from transaction id 0 with a fresh checkpoint
    // directory and three fresh data directories.
    transactionID = 0;
    checkpointDir = Files.createTempDir();
    FileUtils.forceDeleteOnExit(checkpointDir);
    Assert.assertTrue(checkpointDir.isDirectory());
    dataDirs = new File[3];
    for (int i = 0; i < dataDirs.length; i++) {
      dataDirs[i] = Files.createTempDir();
      Assert.assertTrue(dataDirs[i].isDirectory());
    }
    // Very short checkpoint interval (1) and tiny MAX_FILE_SIZE so that
    // checkpoints and log rolls happen during the tests.
    log = new Log.Builder().setCheckpointInterval(1L)
        .setMaxFileSize(MAX_FILE_SIZE)
        .setQueueSize(CAPACITY)
        .setCheckpointDir(checkpointDir)
        .setLogDirs(dataDirs)
        .setCheckpointOnClose(false)
        .setChannelName("testlog")
        .setChannelCounter(new FileChannelCounter("testlog"))
        .build();
    log.replay();
  }
@After
public void cleanup() throws Exception {
if (log != null) {
log.close();
}
FileUtils.deleteQuietly(checkpointDir);
for (int i = 0; i < dataDirs.length; i++) {
FileUtils.deleteQuietly(dataDirs[i]);
}
}
/**
* Test that we can put, commit and then get. Note that get is
* not transactional so the commit is not required.
*/
@Test
public void testPutGet()
throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
FlumeEvent eventIn = TestUtils.newPersistableEvent();
long transactionID = ++this.transactionID;
FlumeEventPointer eventPointer = log.put(transactionID, eventIn);
log.commitPut(transactionID); // this is not required since
// get is not transactional
FlumeEvent eventOut = log.get(eventPointer);
Assert.assertNotNull(eventOut);
Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
Assert.assertArrayEquals(eventIn.getBody(), eventOut.getBody());
}
@Test
public void testRoll()
throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
log.shutdownWorker();
Thread.sleep(1000);
for (int i = 0; i < 1000; i++) {
FlumeEvent eventIn = TestUtils.newPersistableEvent();
long transactionID = ++this.transactionID;
FlumeEventPointer eventPointer = log.put(transactionID, eventIn);
// get is not transactional
FlumeEvent eventOut = log.get(eventPointer);
Assert.assertNotNull(eventOut);
Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
Assert.assertArrayEquals(eventIn.getBody(), eventOut.getBody());
}
int logCount = 0;
for (File dataDir : dataDirs) {
for (File logFile : dataDir.listFiles()) {
if (logFile.getName().startsWith("log-")) {
logCount++;
}
}
}
// 93 (*2 for meta) files with TestLog.MAX_FILE_SIZE=1000
Assert.assertEquals(186, logCount);
}
  /**
   * After replay of the log, we should find the event because the put
   * was committed before the log was closed and rebuilt.
   */
  @Test
  public void testPutCommit()
      throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
    FlumeEvent eventIn = TestUtils.newPersistableEvent();
    long transactionID = ++this.transactionID;
    FlumeEventPointer eventPointerIn = log.put(transactionID, eventIn);
    log.commitPut(transactionID);
    log.close();
    // Reopen with Long.MAX_VALUE checkpoint interval (effectively disabling
    // periodic checkpoints); replay must reconstruct the committed put.
    log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
        .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
        .setQueueSize(CAPACITY)
        .setCheckpointDir(checkpointDir)
        .setLogDirs(dataDirs)
        .setChannelName("testlog")
        .setChannelCounter(new FileChannelCounter("testlog"))
        .build();
    log.replay();
    takeAndVerify(eventPointerIn, eventIn);
  }
  /**
   * After replay of the log, we should not find the event because the
   * put was rolled back before the log was closed and rebuilt.
   */
  @Test
  public void testPutRollback() throws IOException, InterruptedException {
    FlumeEvent eventIn = TestUtils.newPersistableEvent();
    long transactionID = ++this.transactionID;
    log.put(transactionID, eventIn);
    log.rollback(transactionID); // rolled back so it should not be replayed
    log.close();
    log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
        .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
        .setQueueSize(CAPACITY)
        .setCheckpointDir(checkpointDir)
        .setLogDirs(dataDirs)
        .setChannelName("testlog")
        .setChannelCounter(new FileChannelCounter("testlog"))
        .build();
    log.replay();
    // The rolled-back event must not appear at the head of the queue.
    FlumeEventQueue queue = log.getFlumeEventQueue();
    Assert.assertNull(queue.removeHead(transactionID));
  }
@Test
public void testMinimumRequiredSpaceTooSmallOnStartup() throws IOException,
    InterruptedException {
  log.close();
  // Require more free space than any filesystem can provide, so replay()
  // must fail fast with a "Usable space exhausted" IOException.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setMinimumRequiredSpace(Long.MAX_VALUE)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  try {
    log.replay();
    Assert.fail();
  } catch (IOException e) {
    Assert.assertTrue(e.getMessage(),
        e.getMessage().startsWith("Usable space exhausted"));
  }
}
/**
 * There is a race here in that someone could take up some space
 */
@Test
public void testMinimumRequiredSpaceTooSmallForPut() throws IOException, InterruptedException {
  try {
    doTestMinimumRequiredSpaceTooSmallForPut();
  } catch (IOException e) {
    // Disk-space races with other processes can cause spurious
    // IOExceptions; retry once before declaring failure.
    LOGGER.info("Error during test, retrying", e);
    doTestMinimumRequiredSpaceTooSmallForPut();
  } catch (AssertionError e) {
    // Same rationale for assertion failures: retry once to rule out a race.
    LOGGER.info("Test failed, let's be sure it failed for good reason", e);
    doTestMinimumRequiredSpaceTooSmallForPut();
  }
}
/**
 * Fills the checkpoint filesystem until less than the configured minimum
 * required space remains, then verifies that a put fails with
 * "Usable space exhausted".
 */
public void doTestMinimumRequiredSpaceTooSmallForPut() throws IOException,
    InterruptedException {
  // Leave roughly 10MB of headroom below the current usable space.
  long minimumRequiredSpace = checkpointDir.getUsableSpace() -
      (10L * 1024L * 1024L);
  log.close();
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setMinimumRequiredSpace(minimumRequiredSpace)
      .setUsableSpaceRefreshInterval(1L)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  File filler = new File(checkpointDir, "filler");
  byte[] buffer = new byte[64 * 1024];
  // try-with-resources: the original leaked the stream when write() threw,
  // which is likely here since we are deliberately exhausting disk space.
  try (FileOutputStream out = new FileOutputStream(filler)) {
    while (checkpointDir.getUsableSpace() > minimumRequiredSpace) {
      out.write(buffer);
    }
  }
  try {
    FlumeEvent eventIn = TestUtils.newPersistableEvent();
    long transactionID = ++this.transactionID;
    log.put(transactionID, eventIn);
    Assert.fail();
  } catch (IOException e) {
    Assert.assertTrue(e.getMessage(),
        e.getMessage().startsWith("Usable space exhausted"));
  }
}
/**
 * After replay of the log, we should not find the event because the take
 * was committed
 */
@Test
public void testPutTakeCommit() throws IOException, InterruptedException {
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  long putTransactionID = ++transactionID;
  FlumeEventPointer eventPointer = log.put(putTransactionID, eventIn);
  log.commitPut(putTransactionID);
  long takeTransactionID = ++transactionID;
  log.take(takeTransactionID, eventPointer);
  log.commitTake(takeTransactionID);
  log.close();
  // Assign the rebuilt Log: the original discarded the Builder result and
  // then invoked replay() on the already-closed instance.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(1)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  FlumeEventQueue queue = log.getFlumeEventQueue();
  // A committed take must leave nothing on the queue after replay.
  Assert.assertNull(queue.removeHead(0));
}
/**
 * After replay of the log, we should get the event because the take
 * was rolled back
 */
@Test
public void testPutTakeRollbackLogReplayV1()
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  // Exercise the rolled-back-take scenario with the V1 replay code path.
  doPutTakeRollback(true);
}
@Test
public void testPutTakeRollbackLogReplayV2()
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  // Exercise the rolled-back-take scenario with the V2 replay code path.
  doPutTakeRollback(false);
}
/**
 * Commits a put, takes the event, rolls the take back, then restarts and
 * replays the log. The event must still be retrievable because the take
 * never committed.
 *
 * @param useLogReplayV1 whether to replay with the V1 (true) or V2 (false)
 *                       replay implementation
 */
public void doPutTakeRollback(boolean useLogReplayV1)
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  long putTransactionID = ++transactionID;
  FlumeEventPointer eventPointerIn = log.put(putTransactionID, eventIn);
  log.commitPut(putTransactionID);
  long takeTransactionID = ++transactionID;
  log.take(takeTransactionID, eventPointerIn);
  log.rollback(takeTransactionID);
  log.close();
  // Assign the rebuilt Log: the original discarded the Builder result and
  // then invoked replay() on the already-closed instance.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(1)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setUseLogReplayV1(useLogReplayV1)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  takeAndVerify(eventPointerIn, eventIn);
}
/**
 * A commit with no matching put must be a no-op on replay: the queue stays
 * empty after restart.
 */
@Test
public void testCommitNoPut() throws IOException, InterruptedException {
  long putTransactionID = ++transactionID;
  log.commitPut(putTransactionID);
  log.close();
  // Assign the rebuilt Log: the original discarded the Builder result and
  // then invoked replay() on the already-closed instance.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(1)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  FlumeEventQueue queue = log.getFlumeEventQueue();
  FlumeEventPointer eventPointerOut = queue.removeHead(0);
  Assert.assertNull(eventPointerOut);
}
/**
 * A take-commit with no matching take must be a no-op on replay: the queue
 * stays empty after restart.
 */
@Test
public void testCommitNoTake() throws IOException, InterruptedException {
  // Renamed from putTransactionID: this transaction commits a take.
  long takeTransactionID = ++transactionID;
  log.commitTake(takeTransactionID);
  log.close();
  // Assign the rebuilt Log: the original discarded the Builder result and
  // then invoked replay() on the already-closed instance.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(1)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  FlumeEventQueue queue = log.getFlumeEventQueue();
  FlumeEventPointer eventPointerOut = queue.removeHead(0);
  Assert.assertNull(eventPointerOut);
}
/**
 * A rollback with no matching put or take must be a no-op on replay: the
 * queue stays empty after restart.
 */
@Test
public void testRollbackNoPutTake() throws IOException, InterruptedException {
  long putTransactionID = ++transactionID;
  log.rollback(putTransactionID);
  log.close();
  // Assign the rebuilt Log: the original discarded the Builder result and
  // then invoked replay() on the already-closed instance.
  log = new Log.Builder().setCheckpointInterval(Long.MAX_VALUE)
      .setMaxFileSize(FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE)
      .setQueueSize(1)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  FlumeEventQueue queue = log.getFlumeEventQueue();
  FlumeEventPointer eventPointerOut = queue.removeHead(0);
  Assert.assertNull(eventPointerOut);
}
/**
 * Verifies that LogUtils.getLogs returns exactly the "log-N" data files,
 * excluding metadata, metadata-temp and gzip files living in the same dir.
 */
@Test
public void testGetLogs() throws IOException {
  File logDir = dataDirs[0];
  List<File> expected = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    File log = new File(logDir, Log.PREFIX + i);
    expected.add(log);
    Assert.assertTrue(log.isFile() || log.createNewFile());
    File metaDataFile = Serialization.getMetaDataFile(log);
    File metaDataTempFile = Serialization.getMetaDataTempFile(metaDataFile);
    File logGzip = new File(logDir, Log.PREFIX + i + ".gz");
    Assert.assertTrue(metaDataFile.isFile() || metaDataFile.createNewFile());
    Assert.assertTrue(metaDataTempFile.isFile() ||
        metaDataTempFile.createNewFile());
    // Fixed: the original tested log.isFile() here, which is always true at
    // this point, so the .gz file was never actually created and the
    // gzip-exclusion behavior of getLogs() was never exercised.
    Assert.assertTrue(logGzip.isFile() || logGzip.createNewFile());
  }
  List<File> actual = LogUtils.getLogs(logDir);
  LogUtils.sort(actual);
  LogUtils.sort(expected);
  Assert.assertEquals(expected, actual);
}
@Test
public void testReplayFailsWithAllEmptyLogMetaDataNormalReplay()
    throws IOException, InterruptedException {
  // Normal (checkpoint-based) replay variant.
  doTestReplayFailsWithAllEmptyLogMetaData(false);
}
@Test
public void testReplayFailsWithAllEmptyLogMetaDataFastReplay()
    throws IOException, InterruptedException {
  // Fast-replay (no checkpoint) variant.
  doTestReplayFailsWithAllEmptyLogMetaData(true);
}
/**
 * Writes a committed event, truncates every log metadata file to zero bytes,
 * and verifies that replay fails with an IllegalStateException complaining
 * that the .meta file is empty while its log file is not.
 */
public void doTestReplayFailsWithAllEmptyLogMetaData(boolean useFastReplay)
    throws IOException, InterruptedException {
  // setup log with correct fast replay parameter
  log.close();
  log = new Log.Builder().setCheckpointInterval(1L)
      .setMaxFileSize(MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setUseFastReplay(useFastReplay)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  long transactionID = ++this.transactionID;
  log.put(transactionID, eventIn);
  log.commitPut(transactionID);
  log.close();
  if (useFastReplay) {
    // Fast replay only kicks in when no checkpoint exists; wipe it.
    FileUtils.deleteQuietly(checkpointDir);
    Assert.assertTrue(checkpointDir.mkdir());
  }
  List<File> logFiles = Lists.newArrayList();
  for (int i = 0; i < dataDirs.length; i++) {
    logFiles.addAll(LogUtils.getLogs(dataDirs[i]));
  }
  Assert.assertTrue(logFiles.size() > 0);
  // Replace each metadata file with an empty one to simulate corruption.
  for (File logFile : logFiles) {
    File logFileMeta = Serialization.getMetaDataFile(logFile);
    Assert.assertTrue(logFileMeta.delete());
    Assert.assertTrue(logFileMeta.createNewFile());
  }
  log = new Log.Builder().setCheckpointInterval(1L)
      .setMaxFileSize(MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setUseFastReplay(useFastReplay)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  try {
    log.replay();
    Assert.fail();
  } catch (IllegalStateException expected) {
    String msg = expected.getMessage();
    Assert.assertNotNull(msg);
    Assert.assertTrue(msg, msg.contains(".meta is empty, but log"));
  }
}
@Test
public void testReplaySucceedsWithUnusedEmptyLogMetaDataNormalReplay()
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  long transactionID = ++this.transactionID;
  FlumeEventPointer eventPointer = log.put(transactionID, eventIn);
  log.commitPut(transactionID); // this is not required since
  log.close();
  // Rebuild with normal (checkpoint-based) replay; the helper empties the
  // metadata of zero-length log files only, which replay must tolerate.
  log = new Log.Builder().setCheckpointInterval(1L)
      .setMaxFileSize(MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  doTestReplaySucceedsWithUnusedEmptyLogMetaData(eventIn, eventPointer);
}
@Test
public void testReplaySucceedsWithUnusedEmptyLogMetaDataFastReplay()
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  long transactionID = ++this.transactionID;
  FlumeEventPointer eventPointer = log.put(transactionID, eventIn);
  log.commitPut(transactionID); // this is not required since
  log.close();
  // Point at a brand-new, empty checkpoint dir so fast replay (which only
  // runs when no checkpoint exists) is actually exercised.
  checkpointDir = Files.createTempDir();
  FileUtils.forceDeleteOnExit(checkpointDir);
  Assert.assertTrue(checkpointDir.isDirectory());
  log = new Log.Builder().setCheckpointInterval(1L)
      .setMaxFileSize(MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setChannelName("testlog")
      .setUseFastReplay(true)
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  doTestReplaySucceedsWithUnusedEmptyLogMetaData(eventIn, eventPointer);
}
/**
 * Empties the metadata files of all zero-length (unused) log files, then
 * replays and verifies the previously committed event is still retrievable.
 * Empty metadata is only fatal when the corresponding log file has data.
 */
public void doTestReplaySucceedsWithUnusedEmptyLogMetaData(FlumeEvent eventIn,
                                                           FlumeEventPointer eventPointer)
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  for (int i = 0; i < dataDirs.length; i++) {
    for (File logFile : LogUtils.getLogs(dataDirs[i])) {
      // Only touch log files that were never written to.
      if (logFile.length() == 0L) {
        File logFileMeta = Serialization.getMetaDataFile(logFile);
        Assert.assertTrue(logFileMeta.delete());
        Assert.assertTrue(logFileMeta.createNewFile());
      }
    }
  }
  log.replay();
  FlumeEvent eventOut = log.get(eventPointer);
  Assert.assertNotNull(eventOut);
  Assert.assertEquals(eventIn.getHeaders(), eventOut.getHeaders());
  Assert.assertArrayEquals(eventIn.getBody(), eventOut.getBody());
}
@Test
public void testCachedFSUsableSpace() throws Exception {
  File fs = mock(File.class);
  when(fs.getUsableSpace()).thenReturn(Long.MAX_VALUE);
  // Refresh interval of 1000ms: within that window reads come from the
  // cached value, adjusted by explicit decrement() calls.
  LogFile.CachedFSUsableSpace cachedFS = new LogFile.CachedFSUsableSpace(fs, 1000L);
  Assert.assertEquals(cachedFS.getUsableSpace(), Long.MAX_VALUE);
  cachedFS.decrement(Integer.MAX_VALUE);
  Assert.assertEquals(cachedFS.getUsableSpace(), Long.MAX_VALUE - Integer.MAX_VALUE);
  // Negative decrements must be rejected.
  try {
    cachedFS.decrement(-1);
    Assert.fail();
  } catch (IllegalArgumentException expected) {
  }
  when(fs.getUsableSpace()).thenReturn(Long.MAX_VALUE - 1L);
  // Sleep past the refresh interval so the next read hits the (mocked)
  // filesystem again instead of the cache.
  Thread.sleep(1100);
  Assert.assertEquals(cachedFS.getUsableSpace(), Long.MAX_VALUE - 1L);
}
@Test
public void testCheckpointOnClose() throws Exception {
  log.close();
  // setCheckpointOnClose(true): close() must force a final checkpoint.
  log = new Log.Builder().setCheckpointInterval(1L)
      .setMaxFileSize(MAX_FILE_SIZE)
      .setQueueSize(CAPACITY)
      .setCheckpointDir(checkpointDir)
      .setLogDirs(dataDirs)
      .setCheckpointOnClose(true)
      .setChannelName("testLog")
      .setChannelCounter(new FileChannelCounter("testlog"))
      .build();
  log.replay();
  // 1 Write One Event
  FlumeEvent eventIn = TestUtils.newPersistableEvent();
  log.put(transactionID, eventIn);
  log.commitPut(transactionID);
  // 2 Check state of checkpoint before close
  File checkPointMetaFile =
      FileUtils.listFiles(checkpointDir, new String[] { "meta" }, false).iterator().next();
  long before = FileUtils.checksumCRC32(checkPointMetaFile);
  // 3 Close Log
  log.close();
  // 4 Verify that checkpoint was modified on close
  long after = FileUtils.checksumCRC32(checkPointMetaFile);
  Assert.assertFalse(before == after);
}
/**
 * Pops the single queued event after a replay and checks it is exactly the
 * event that was put: same pointer, same headers, same body, and nothing
 * else left on the queue.
 */
private void takeAndVerify(FlumeEventPointer eventPointerIn, FlumeEvent eventIn)
    throws IOException, InterruptedException, NoopRecordException, CorruptEventException {
  FlumeEventQueue eventQueue = log.getFlumeEventQueue();
  FlumeEventPointer head = eventQueue.removeHead(0);
  // Exactly one event expected: a second removeHead must return null.
  Assert.assertNotNull(head);
  Assert.assertNull(eventQueue.removeHead(0));
  Assert.assertEquals(eventPointerIn, head);
  Assert.assertEquals(eventPointerIn.hashCode(), head.hashCode());
  // Resolve the pointer back to the stored event and compare contents.
  FlumeEvent replayed = log.get(head);
  Assert.assertNotNull(replayed);
  Assert.assertEquals(eventIn.getHeaders(), replayed.getHeaders());
  Assert.assertArrayEquals(eventIn.getBody(), replayed.getBody());
}
}
| 9,697 |
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestTransactionEventRecordV3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import junit.framework.Assert;
import org.junit.Test;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Round-trip serialization tests for the V3 transaction event records
 * (Put/Take/Rollback/Commit): each record is serialized to a byte array and
 * deserialized back, then field-by-field equality is asserted.
 */
public class TestTransactionEventRecordV3 {
  @Test
  public void testTypes() throws IOException {
    // Each record class must report its matching Type constant.
    Put put = new Put(System.currentTimeMillis(), WriteOrderOracle.next());
    Assert.assertEquals(TransactionEventRecord.Type.PUT.get(),
        put.getRecordType());
    Take take = new Take(System.currentTimeMillis(), WriteOrderOracle.next());
    Assert.assertEquals(TransactionEventRecord.Type.TAKE.get(),
        take.getRecordType());
    Rollback rollback = new Rollback(System.currentTimeMillis(),
        WriteOrderOracle.next());
    Assert.assertEquals(TransactionEventRecord.Type.ROLLBACK.get(),
        rollback.getRecordType());
    Commit commit = new Commit(System.currentTimeMillis(),
        WriteOrderOracle.next());
    Assert.assertEquals(TransactionEventRecord.Type.COMMIT.get(),
        commit.getRecordType());
  }
  @Test
  public void testPutSerialization() throws IOException, CorruptEventException {
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("key", "value");
    Put in = new Put(System.currentTimeMillis(),
        WriteOrderOracle.next(),
        new FlumeEvent(headers, new byte[0]));
    Put out = (Put)TransactionEventRecord.fromByteArray(toByteArray(in));
    Assert.assertEquals(in.getClass(), out.getClass());
    Assert.assertEquals(in.getRecordType(), out.getRecordType());
    Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
    Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
    Assert.assertEquals(in.getEvent().getHeaders(), out.getEvent().getHeaders());
    Assert.assertEquals(headers, in.getEvent().getHeaders());
    Assert.assertEquals(headers, out.getEvent().getHeaders());
    Assert.assertTrue(Arrays.equals(in.getEvent().getBody(), out.getEvent().getBody()));
  }
  @Test
  public void testPutSerializationNullHeader() throws IOException, CorruptEventException {
    Put in = new Put(System.currentTimeMillis(),
        WriteOrderOracle.next(),
        new FlumeEvent(null, new byte[0]));
    Put out = (Put)TransactionEventRecord.fromByteArray(toByteArray(in));
    Assert.assertEquals(in.getClass(), out.getClass());
    Assert.assertEquals(in.getRecordType(), out.getRecordType());
    Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
    Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
    // Null headers in serialize out as an empty (non-null) map.
    Assert.assertNull(in.getEvent().getHeaders());
    Assert.assertNotNull(out.getEvent().getHeaders());
    Assert.assertTrue(Arrays.equals(in.getEvent().getBody(), out.getEvent().getBody()));
  }
  @Test
  public void testTakeSerialization() throws IOException, CorruptEventException {
    Take in = new Take(System.currentTimeMillis(), WriteOrderOracle.next(), 10, 20);
    Take out = (Take)TransactionEventRecord.fromByteArray(toByteArray(in));
    Assert.assertEquals(in.getClass(), out.getClass());
    Assert.assertEquals(in.getRecordType(), out.getRecordType());
    Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
    Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
    Assert.assertEquals(in.getFileID(), out.getFileID());
    Assert.assertEquals(in.getOffset(), out.getOffset());
  }
  @Test
  public void testRollbackSerialization() throws IOException, CorruptEventException {
    Rollback in = new Rollback(System.currentTimeMillis(), WriteOrderOracle.next());
    Rollback out = (Rollback)TransactionEventRecord.fromByteArray(toByteArray(in));
    Assert.assertEquals(in.getClass(), out.getClass());
    Assert.assertEquals(in.getRecordType(), out.getRecordType());
    Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
    Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
  }
  @Test
  public void testCommitSerialization() throws IOException, CorruptEventException {
    Commit in = new Commit(System.currentTimeMillis(), WriteOrderOracle.next());
    Commit out = (Commit)TransactionEventRecord.fromByteArray(toByteArray(in));
    Assert.assertEquals(in.getClass(), out.getClass());
    Assert.assertEquals(in.getRecordType(), out.getRecordType());
    Assert.assertEquals(in.getTransactionID(), out.getTransactionID());
    Assert.assertEquals(in.getLogWriteOrderID(), out.getLogWriteOrderID());
  }
  @Test
  public void testBadType() throws IOException, CorruptEventException {
    // An unknown record type must fail deserialization with a message that
    // includes the type rendered in hex.
    TransactionEventRecord in = mock(TransactionEventRecord.class);
    when(in.getRecordType()).thenReturn(Short.MIN_VALUE);
    try {
      TransactionEventRecord.fromByteArray(toByteArray(in));
      Assert.fail();
    } catch (NullPointerException e) {
      Assert.assertEquals("Unknown action ffff8000", e.getMessage());
    }
  }
  // Serializes a record through the production toByteBuffer path.
  private byte[] toByteArray(TransactionEventRecord record) throws IOException {
    ByteBuffer buffer = TransactionEventRecord.toByteBuffer(record);
    return buffer.array();
  }
}
0 | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel | Create_ds/flume/flume-ng-channels/flume-file-channel/src/test/java/org/apache/flume/channel/file/TestCheckpoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flume.channel.file;
import java.io.File;
import java.io.IOException;
import junit.framework.Assert;
import org.apache.flume.channel.file.instrumentation.FileChannelCounter;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests that a FlumeEventQueue checkpoint round-trips through its backing
 * store: an element added before checkpoint() is visible to a queue built
 * over the same files afterwards.
 */
public class TestCheckpoint {
  // Temp files backing the queue's persisted state, created fresh per test.
  File file;
  File inflightPuts;
  File inflightTakes;
  File queueSet;
  @Before
  public void setup() throws IOException {
    file = File.createTempFile("Checkpoint", "");
    inflightPuts = File.createTempFile("inflightPuts", "");
    inflightTakes = File.createTempFile("inflightTakes", "");
    queueSet = File.createTempFile("queueset", "");
    Assert.assertTrue(file.isFile());
    Assert.assertTrue(file.canWrite());
  }
  @After
  public void cleanup() {
    // Delete every temp file created in setup(); the original removed only
    // the checkpoint file and leaked the other three.
    file.delete();
    inflightPuts.delete();
    inflightTakes.delete();
    queueSet.delete();
  }
  @Test
  public void testSerialization() throws Exception {
    EventQueueBackingStore backingStore =
        new EventQueueBackingStoreFileV2(file, 1, "test", new FileChannelCounter("test"));
    FlumeEventPointer ptrIn = new FlumeEventPointer(10, 20);
    FlumeEventQueue queueIn = new FlumeEventQueue(backingStore,
        inflightTakes, inflightPuts, queueSet);
    queueIn.addHead(ptrIn);
    // Before checkpoint(): a second queue over the same store sees no
    // persisted write-order ID.
    FlumeEventQueue queueOut = new FlumeEventQueue(backingStore,
        inflightTakes, inflightPuts, queueSet);
    Assert.assertEquals(0, queueOut.getLogWriteOrderID());
    queueIn.checkpoint(false);
    // After checkpoint(): the element and a positive write-order ID must be
    // recoverable from the backing store.
    FlumeEventQueue queueOut2 = new FlumeEventQueue(backingStore,
        inflightTakes, inflightPuts, queueSet);
    FlumeEventPointer ptrOut = queueOut2.removeHead(0L);
    Assert.assertEquals(ptrIn, ptrOut);
    Assert.assertTrue(queueOut2.getLogWriteOrderID() > 0);
  }
}
| 9,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.