index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client/ClientFactoryTest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.HashSet;
import java.util.Set;
import org.junit.BeforeClass;
import org.junit.Test;
import com.netflix.config.ConfigurationManager;
import com.netflix.loadbalancer.ConfigurationBasedServerList;
import com.netflix.loadbalancer.DynamicServerListLoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.niws.client.http.RestClient;
public class ClientFactoryTest {
    /** Client under test, built once from the "junit" named configuration. */
    private static RestClient client;

    @BeforeClass
    public static void init() {
        // Seed the configuration-based server list, then resolve the named client from it.
        ConfigurationManager.getConfigInstance().setProperty("junit.ribbon.listOfServers", "www.example1.come:80,www.example2.come:80,www.example3.come:80");
        client = (RestClient) ClientFactory.getNamedClient("junit");
    }

    @Test
    public void testChooseServers() {
        assertNotNull(client);
        DynamicServerListLoadBalancer loadBalancer = (DynamicServerListLoadBalancer) client.getLoadBalancer();
        // The server list should have come from the configuration property set in init().
        assertTrue(loadBalancer.getServerListImpl() instanceof ConfigurationBasedServerList);
        Set<Server> expected = new HashSet<Server>();
        expected.add(new Server("www.example1.come:80"));
        expected.add(new Server("www.example2.come:80"));
        expected.add(new Server("www.example3.come:80"));
        // Eleven picks are more than enough for round-robin to cycle through all three servers.
        Set<Server> chosen = new HashSet<Server>();
        int picks = 0;
        while (picks <= 10) {
            chosen.add(loadBalancer.chooseServer());
            picks++;
        }
        assertEquals(expected, chosen);
    }
}
| 7,200 |
0 | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client/ManyShortLivedRequestsSurvivorTest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client;
import com.google.mockwebserver.MockResponse;
import com.google.mockwebserver.MockWebServer;
import com.netflix.client.http.HttpRequest;
import com.netflix.client.http.HttpResponse;
import com.netflix.niws.client.http.RestClient;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import static com.netflix.config.ConfigurationManager.getConfigInstance;
public class ManyShortLivedRequestsSurvivorTest {

    /**
     * Fires many short-lived requests through the load-balancing client against two
     * mock servers, verifying the client survives the full run. Each server is
     * pre-loaded with exactly its round-robin share of responses.
     */
    @Test
    public void survive() throws IOException, ClientException, URISyntaxException, InterruptedException {
        String clientName = "RibbonClientTest-loadBalancingDefaultPolicyRoundRobin";
        String serverListKey = clientName + ".ribbon.listOfServers";
        int nbHitsPerServer = 60;
        MockWebServer server1 = new MockWebServer();
        MockWebServer server2 = new MockWebServer();
        try {
            for (int i = 0; i < nbHitsPerServer; i++) {
                server1.enqueue(new MockResponse().setResponseCode(200).setBody("server1 success <" + i + ">!"));
                server2.enqueue(new MockResponse().setResponseCode(200).setBody("server2 success <" + i + ">!"));
            }
            server1.play();
            server2.play();
            getConfigInstance().setProperty(serverListKey, hostAndPort(server1.getUrl("")) + "," + hostAndPort(server2.getUrl("")));
            RestClient client = (RestClient) ClientFactory.getNamedClient(clientName);
            HttpRequest request;
            for (int i = 0; i < nbHitsPerServer * 2; i++) {
                request = HttpRequest.newBuilder().uri(new URI("/")).build();
                HttpResponse response = client.executeWithLoadBalancer(request);
                // Release the pooled connection so later iterations don't starve.
                response.close();
            }
        } finally {
            // Fix: the mock servers were previously never shut down, leaking their
            // listener threads and sockets into other tests running in the same JVM.
            server1.shutdown();
            server2.shutdown();
        }
    }

    /** Formats a {@code localhost:<port>} pair from the mock server's base URL. */
    static String hostAndPort(URL url) {
        return "localhost:" + url.getPort();
    }
}
| 7,201 |
0 | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client/samples/SampleApp.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.samples;
import java.net.URI;
import org.junit.Ignore;
import com.netflix.client.ClientFactory;
import com.netflix.client.http.HttpRequest;
import com.netflix.client.http.HttpResponse;
import com.netflix.config.ConfigurationManager;
import com.netflix.loadbalancer.ZoneAwareLoadBalancer;
import com.netflix.niws.client.http.RestClient;
@Ignore
public class SampleApp {
    /**
     * Demo of the Ribbon load-balancing RestClient: loads a named client from a
     * properties resource, fires requests, then changes the server list at runtime
     * and shows the load balancer picking up the new servers.
     */
    public static void main(String[] args) throws Exception {
        ConfigurationManager.loadPropertiesFromResources("sample-client.properties"); // 1
        System.out.println(ConfigurationManager.getConfigInstance().getProperty("sample-client.ribbon.listOfServers"));
        RestClient client = (RestClient) ClientFactory.getNamedClient("sample-client"); // 2
        HttpRequest request = HttpRequest.newBuilder().uri(new URI("/")).build(); // 3
        for (int i = 0; i < 20; i++) {
            HttpResponse response = client.executeWithLoadBalancer(request); // 4
            System.out.println("Status code for " + response.getRequestedURI() + " :" + response.getStatus());
            response.close(); // fix: release the pooled connection (was leaked)
        }
        ZoneAwareLoadBalancer lb = (ZoneAwareLoadBalancer) client.getLoadBalancer();
        System.out.println(lb.getLoadBalancerStats());
        ConfigurationManager.getConfigInstance().setProperty(
                "sample-client.ribbon.listOfServers", "www.linkedin.com:80,www.google.com:80"); // 5
        System.out.println("changing servers ...");
        // Give the dynamic server list a moment to refresh from configuration.
        Thread.sleep(3000); // 6
        for (int i = 0; i < 20; i++) {
            HttpResponse response = client.executeWithLoadBalancer(request);
            System.out.println("Status code for " + response.getRequestedURI() + " : " + response.getStatus());
            response.close(); // fix: release the pooled connection (was leaked)
        }
        System.out.println(lb.getLoadBalancerStats()); // 7
    }
}
| 7,202 |
0 | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/test/java/com/netflix/client/testutil/SimpleSSLTestServer.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.testutil;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.Charset;
import java.security.KeyStore;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.TrustManagerFactory;
import org.junit.Ignore;
import com.google.common.io.Closeables;
/**
*
* A simple SSL(TLS) server for which we can test against
* to ensure that the SSL connection can (or cannot) be established.
*
* @author jzarfoss
*
*/
@Ignore
public class SimpleSSLTestServer {

    private static final String NL = System.getProperty("line.separator");

    /** Minimal fixed HTTP/1.0 response returned to every client. */
    private static final String CANNED_RESPONSE =
            "HTTP/1.0 200 OK" + NL +
            "Content-Type: text/plain" + NL +
            "Content-Length: 5" + NL + NL +
            "hello" + NL;

    private final ServerSocket ss;

    /**
     * Builds a TLS server socket on an ephemeral port from the given key material.
     *
     * @param truststore JKS truststore used to validate client certificates
     * @param truststorePass password for {@code truststore}
     * @param keystore JKS keystore holding the server's own key/certificate
     * @param keystorePass password for {@code keystore}
     * @param clientAuthRequred whether to demand a client certificate (mutual TLS)
     */
    @edu.umd.cs.findbugs.annotations.SuppressWarnings
    public SimpleSSLTestServer(final File truststore, final String truststorePass,
            final File keystore, final String keystorePass, final boolean clientAuthRequred) throws Exception {
        KeyStore ks = KeyStore.getInstance("JKS");
        FileInputStream keystoreIn = new FileInputStream(keystore);
        try {
            ks.load(keystoreIn, keystorePass.toCharArray());
        } finally {
            keystoreIn.close(); // fix: stream was previously never closed
        }
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(ks, keystorePass.toCharArray());
        KeyStore ts = KeyStore.getInstance("JKS");
        FileInputStream truststoreIn = new FileInputStream(truststore);
        try {
            // fix: load the truststore with truststorePass; it was loaded with
            // keystorePass, leaving the truststorePass parameter entirely unused
            ts.load(truststoreIn, truststorePass.toCharArray());
        } finally {
            truststoreIn.close(); // fix: stream was previously never closed
        }
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(ts);
        SSLContext sc = SSLContext.getInstance("TLS");
        sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
        ss = sc.getServerSocketFactory().createServerSocket(0);
        ((SSLServerSocket) ss).setNeedClientAuth(clientAuthRequred);
    }

    /** Accepts one connection on a background thread and replies with the canned response. */
    public void accept() throws Exception {
        new Thread() {
            @Override
            public void run() {
                Socket sock = null;
                BufferedReader reader = null;
                BufferedWriter writer = null;
                try {
                    sock = ss.accept();
                    reader = new BufferedReader(new InputStreamReader(sock.getInputStream(), Charset.defaultCharset()));
                    writer = new BufferedWriter(new OutputStreamWriter(sock.getOutputStream(), Charset.defaultCharset()));
                    reader.readLine(); // we really don't care what the client says, he's getting the special regardless...
                    writer.write(CANNED_RESPONSE);
                    writer.flush();
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    try {
                        Closeables.close(reader, true);
                        Closeables.close(writer, true);
                        if (sock != null) { // fix: avoid NPE when accept() itself failed
                            sock.close();
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        }.start();
    }

    /** Stops accepting connections. */
    public void close() throws Exception {
        ss.close();
    }

    /** @return the ephemeral port the server socket is bound to. */
    public int getPort() {
        return ss.getLocalPort();
    }
}
| 7,203 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client/http/HttpClientResponse.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.niws.client.http;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.reflect.TypeToken;
import com.netflix.client.ClientException;
import com.netflix.client.config.IClientConfig;
import com.netflix.client.http.HttpHeaders;
import com.netflix.client.http.HttpResponse;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.GenericType;
import javax.ws.rs.core.MultivaluedMap;
import java.io.InputStream;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
* A NIWS Client Response
* (this version just wraps Jersey Client response)
* @author stonse
*
*/
class HttpClientResponse implements HttpResponse {
// Wrapped Jersey client response; all state is delegated to it.
private final ClientResponse bcr;
// Snapshot of the response headers taken at construction time.
private final Multimap<String, String> headers = ArrayListMultimap.<String, String>create();
// Live view over the Jersey response's headers (not the snapshot above).
private final HttpHeaders httpHeaders;
private final URI requestedURI;
// Stored at construction; not read anywhere in this class — presumably kept
// for future use or debugging. TODO(review): confirm it can be removed.
private final IClientConfig overrideConfig;
/**
 * Wraps a Jersey {@link ClientResponse}, copying its headers into a multimap
 * and exposing a live {@link HttpHeaders} view backed by the Jersey response.
 *
 * @param cr the Jersey response to wrap
 * @param requestedURI the URI the request was actually sent to
 * @param config per-request override config (stored but unused here)
 */
public HttpClientResponse(ClientResponse cr, URI requestedURI, IClientConfig config){
bcr = cr;
this.requestedURI = requestedURI;
this.overrideConfig = config;
// Copy headers eagerly; null keys/values are silently skipped.
for (Map.Entry<String, List<String>> entry: bcr.getHeaders().entrySet()) {
if (entry.getKey() != null && entry.getValue() != null) {
headers.putAll(entry.getKey(), entry.getValue());
}
}
// Adapter that reads straight from the Jersey response on every call.
httpHeaders = new HttpHeaders() {
@Override
public String getFirstValue(String headerName) {
return bcr.getHeaders().getFirst(headerName);
}
@Override
public List<String> getAllValues(String headerName) {
return bcr.getHeaders().get(headerName);
}
@Override
public List<Entry<String, String>> getAllHeaders() {
// Flatten the multivalued map into one (name, value) entry per value.
MultivaluedMap<String, String> map = bcr.getHeaders();
List<Entry<String, String>> result = Lists.newArrayList();
for (Map.Entry<String, List<String>> header: map.entrySet()) {
String name = header.getKey();
for (String value: header.getValue()) {
result.add(new AbstractMap.SimpleEntry<String, String>(name, value));
}
}
return result;
}
@Override
public boolean containsHeader(String name) {
return bcr.getHeaders().containsKey(name);
}
};
}
/**
 * Returns the raw entity if available from the response
 * @return
 * @throws IllegalArgumentException
 */
public InputStream getRawEntity() {
return bcr.getEntityInputStream();
}
// Deserializes the entity via Jersey's message-body readers.
public <T> T getEntity(Class<T> c) throws Exception {
return bcr.getEntity(c);
}
// Returns the header snapshot taken in the constructor, not a live view.
@Override
public Map<String, Collection<String>> getHeaders() {
return headers.asMap();
}
@Override
public int getStatus() {
return bcr.getStatus();
}
// True only for a 2xx status; false for null response or unknown status codes.
@Override
public boolean isSuccess() {
boolean isSuccess = false;
ClientResponse.Status s = bcr != null? bcr.getClientResponseStatus(): null;
isSuccess = s!=null? (s.getFamily() == javax.ws.rs.core.Response.Status.Family.SUCCESSFUL): false;
return isSuccess;
}
@Override
public boolean hasEntity() {
return bcr.hasEntity();
}
@Override
public URI getRequestedURI() {
return requestedURI;
}
// Payload is exposed as the raw entity stream, or null if there is none.
@Override
public Object getPayload() throws ClientException {
if (hasEntity()) {
return getRawEntity();
} else {
return null;
}
}
@Override
public boolean hasPayload() {
return hasEntity();
}
// Escape hatch for callers that need the underlying Jersey response.
public ClientResponse getJerseyClientResponse() {
return bcr;
}
// Releases the underlying connection; must be called when done with the response.
@Override
public void close() {
bcr.close();
}
@Override
public InputStream getInputStream() {
return getRawEntity();
}
@Override
public String getStatusLine() {
return bcr.getClientResponseStatus().toString();
}
@Override
public HttpHeaders getHttpHeaders() {
return httpHeaders;
}
// Generic-type deserialization via Guava's TypeToken.
@Override
public <T> T getEntity(TypeToken<T> type) throws Exception {
return bcr.getEntity(new GenericType<T>(type.getType()));
}
@Override
public <T> T getEntity(Type type) throws Exception {
return bcr.getEntity(new GenericType<T>(type));
}
}
| 7,204 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client/http/HttpClientRequest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.niws.client.http;
import java.net.URI;
import javax.ws.rs.core.MultivaluedMap;
import com.netflix.client.ClientRequest;
import com.netflix.client.config.IClientConfig;
import com.netflix.client.http.HttpRequest;
/**
* @see HttpRequest
* @author awang
*
*/
@Deprecated
public class HttpClientRequest extends ClientRequest {

    /** HTTP methods supported by this request type. */
    public enum Verb {
        GET("GET"),
        PUT("PUT"),
        POST("POST"),
        DELETE("DELETE"),
        OPTIONS("OPTIONS"),
        HEAD("HEAD");

        // Wire-level name of the HTTP method.
        private final String verb;

        Verb(String verb) {
            this.verb = verb;
        }

        /** Returns the HTTP method name, e.g. {@code "GET"}. */
        public String verb() {
            return verb;
        }
    }

    private MultivaluedMap<String, String> headers;
    private MultivaluedMap<String, String> queryParams;
    private Object entity;
    private Verb verb;

    // Instances are created only through the Builder; the method defaults to GET.
    private HttpClientRequest() {
        this.verb = Verb.GET;
    }

    /** Fluent builder for {@link HttpClientRequest}. */
    public static class Builder {

        private HttpClientRequest target = new HttpClientRequest();

        public Builder setUri(URI uri) {
            target.setUri(uri);
            return this;
        }

        public Builder setHeaders(MultivaluedMap<String, String> headers) {
            target.headers = headers;
            return this;
        }

        public Builder setOverrideConfig(IClientConfig config) {
            target.setOverrideConfig(config);
            return this;
        }

        public Builder setRetriable(boolean retriable) {
            target.setRetriable(retriable);
            return this;
        }

        public Builder setQueryParams(MultivaluedMap<String, String> queryParams) {
            target.queryParams = queryParams;
            return this;
        }

        public Builder setEntity(Object entity) {
            target.entity = entity;
            return this;
        }

        public Builder setVerb(Verb verb) {
            target.verb = verb;
            return this;
        }

        public Builder setLoadBalancerKey(Object loadBalancerKey) {
            target.setLoadBalancerKey(loadBalancerKey);
            return this;
        }

        public HttpClientRequest build() {
            return target;
        }
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public MultivaluedMap<String, String> getQueryParams() {
        return queryParams;
    }

    public MultivaluedMap<String, String> getHeaders() {
        return headers;
    }

    public Object getEntity() {
        return entity;
    }

    public Verb getVerb() {
        return verb;
    }

    /**
     * GET requests are retriable by default when no explicit retriable flag was
     * set on this request; all other cases defer to the base class.
     */
    @Override
    public boolean isRetriable() {
        return (this.verb == Verb.GET && isRetriable == null) || super.isRetriable();
    }

    /** Creates a copy of this request targeting {@code newURI}. */
    @Override
    public HttpClientRequest replaceUri(URI newURI) {
        return newBuilder()
                .setUri(newURI)
                .setEntity(this.getEntity())
                .setHeaders(this.getHeaders())
                .setOverrideConfig(this.getOverrideConfig())
                .setQueryParams(this.getQueryParams())
                .setRetriable(this.isRetriable())
                .setLoadBalancerKey(this.getLoadBalancerKey())
                .setVerb(this.getVerb())
                .build();
    }
}
| 7,205 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client/http/HttpPrimeConnection.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.niws.client.http;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.client.IPrimeConnection;
import com.netflix.client.config.IClientConfig;
import com.netflix.http4.NFHttpClient;
import com.netflix.http4.NFHttpClientFactory;
import com.netflix.loadbalancer.Server;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.params.HttpConnectionParams;
/**
* An implementation of {@link IPrimeConnection} using Apache HttpClient.
*
* @author awang
*
*/
public class HttpPrimeConnection implements IPrimeConnection {

    private static final Logger logger = LoggerFactory.getLogger(HttpPrimeConnection.class);

    // Dedicated Apache HttpClient created in initWithNiwsConfig; used only for priming.
    private NFHttpClient client;

    public HttpPrimeConnection() {
    }

    /**
     * Issues a plain HTTP GET against the server's priming path. Any received
     * response counts as success; a connection failure surfaces as an exception.
     */
    @Override
    public boolean connect(Server server, String primeConnectionsURIPath) throws Exception {
        String url = "http://" + server.getHostPort() + primeConnectionsURIPath;
        logger.debug("Trying URL: {}", url);
        HttpUriRequest get = new HttpGet(url);
        try {
            HttpResponse response = client.execute(get);
            if (logger.isDebugEnabled() && response.getStatusLine() != null) {
                logger.debug("Response code:" + response.getStatusLine().getStatusCode());
            }
        } finally {
            // Abort rather than consume the body: releases the underlying connection.
            get.abort();
        }
        return true;
    }

    @Override
    public void initWithNiwsConfig(IClientConfig niwsClientConfig) {
        client = NFHttpClientFactory.getNamedNFHttpClient(niwsClientConfig.getClientName() + "-PrimeConnsClient", false);
        // Short fixed connect timeout: priming attempts should fail fast.
        HttpConnectionParams.setConnectionTimeout(client.getParams(), 2000);
    }
}
| 7,206 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client/http/HttpClientLoadBalancerErrorHandler.java | package com.netflix.niws.client.http;
import java.net.ConnectException;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.util.List;
import org.apache.http.ConnectionClosedException;
import org.apache.http.NoHttpResponseException;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionPoolTimeoutException;
import org.apache.http.conn.HttpHostConnectException;
import com.google.common.collect.Lists;
import com.netflix.client.ClientException;
import com.netflix.client.DefaultLoadBalancerRetryHandler;
import com.netflix.client.config.IClientConfig;
import com.netflix.client.http.HttpResponse;
public class HttpClientLoadBalancerErrorHandler extends DefaultLoadBalancerRetryHandler {

    // Connect-level failures that warrant retrying the request.
    @SuppressWarnings("unchecked")
    protected List<Class<? extends Throwable>> retriable =
            Lists.<Class<? extends Throwable>>newArrayList(ConnectException.class, SocketTimeoutException.class, ConnectTimeoutException.class,
                    NoHttpResponseException.class, ConnectionPoolTimeoutException.class, ConnectionClosedException.class, HttpHostConnectException.class);

    // Failures counted against the server's circuit breaker.
    @SuppressWarnings("unchecked")
    protected List<Class<? extends Throwable>> circuitRelated =
            Lists.<Class<? extends Throwable>>newArrayList(SocketException.class, SocketTimeoutException.class, ConnectTimeoutException.class,
                    ConnectionClosedException.class, HttpHostConnectException.class);

    public HttpClientLoadBalancerErrorHandler() {
        super();
    }

    public HttpClientLoadBalancerErrorHandler(IClientConfig clientConfig) {
        super(clientConfig);
    }

    public HttpClientLoadBalancerErrorHandler(int retrySameServer, int retryNextServer, boolean retryEnabled) {
        super(retrySameServer, retryNextServer, retryEnabled);
    }

    /**
     * @return true if the Throwable has one of the following exception type as a cause:
     * {@link SocketException}, {@link SocketTimeoutException}
     */
    @Override
    public boolean isCircuitTrippingException(Throwable e) {
        if (!(e instanceof ClientException)) {
            return super.isCircuitTrippingException(e);
        }
        // Server-side throttling counts against the circuit breaker.
        return ((ClientException) e).getErrorType() == ClientException.ErrorType.SERVER_THROTTLED;
    }

    /** A throttled request may be retried, but only against a different server. */
    @Override
    public boolean isRetriableException(Throwable e, boolean sameServer) {
        if (e instanceof ClientException
                && ((ClientException) e).getErrorType() == ClientException.ErrorType.SERVER_THROTTLED) {
            return retryEnabled && !sameServer;
        }
        return super.isRetriableException(e, sameServer);
    }

    @Override
    protected List<Class<? extends Throwable>> getRetriableExceptions() {
        return retriable;
    }

    @Override
    protected List<Class<? extends Throwable>> getCircuitRelatedExceptions() {
        return circuitRelated;
    }
}
| 7,207 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/niws/client/http/RestClient.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.niws.client.http;
import java.io.File;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.security.KeyStore;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import com.netflix.client.config.Property;
import org.apache.http.HttpHost;
import org.apache.http.client.HttpClient;
import org.apache.http.client.UserTokenHandler;
import org.apache.http.conn.ClientConnectionManager;
import org.apache.http.conn.params.ConnRouteParams;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.scheme.SchemeRegistry;
import org.apache.http.conn.scheme.SchemeSocketFactory;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.impl.client.AbstractHttpClient;
import org.apache.http.impl.client.BasicCookieStore;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.HttpContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.client.AbstractLoadBalancerAwareClient;
import com.netflix.client.ClientException;
import com.netflix.client.ClientFactory;
import com.netflix.client.RequestSpecificRetryHandler;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.client.config.IClientConfigKey;
import com.netflix.client.http.HttpRequest;
import com.netflix.client.http.HttpResponse;
import com.netflix.client.ssl.AbstractSslContextFactory;
import com.netflix.client.ssl.ClientSslSocketFactoryException;
import com.netflix.client.ssl.URLSslContextFactory;
import com.netflix.http4.NFHttpClient;
import com.netflix.http4.NFHttpClientFactory;
import com.netflix.http4.NFHttpMethodRetryHandler;
import com.netflix.http4.ssl.KeyStoreAwareSocketFactory;
import com.netflix.loadbalancer.BaseLoadBalancer;
import com.netflix.loadbalancer.ILoadBalancer;
import com.netflix.util.Pair;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.WebResource.Builder;
import com.sun.jersey.client.apache4.ApacheHttpClient4;
import com.sun.jersey.client.apache4.ApacheHttpClient4Handler;
import com.sun.jersey.client.apache4.config.ApacheHttpClient4Config;
import com.sun.jersey.client.apache4.config.DefaultApacheHttpClient4Config;
/**
* A client that is essentially a wrapper around Jersey client. By default, it uses HttpClient for underlying HTTP communication.
* Application can set its own Jersey client with this class, but doing so will void all client configurations set in {@link IClientConfig}.
*
* @deprecated Please see ribbon-rxnetty module for the Netty based client.
*
* @author awang
*
*/
@Deprecated
public class RestClient extends AbstractLoadBalancerAwareClient<HttpRequest, HttpResponse> {
private static IClientConfigKey<Integer> CONN_IDLE_EVICT_TIME_MILLIS = new CommonClientConfigKey<Integer>(
"%s.nfhttpclient.connIdleEvictTimeMilliSeconds") {};
private Client restClient;
private HttpClient httpClient4;
private IClientConfig ncc;
private String restClientName;
private boolean enableConnectionPoolCleanerTask = false;
private Property<Integer> connIdleEvictTimeMilliSeconds;
private int connectionCleanerRepeatInterval;
private int maxConnectionsperHost;
private int maxTotalConnections;
private int connectionTimeout;
private int readTimeout;
private String proxyHost;
private int proxyPort;
private boolean isSecure;
private boolean isHostnameValidationRequired;
private boolean isClientAuthRequired;
private boolean ignoreUserToken;
private ApacheHttpClient4Config config;
boolean bFollowRedirects = CommonClientConfigKey.FollowRedirects.defaultValue();
private static final Logger logger = LoggerFactory.getLogger(RestClient.class);
// Bare client: no load balancer, no configuration. Callers must supply a
// Jersey client or call initWithNiwsConfig before use.
public RestClient() {
super(null);
}
// Load-balanced client with no IClientConfig; named "default".
public RestClient(ILoadBalancer lb) {
super(lb);
restClientName = "default";
}
// Fully-configured, load-balanced client; initializes the underlying Apache
// HttpClient from the supplied config.
public RestClient(ILoadBalancer lb, IClientConfig ncc) {
super(lb, ncc);
initWithNiwsConfig(ncc);
}
// Configured client without a load balancer.
public RestClient(IClientConfig ncc) {
super(null, ncc);
initWithNiwsConfig(ncc);
}
// Wraps a caller-supplied Jersey client; note this bypasses all IClientConfig
// settings (timeouts, pool sizes, etc.) — only the retry handler is installed.
public RestClient(ILoadBalancer lb, Client jerseyClient) {
super(lb);
this.restClient = jerseyClient;
this.setRetryHandler(new HttpClientLoadBalancerErrorHandler());
}
// Reads all client settings from the supplied config, builds the Jersey client
// backed by Apache HttpClient, and installs the matching retry handler.
@Override
public void initWithNiwsConfig(IClientConfig clientConfig) {
super.initWithNiwsConfig(clientConfig);
this.ncc = clientConfig;
this.restClientName = ncc.getClientName();
// Each flag falls back to the field's current value when the key is unset.
this.isSecure = ncc.get(CommonClientConfigKey.IsSecure, this.isSecure);
this.isHostnameValidationRequired = ncc.get(CommonClientConfigKey.IsHostnameValidationRequired, this.isHostnameValidationRequired);
this.isClientAuthRequired = ncc.get(CommonClientConfigKey.IsClientAuthRequired, this.isClientAuthRequired);
this.bFollowRedirects = ncc.get(CommonClientConfigKey.FollowRedirects, true);
this.ignoreUserToken = ncc.get(CommonClientConfigKey.IgnoreUserTokenInConnectionPoolForSecureClient, this.ignoreUserToken);
// Propagate the timeouts into the Jersey-level config as well.
this.config = new DefaultApacheHttpClient4Config();
this.config.getProperties().put(
ApacheHttpClient4Config.PROPERTY_CONNECT_TIMEOUT,
ncc.get(CommonClientConfigKey.ConnectTimeout));
this.config.getProperties().put(
ApacheHttpClient4Config.PROPERTY_READ_TIMEOUT,
ncc.get(CommonClientConfigKey.ReadTimeout));
// Builds the Apache-HttpClient-backed Jersey client from ncc.
this.restClient = apacheHttpClientSpecificInitialization();
this.setRetryHandler(new HttpClientLoadBalancerErrorHandler(ncc));
}
// Rethrows a configuration-parsing failure as an IllegalArgumentException
// naming the offending config key, preserving the original cause.
private void throwInvalidValue(IClientConfigKey<?> key, Exception e) {
throw new IllegalArgumentException("Invalid value for property:" + key, e);
}
/**
 * Builds the Jersey {@link Client} backed by a named Apache HttpClient 4,
 * applying the Ribbon configuration in order: retry-handler suppression,
 * connection-pool cleanup, pool sizes, timeouts, socket buffers, stale
 * checking, linger, proxy, SSL/TLS scheme registration and user-token
 * handling.
 *
 * @return the fully configured Jersey client
 * @throws IllegalArgumentException if any configured property value is invalid
 */
protected Client apacheHttpClientSpecificInitialization() {
httpClient4 = NFHttpClientFactory.getNamedNFHttpClient(restClientName, this.ncc, true);
if (httpClient4 instanceof AbstractHttpClient) {
// DONT use our NFHttpClient's default Retry Handler since we have
// retry handling (same server/next server) in RestClient itself
((AbstractHttpClient) httpClient4).setHttpRequestRetryHandler(new NFHttpMethodRetryHandler(restClientName, 0, false, 0));
} else {
logger.warn("Unexpected error: Unable to disable NFHttpClient "
+ "retry handler, this most likely will not cause an "
+ "issue but probably should be looked at");
}
HttpParams httpClientParams = httpClient4.getParams();
// initialize Connection Manager cleanup facility
NFHttpClient nfHttpClient = (NFHttpClient) httpClient4;
// should we enable connection cleanup for idle connections?
try {
enableConnectionPoolCleanerTask = ncc.getOrDefault(CommonClientConfigKey.ConnectionPoolCleanerTaskEnabled);
nfHttpClient.getConnPoolCleaner().setEnableConnectionPoolCleanerTask(enableConnectionPoolCleanerTask);
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.ConnectionPoolCleanerTaskEnabled, e1);
}
if (enableConnectionPoolCleanerTask) {
try {
connectionCleanerRepeatInterval = ncc.getOrDefault(CommonClientConfigKey.ConnectionCleanerRepeatInterval);
nfHttpClient.getConnPoolCleaner().setConnectionCleanerRepeatInterval(connectionCleanerRepeatInterval);
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.ConnectionCleanerRepeatInterval, e1);
}
try {
connIdleEvictTimeMilliSeconds = ncc.getDynamicProperty(CommonClientConfigKey.ConnIdleEvictTimeMilliSeconds);
nfHttpClient.setConnIdleEvictTimeMilliSeconds(connIdleEvictTimeMilliSeconds);
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.ConnIdleEvictTimeMilliSeconds, e1);
}
nfHttpClient.initConnectionCleanerTask();
}
// Pool sizing only applies when the manager is a thread-safe pooling one.
try {
maxConnectionsperHost = ncc.getOrDefault(CommonClientConfigKey.MaxHttpConnectionsPerHost);
ClientConnectionManager connMgr = httpClient4.getConnectionManager();
if (connMgr instanceof ThreadSafeClientConnManager) {
((ThreadSafeClientConnManager) connMgr)
.setDefaultMaxPerRoute(maxConnectionsperHost);
}
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.MaxHttpConnectionsPerHost, e1);
}
try {
maxTotalConnections = ncc.getOrDefault(CommonClientConfigKey.MaxTotalHttpConnections);
ClientConnectionManager connMgr = httpClient4.getConnectionManager();
if (connMgr instanceof ThreadSafeClientConnManager) {
((ThreadSafeClientConnManager) connMgr)
.setMaxTotal(maxTotalConnections);
}
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.MaxTotalHttpConnections, e1);
}
// Socket-level timeouts mirror the Jersey-level ones set in initWithNiwsConfig().
try {
connectionTimeout = ncc.getOrDefault(CommonClientConfigKey.ConnectTimeout);
HttpConnectionParams.setConnectionTimeout(httpClientParams,
connectionTimeout);
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.ConnectTimeout, e1);
}
try {
readTimeout = ncc.getOrDefault(CommonClientConfigKey.ReadTimeout);
HttpConnectionParams.setSoTimeout(httpClientParams, readTimeout);
} catch (Exception e1) {
throwInvalidValue(CommonClientConfigKey.ReadTimeout, e1);
}
// httpclient 4 seems to only have one buffer size controlling both
// send/receive - so let's take the bigger of the two values and use
// it as buffer size
int bufferSize = Integer.MIN_VALUE;
if (ncc.get(CommonClientConfigKey.ReceiveBufferSize) != null) {
try {
bufferSize = ncc.getOrDefault(CommonClientConfigKey.ReceiveBufferSize);
} catch (Exception e) {
throwInvalidValue(CommonClientConfigKey.ReceiveBufferSize, e);
}
// NOTE(review): SendBufferSize is only consulted when ReceiveBufferSize is
// also set; a send-buffer-only configuration is silently ignored here.
if (ncc.get(CommonClientConfigKey.SendBufferSize) != null) {
try {
int sendBufferSize = ncc.getOrDefault(CommonClientConfigKey.SendBufferSize);
if (sendBufferSize > bufferSize) {
bufferSize = sendBufferSize;
}
} catch (Exception e) {
throwInvalidValue(CommonClientConfigKey.SendBufferSize,e);
}
}
}
if (bufferSize != Integer.MIN_VALUE) {
HttpConnectionParams.setSocketBufferSize(httpClientParams,
bufferSize);
}
if (ncc.get(CommonClientConfigKey.StaleCheckingEnabled) != null) {
try {
HttpConnectionParams.setStaleCheckingEnabled(
httpClientParams, ncc.getOrDefault(CommonClientConfigKey.StaleCheckingEnabled));
} catch (Exception e) {
throwInvalidValue(CommonClientConfigKey.StaleCheckingEnabled, e);
}
}
if (ncc.get(CommonClientConfigKey.Linger) != null) {
try {
HttpConnectionParams.setLinger(httpClientParams, ncc.getOrDefault(CommonClientConfigKey.Linger));
} catch (Exception e) {
throwInvalidValue(CommonClientConfigKey.Linger, e);
}
}
if (ncc.get(CommonClientConfigKey.ProxyHost) != null) {
try {
proxyHost = (String) ncc.getOrDefault(CommonClientConfigKey.ProxyHost);
proxyPort = ncc.getOrDefault(CommonClientConfigKey.ProxyPort);
HttpHost proxy = new HttpHost(proxyHost, proxyPort);
httpClient4.getParams()
.setParameter(ConnRouteParams.DEFAULT_PROXY, proxy);
} catch (Exception e) {
// Note: an invalid ProxyPort is also reported under the ProxyHost key here.
throwInvalidValue(CommonClientConfigKey.ProxyHost, e);
}
}
if (isSecure) {
final URL trustStoreUrl = getResourceForOptionalProperty(CommonClientConfigKey.TrustStore);
final URL keyStoreUrl = getResourceForOptionalProperty(CommonClientConfigKey.KeyStore);
final ClientConnectionManager currentManager = httpClient4.getConnectionManager();
AbstractSslContextFactory abstractFactory = null;
if ( // if client auth is required, need both a truststore and a keystore to warrant configuring
// if client is not is not required, we only need a keystore OR a truststore to warrant configuring
(isClientAuthRequired && (trustStoreUrl != null && keyStoreUrl != null))
|| (!isClientAuthRequired && (trustStoreUrl != null || keyStoreUrl != null))
) {
try {
abstractFactory = new URLSslContextFactory(trustStoreUrl,
ncc.get(CommonClientConfigKey.TrustStorePassword),
keyStoreUrl,
ncc.get(CommonClientConfigKey.KeyStorePassword));
} catch (ClientSslSocketFactoryException e) {
throw new IllegalArgumentException("Unable to configure custom secure socket factory", e);
}
}
KeyStoreAwareSocketFactory awareSocketFactory;
try {
// abstractFactory may still be null here; presumably KeyStoreAwareSocketFactory
// then falls back to JVM-default trust material -- TODO confirm.
awareSocketFactory = isHostnameValidationRequired ? new KeyStoreAwareSocketFactory(abstractFactory) :
new KeyStoreAwareSocketFactory(abstractFactory, SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
currentManager.getSchemeRegistry().register(new Scheme(
"https",443, awareSocketFactory));
} catch (Exception e) {
throw new IllegalArgumentException("Unable to configure custom secure socket factory", e);
}
}
// Warning that if user tokens are used (i.e. ignoreUserToken == false) this may be prevent SSL connections from being
// reused, which is generally not the intent for long-living proxy connections and the like.
// See http://hc.apache.org/httpcomponents-client-ga/tutorial/html/advanced.html
if (ignoreUserToken) {
((DefaultHttpClient) httpClient4).setUserTokenHandler(new UserTokenHandler() {
@Override
public Object getUserToken(HttpContext context) {
return null;
}
});
}
// custom SSL Factory handler
String customSSLFactoryClassName = ncc.get(CommonClientConfigKey.CustomSSLSocketFactoryClassName);
if (customSSLFactoryClassName != null){
try{
SSLSocketFactory customSocketFactory = (SSLSocketFactory) ClientFactory.instantiateInstanceWithClientConfig(customSSLFactoryClassName, ncc);
// This registration replaces any "https" scheme registered above.
httpClient4.getConnectionManager().getSchemeRegistry().register(new Scheme(
"https",443, customSocketFactory));
} catch(Exception e){
throwInvalidValue(CommonClientConfigKey.CustomSSLSocketFactoryClassName, e);
}
}
ApacheHttpClient4Handler handler = new ApacheHttpClient4Handler(httpClient4, new BasicCookieStore(), false);
return new ApacheHttpClient4(handler, config);
}
/**
 * Re-registers the "https" scheme on the underlying connection manager with a
 * socket factory built from the given SSL context factory, honoring this
 * client's hostname-validation setting.
 *
 * @throws IllegalArgumentException if the socket factory cannot be created
 */
public void resetSSLSocketFactory(AbstractSslContextFactory abstractContextFactory){
    try {
        final KeyStoreAwareSocketFactory socketFactory;
        if (isHostnameValidationRequired) {
            socketFactory = new KeyStoreAwareSocketFactory(abstractContextFactory);
        } else {
            socketFactory = new KeyStoreAwareSocketFactory(abstractContextFactory,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
        }
        httpClient4.getConnectionManager().getSchemeRegistry()
                .register(new Scheme("https", 443, socketFactory));
    } catch (Exception e) {
        throw new IllegalArgumentException("Unable to configure custom secure socket factory", e);
    }
}
/**
 * Returns the {@link KeyStore} backing this client's registered "https" scheme.
 *
 * @throws IllegalStateException if no "https" scheme is registered, or its
 *         socket factory is not a {@link KeyStoreAwareSocketFactory}
 */
public KeyStore getKeyStore(){
    SchemeRegistry registry = httpClient4.getConnectionManager().getSchemeRegistry();
    if(! registry.getSchemeNames().contains("https")){
        throw new IllegalStateException("Registry does not include an 'https' entry.");
    }
    // Fixed: reuse the registry fetched above instead of resolving it a second
    // time from the connection manager.
    SchemeSocketFactory awareSocketFactory = registry.getScheme("https").getSchemeSocketFactory();
    if(awareSocketFactory instanceof KeyStoreAwareSocketFactory){
        return ((KeyStoreAwareSocketFactory) awareSocketFactory).getKeyStore();
    }else{
        throw new IllegalStateException("Cannot extract keystore from scheme socket factory of type: " + awareSocketFactory.getClass().getName());
    }
}
/**
 * Resolves a resource name to a URL, trying progressively broader lookups:
 * the thread context class loader, the system class loader, this class and
 * its own class loader, and finally the file system (after URL-decoding the
 * name).
 *
 * @param resourceName classpath resource name or file path
 * @return the resolved URL, or null if the resource could not be found
 */
public static URL getResource(String resourceName)
{
URL url = null;
// attempt to load from the context classpath
ClassLoader loader = Thread.currentThread().getContextClassLoader();
if (loader != null) {
url = loader.getResource(resourceName);
}
if (url == null) {
// attempt to load from the system classpath
url = ClassLoader.getSystemResource(resourceName);
}
if (url == null) {
// attempt to load relative to this class (Class.getResource semantics)
url = RestClient.class.getResource(resourceName);
}
if (url == null) {
// attempt to load via this class's own class loader
url = RestClient.class.getClassLoader().getResource(resourceName);
}
if (url == null) {
// last resort: treat the (URL-decoded) name as a file system path
try {
resourceName = URLDecoder.decode(resourceName, "UTF-8");
url = (new File(resourceName)).toURI().toURL();
} catch (Exception e) {
logger.error("Problem loading resource", e);
}
}
return url;
}
/** Returns the underlying Jersey {@link Client} used to execute requests. */
public Client getJerseyClient() {
return restClient;
}
/** Replaces the underlying Jersey {@link Client} used to execute requests. */
public void setJerseyClient(Client c) {
restClient = c;
}
/**
 * Looks up an optional config property naming a resource and resolves it to a
 * URL. Returns null when the property is unset; throws when it is set but the
 * named resource cannot be found.
 */
private URL getResourceForOptionalProperty(final IClientConfigKey configKey) {
    final String propValue = (String) ncc.get(configKey);
    if (propValue == null) {
        return null;
    }
    final URL resource = getResource(propValue);
    if (resource == null) {
        throw new IllegalArgumentException("No resource found for " + configKey + ": " + propValue);
    }
    return resource;
}
/** Executes the request with no per-request config (the request's own override config applies). */
public HttpResponse execute(HttpRequest task) throws Exception {
return execute(task, null);
}
/**
 * Executes the request, preferring the supplied per-request config and
 * falling back to the request's own override config when none is given.
 */
@Override
public HttpResponse execute(HttpRequest task, IClientConfig requestConfig) throws Exception {
    IClientConfig effectiveConfig = requestConfig;
    if (effectiveConfig == null) {
        effectiveConfig = task.getOverrideConfig();
    }
    return execute(task.getVerb(), task.getUri(), task.getHeaders(),
            task.getQueryParams(), effectiveConfig, task.getEntity());
}
/** Falls back to port 80 when the superclass cannot map the scheme to a port. */
@Override
protected int getDefaultPortFromScheme(String scheme) {
    final int resolved = super.getDefaultPortFromScheme(scheme);
    return (resolved < 0) ? 80 : resolved;
}
/**
 * Fills in scheme and port for a partial URI. An explicit scheme decides
 * whether the call is secure; otherwise the configured IsSecure flag does.
 * A missing port defaults to 80 (http) or 443 (https).
 */
@Override
protected Pair<String, Integer> deriveSchemeAndPortFromPartialUri(URI uri) {
    String scheme = uri.getScheme();
    final boolean secure;
    if (scheme != null) {
        secure = "https".equalsIgnoreCase(scheme);
    } else {
        secure = ncc.get(CommonClientConfigKey.IsSecure, this.isSecure);
        scheme = secure ? "https" : "http";
    }
    int port = uri.getPort();
    if (port < 0) {
        port = secure ? 443 : 80;
    }
    return new Pair<>(scheme, port);
}
/**
 * Performs the actual HTTP call through Jersey: applies follow-redirects,
 * builds the resource with query parameters and headers, dispatches on the
 * verb, and wraps the Jersey response. A 503 response is closed and surfaced
 * as a SERVER_THROTTLED ClientException so retry/circuit-breaker logic can
 * react to it.
 *
 * @param verb HTTP verb to execute
 * @param uri fully resolved target URI
 * @param headers request headers (may be null)
 * @param params query parameters (may be null)
 * @param overriddenClientConfig per-request config overrides (may be null)
 * @param requestEntity request body used for POST/PUT (may be null)
 * @throws ClientException on an unknown verb or server throttling (503)
 */
private HttpResponse execute(HttpRequest.Verb verb, URI uri,
Map<String, Collection<String>> headers, Map<String, Collection<String>> params,
IClientConfig overriddenClientConfig, Object requestEntity) throws Exception {
HttpClientResponse thisResponse = null;
// Per-request FollowRedirects (if set) overrides the client-level default.
final boolean bbFollowRedirects = Optional.ofNullable(overriddenClientConfig)
.flatMap(config -> config.getIfSet(CommonClientConfigKey.FollowRedirects))
.orElse(bFollowRedirects);
// NOTE(review): this mutates the shared Jersey client, so concurrent requests
// with different FollowRedirects settings could interfere -- confirm intended.
restClient.setFollowRedirects(bbFollowRedirects);
if (logger.isDebugEnabled()) {
logger.debug("RestClient sending new Request(" + verb
+ ": ) " + uri);
}
WebResource xResource = restClient.resource(uri.toString());
if (params != null) {
for (Map.Entry<String, Collection<String>> entry: params.entrySet()) {
String name = entry.getKey();
for (String value: entry.getValue()) {
xResource = xResource.queryParam(name, value);
}
}
}
ClientResponse jerseyResponse;
Builder b = xResource.getRequestBuilder();
if (headers != null) {
for (Map.Entry<String, Collection<String>> entry: headers.entrySet()) {
String name = entry.getKey();
for (String value: entry.getValue()) {
b = b.header(name, value);
}
}
}
Object entity = requestEntity;
switch (verb) {
case GET:
jerseyResponse = b.get(ClientResponse.class);
break;
case POST:
jerseyResponse = b.post(ClientResponse.class, entity);
break;
case PUT:
jerseyResponse = b.put(ClientResponse.class, entity);
break;
case DELETE:
jerseyResponse = b.delete(ClientResponse.class);
break;
case HEAD:
jerseyResponse = b.head();
break;
case OPTIONS:
jerseyResponse = b.options(ClientResponse.class);
break;
default:
throw new ClientException(
ClientException.ErrorType.GENERAL,
"You have to one of the REST verbs such as GET, POST etc.");
}
thisResponse = new HttpClientResponse(jerseyResponse, uri, overriddenClientConfig);
if (thisResponse.getStatus() == 503){
// Release the underlying connection before signalling throttling.
thisResponse.close();
throw new ClientException(ClientException.ErrorType.SERVER_THROTTLED);
}
return thisResponse;
}
/**
 * A request may be retried on connect/socket failures, but never when the
 * server explicitly throttled us.
 */
@Override
protected boolean isRetriableException(Throwable e) {
    if (e instanceof ClientException) {
        ClientException ce = (ClientException) e;
        if (ce.getErrorType() == ClientException.ErrorType.SERVER_THROTTLED) {
            return false;
        }
    }
    return isConnectException(e) || isSocketException(e);
}
/**
 * Trips the circuit breaker on server throttling and on low-level
 * connect/socket failures.
 */
@Override
protected boolean isCircuitBreakerException(Throwable e) {
    final boolean throttled = (e instanceof ClientException)
            && ((ClientException) e).getErrorType() == ClientException.ErrorType.SERVER_THROTTLED;
    return throttled || isConnectException(e) || isSocketException(e);
}
/**
 * Walks up to 10 levels of the cause chain looking for a SocketException or
 * SocketTimeoutException (the depth cap guards against cyclic cause chains).
 */
private static boolean isSocketException(Throwable e) {
    Throwable cause = e;
    for (int depth = 0; cause != null && depth < 10; depth++) {
        if (cause instanceof SocketException || cause instanceof SocketTimeoutException) {
            return true;
        }
        cause = cause.getCause();
    }
    return false;
}
/**
 * Walks up to 10 levels of the cause chain looking for a connect-level
 * failure: a SocketException, or an Apache ConnectTimeoutException that is
 * not a pool timeout (pool exhaustion is not a connect failure).
 */
private static boolean isConnectException(Throwable e) {
    Throwable cause = e;
    for (int depth = 0; cause != null && depth < 10; depth++) {
        if (cause instanceof SocketException) {
            return true;
        }
        if (cause instanceof org.apache.http.conn.ConnectTimeoutException
                && !(cause instanceof org.apache.http.conn.ConnectionPoolTimeoutException)) {
            return true;
        }
        cause = cause.getCause();
    }
    return false;
}
/**
 * Ensures the vip address carries a scheme before delegating to the
 * superclass.
 * <p>
 * Fixed: the previous check used {@code vipAddress.contains("http")}, which
 * wrongly skipped the default prefix for addresses that merely contain
 * "http" in the host name (e.g. "myhttpservice:8080"). Only a genuine
 * scheme prefix should suppress the default "http://".
 */
@Override
protected Pair<String, Integer> deriveHostAndPortFromVipAddress(String vipAddress)
        throws URISyntaxException, ClientException {
    if (!vipAddress.startsWith("http://") && !vipAddress.startsWith("https://")) {
        vipAddress = "http://" + vipAddress;
    }
    return super.deriveHostAndPortFromVipAddress(vipAddress);
}
/**
 * Builds a retry policy for this request: non-retriable requests retry
 * nothing; otherwise same-server retries are allowed, and all-error retries
 * are additionally allowed for GETs or when OkToRetryOnAllOperations is set.
 */
@Override
public RequestSpecificRetryHandler getRequestSpecificRetryHandler(
    HttpRequest request, IClientConfig requestConfig) {
    final boolean retriable = request.isRetriable();
    boolean retryOnAllErrors = false;
    if (retriable) {
        retryOnAllErrors = this.ncc.get(CommonClientConfigKey.OkToRetryOnAllOperations, false)
                || request.getVerb() == HttpRequest.Verb.GET;
    }
    return new RequestSpecificRetryHandler(retriable, retryOnAllErrors, this.getRetryHandler(), requestConfig);
}
/** Shuts down the load balancer (when it supports shutdown) and the named Apache HttpClient. */
public void shutdown() {
    final ILoadBalancer balancer = this.getLoadBalancer();
    if (balancer instanceof BaseLoadBalancer) {
        ((BaseLoadBalancer) balancer).shutdown();
    }
    NFHttpClientFactory.shutdownNFHttpClient(restClientName);
}
}
| 7,208 |
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.loadbalancer;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* Ping implementation if you want to do a "health check" kind of Ping. This
* will be a "real" ping. As in a real http/s call is made to this url e.g.
* http://ec2-75-101-231-85.compute-1.amazonaws.com:7101/cs/hostRunning
*
* Some services/clients choose PingDiscovery - which is quick but is not a real
* ping. i.e It just asks discovery (eureka) in-memory cache if the server is present
* in its Roster PingUrl on the other hand, makes an actual call. This is more
* expensive - but its the "standard" way most VIPs and other services perform
* HealthChecks.
*
* Choose your Ping based on your needs.
*
* @author stonse
*
*/
public class PingUrl implements IPing {
    private static final Logger LOGGER = LoggerFactory.getLogger(PingUrl.class);

    // Path appended to the server id when building the ping URL.
    String pingAppendString = "";
    // Whether to ping over https instead of http.
    boolean isSecure = false;
    // Optional exact response body required for the server to count as alive.
    String expectedContent = null;

    /*
     * Send one ping only.
     *
     * Well, send what you need to determine whether or not the
     * server is still alive. Should return within a "reasonable"
     * time.
     */
    public PingUrl() {
    }

    public PingUrl(boolean isSecure, String pingAppendString) {
        this.isSecure = isSecure;
        this.pingAppendString = (pingAppendString != null) ? pingAppendString : "";
    }

    public void setPingAppendString(String pingAppendString) {
        this.pingAppendString = (pingAppendString != null) ? pingAppendString : "";
    }

    public String getPingAppendString() {
        return pingAppendString;
    }

    public boolean isSecure() {
        return isSecure;
    }

    /**
     * Should the Secure protocol be used to Ping
     * @param isSecure
     */
    public void setSecure(boolean isSecure) {
        this.isSecure = isSecure;
    }

    public String getExpectedContent() {
        return expectedContent;
    }

    /**
     * Is there a particular content you are hoping to see?
     * If so, set it here. The response body must match this content exactly
     * for the server to be considered alive; otherwise you may get a false
     * status.
     * @param expectedContent
     */
    public void setExpectedContent(String expectedContent) {
        this.expectedContent = expectedContent;
    }

    /**
     * Pings the given server with a real HTTP(S) GET.
     *
     * @return true if the call returned HTTP 200, or -- when expected content
     *         is configured -- if the response body matches it exactly; false
     *         otherwise, including on any I/O failure.
     */
    public boolean isAlive(Server server) {
        String urlStr = isSecure ? "https://" : "http://";
        urlStr += server.getId();
        urlStr += getPingAppendString();
        boolean isAlive = false;
        HttpClient httpClient = new DefaultHttpClient();
        HttpUriRequest getRequest = new HttpGet(urlStr);
        String content = null;
        try {
            HttpResponse response = httpClient.execute(getRequest);
            content = EntityUtils.toString(response.getEntity());
            isAlive = (response.getStatusLine().getStatusCode() == 200);
            if (getExpectedContent() != null) {
                LOGGER.debug("content:" + content);
                // When expected content is configured, the body alone decides
                // liveness (a null or different body means "not alive").
                isAlive = getExpectedContent().equals(content);
            }
        } catch (IOException e) {
            // Fixed: was e.printStackTrace(); a failed request simply means "not alive".
            LOGGER.error("Exception while pinging " + urlStr, e);
        } finally {
            // Release the connection.
            getRequest.abort();
            // Fixed: shut down the per-call client so its connection manager
            // is not leaked on every ping.
            httpClient.getConnectionManager().shutdown();
        }
        return isAlive;
    }

    public static void main(String[] args) {
        PingUrl p = new PingUrl(false, "/cs/hostRunning");
        p.setExpectedContent("true");
        Server s = new Server("ec2-75-101-231-85.compute-1.amazonaws.com", 7101);
        boolean isAlive = p.isAlive(s);
        System.out.println("isAlive:" + isAlive);
    }
}
| 7,209 |
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
import java.util.concurrent.TimeUnit;
import org.apache.http.conn.ClientConnectionRequest;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.conn.scheme.SchemeRegistry;
import org.apache.http.impl.conn.tsccm.AbstractConnPool;
import org.apache.http.impl.conn.tsccm.ConnPoolByRoute;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.params.HttpParams;
import com.google.common.annotations.VisibleForTesting;
/**
* A connection manager that uses {@link NamedConnectionPool}, which provides
* connection reuse statistics, as its underlying connection pool.
*
* @author awang
*
*/
public class MonitoredConnectionManager extends ThreadSafeClientConnManager {

    public MonitoredConnectionManager(String name) {
        super();
        initMonitors(name);
    }

    public MonitoredConnectionManager(String name, SchemeRegistry schreg, long connTTL,
            TimeUnit connTTLTimeUnit) {
        super(schreg, connTTL, connTTLTimeUnit);
        initMonitors(name);
    }

    public MonitoredConnectionManager(String name, SchemeRegistry schreg) {
        super(schreg);
        initMonitors(name);
    }

    /** Registers the Servo monitors on the underlying pool, keyed by client name. */
    void initMonitors(String name) {
        if (this.pool instanceof NamedConnectionPool) {
            ((NamedConnectionPool) this.pool).initMonitors(name);
        }
    }

    @Override
    @Deprecated
    protected AbstractConnPool createConnectionPool(HttpParams params) {
        return new NamedConnectionPool(connOperator, params);
    }

    @Override
    protected ConnPoolByRoute createConnectionPool(long connTTL,
            TimeUnit connTTLTimeUnit) {
        // 20 is the pool's initial max-total connection cap; presumably adjusted
        // afterwards by the configuring client (e.g. via setMaxTotal) -- TODO confirm.
        return new NamedConnectionPool(connOperator, connPerRoute, 20, connTTL, connTTLTimeUnit);
    }

    @VisibleForTesting
    ConnPoolByRoute getConnectionPool() {
        return this.pool;
    }

    // Fixed: removed the auto-generated requestConnection(HttpRoute, Object)
    // override; it only delegated to super and carried a leftover TODO stub.
    // The superclass implementation is used directly.
}
| 7,210 |
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
/**
* A simple netflix specific extension class for {@link org.apache.commons.httpclient.DefaultHttpMethodRetryHandler}.
*
* Provides a configurable override for the number of retries. Also waits for a configurable time before retry.
*/
import java.io.IOException;
import org.apache.http.HttpRequest;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.protocol.ExecutionContext;
import org.apache.http.protocol.HttpContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.servo.monitor.DynamicCounter;
public class NFHttpMethodRetryHandler extends DefaultHttpRequestRetryHandler {
    private static final String RETRY_COUNTER = "PLATFORM:NFttpClient:Retries:";

    // Shared, stateless logger; static final rather than one instance per handler.
    private static final Logger logger = LoggerFactory.getLogger(NFHttpMethodRetryHandler.class);

    private final int sleepTimeFactorMs;
    private final String httpClientName;

    /**
     * Creates a new NFHttpMethodRetryHandler.
     * @param httpClientName - the name of the nfhttpclient
     * @param retryCount the number of times a method will be retried
     * @param requestSentRetryEnabled if true, methods that have successfully sent their request will be retried
     * @param sleepTimeFactorMs number of milliseconds to sleep before the next try. This factor is used along with execution count
     * to determine the sleep time (ie) executionCount * sleepTimeFactorMs
     */
    public NFHttpMethodRetryHandler(String httpClientName, int retryCount, boolean requestSentRetryEnabled, int sleepTimeFactorMs) {
        super(retryCount, requestSentRetryEnabled);
        this.httpClientName = httpClientName;
        this.sleepTimeFactorMs = sleepTimeFactorMs;
    }

    /**
     * Delegates the retry decision to the superclass; when a retry is allowed,
     * sleeps executionCount * sleepTimeFactorMs (linear back-off) and bumps a
     * Servo counter keyed by HTTP method and request path.
     */
    @Override
    public boolean retryRequest(
        final IOException exception,
        int executionCount,
        HttpContext context
    ) {
        if (!super.retryRequest(exception, executionCount, context)) {
            return false;
        }
        HttpRequest request = (HttpRequest)
            context.getAttribute(ExecutionContext.HTTP_REQUEST);
        String methodName = request.getRequestLine().getMethod();
        String path = "UNKNOWN_PATH";
        if (request instanceof HttpUriRequest) {
            HttpUriRequest uriReq = (HttpUriRequest) request;
            path = uriReq.getURI().toString();
        }
        try {
            // Fixed: widen to long before multiplying, avoiding int overflow for
            // large factors (the previous FindBugs ICAST suppression is now moot).
            Thread.sleep(executionCount * (long) this.sleepTimeFactorMs);
        }
        catch (InterruptedException e) {
            // Fixed: restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            logger.warn("Interrupted while sleep before retrying http method " + methodName + " " + path, e);
        }
        DynamicCounter.increment(RETRY_COUNTER + methodName + ":" + path);
        return true;
    }
}
| 7,211 |
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
import java.util.concurrent.TimeUnit;
import org.apache.http.conn.ClientConnectionOperator;
import org.apache.http.conn.ConnectionPoolTimeoutException;
import org.apache.http.conn.params.ConnPerRoute;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.impl.conn.tsccm.BasicPoolEntry;
import org.apache.http.impl.conn.tsccm.ConnPoolByRoute;
import org.apache.http.impl.conn.tsccm.PoolEntryRequest;
import org.apache.http.impl.conn.tsccm.RouteSpecificPool;
import org.apache.http.impl.conn.tsccm.WaitingThreadAborter;
import org.apache.http.params.HttpParams;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
/**
* A connection pool that provides Servo counters to monitor the efficiency.
* Three counters are provided: counter for getting free entries (or reusing entries),
* counter for creating new entries, and counter for every connection request.
*
* @author awang
*
*/
public class NamedConnectionPool extends ConnPoolByRoute {
// Servo counters/timers tracking pool efficiency; created in initMonitors().
private Counter freeEntryCounter;
private Counter createEntryCounter;
private Counter requestCounter;
private Counter releaseCounter;
private Counter deleteCounter;
private Timer requestTimer;
private Timer creationTimer;
// Name under which this pool's monitors are registered with Servo.
private String name;
public NamedConnectionPool(String name, ClientConnectionOperator operator,
ConnPerRoute connPerRoute, int maxTotalConnections, long connTTL,
TimeUnit connTTLTimeUnit) {
super(operator, connPerRoute, maxTotalConnections, connTTL, connTTLTimeUnit);
initMonitors(name);
}
public NamedConnectionPool(String name, ClientConnectionOperator operator,
ConnPerRoute connPerRoute, int maxTotalConnections) {
super(operator, connPerRoute, maxTotalConnections);
initMonitors(name);
}
public NamedConnectionPool(String name, ClientConnectionOperator operator,
HttpParams params) {
super(operator, params);
initMonitors(name);
}
// The package-private constructors intentionally skip initMonitors(); the
// owner (e.g. MonitoredConnectionManager) calls initMonitors(name) afterwards.
NamedConnectionPool(ClientConnectionOperator operator,
ConnPerRoute connPerRoute, int maxTotalConnections, long connTTL,
TimeUnit connTTLTimeUnit) {
super(operator, connPerRoute, maxTotalConnections, connTTL, connTTLTimeUnit);
}
NamedConnectionPool(ClientConnectionOperator operator,
ConnPerRoute connPerRoute, int maxTotalConnections) {
super(operator, connPerRoute, maxTotalConnections);
}
NamedConnectionPool(ClientConnectionOperator operator,
HttpParams params) {
super(operator, params);
}
/** Creates this pool's Servo counters/timers and registers them under the given name. */
void initMonitors(String name) {
Preconditions.checkNotNull(name);
freeEntryCounter = Monitors.newCounter(name + "_Reuse");
createEntryCounter = Monitors.newCounter(name + "_CreateNew");
requestCounter = Monitors.newCounter(name + "_Request");
releaseCounter = Monitors.newCounter(name + "_Release");
deleteCounter = Monitors.newCounter(name + "_Delete");
requestTimer = Monitors.newTimer(name + "_RequestConnectionTimer", TimeUnit.MILLISECONDS);
creationTimer = Monitors.newTimer(name + "_CreateConnectionTimer", TimeUnit.MILLISECONDS);
this.name = name;
Monitors.registerObject(name, this);
}
// Counts every connection request before delegating.
@Override
public PoolEntryRequest requestPoolEntry(HttpRoute route, Object state) {
requestCounter.increment();
return super.requestPoolEntry(route, state);
}
// Counts reuse whenever an existing free entry is handed out.
@Override
protected BasicPoolEntry getFreeEntry(RouteSpecificPool rospl, Object state) {
BasicPoolEntry entry = super.getFreeEntry(rospl, state);
if (entry != null) {
freeEntryCounter.increment();
}
return entry;
}
// Counts and times the creation of brand-new pool entries (new connections).
@Override
protected BasicPoolEntry createEntry(RouteSpecificPool rospl,
ClientConnectionOperator op) {
createEntryCounter.increment();
Stopwatch stopWatch = creationTimer.start();
try {
return super.createEntry(rospl, op);
} finally {
stopWatch.stop();
}
}
// Times how long callers wait (including blocking) to obtain an entry.
@Override
protected BasicPoolEntry getEntryBlocking(HttpRoute route, Object state,
long timeout, TimeUnit tunit, WaitingThreadAborter aborter)
throws ConnectionPoolTimeoutException, InterruptedException {
Stopwatch stopWatch = requestTimer.start();
try {
return super.getEntryBlocking(route, state, timeout, tunit, aborter);
} finally {
stopWatch.stop();
}
}
// Counts entries returned to the pool.
@Override
public void freeEntry(BasicPoolEntry entry, boolean reusable,
long validDuration, TimeUnit timeUnit) {
releaseCounter.increment();
super.freeEntry(entry, reusable, validDuration, timeUnit);
}
// Counts entries permanently removed from the pool.
@Override
protected void deleteEntry(BasicPoolEntry entry) {
deleteCounter.increment();
super.deleteEntry(entry);
}
public final long getFreeEntryCount() {
return freeEntryCounter.getValue().longValue();
}
public final long getCreatedEntryCount() {
return createEntryCounter.getValue().longValue();
}
public final long getRequestsCount() {
return requestCounter.getValue().longValue();
}
public final long getReleaseCount() {
return releaseCounter.getValue().longValue();
}
public final long getDeleteCount() {
return deleteCounter.getValue().longValue();
}
/** Servo gauge exposing the current number of pooled connections. */
@Monitor(name="connectionCount", type=DataSourceType.GAUGE)
public int getConnectionCount() {
return this.getConnectionsInPool();
}
// Unregisters the Servo monitors when the pool shuts down.
@Override
public void shutdown() {
super.shutdown();
Monitors.unregisterObject(name, this);
}
}
| 7,212 |
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.client.config.ClientConfigFactory;
import org.apache.commons.collections.keyvalue.MultiKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.servo.monitor.Monitors;
/**
* Factory class to get an instance of NFHttpClient
* @author stonse
*
*/
public class NFHttpClientFactory {

    private static final Map<MultiKey, NFHttpClient> clientMap = new ConcurrentHashMap<MultiKey, NFHttpClient>();
    private static final Map<String, NFHttpClient> namedClientMap = new ConcurrentHashMap<String, NFHttpClient>();
    private static NFHttpClient defaultClient = new NFHttpClient();

    /**
     * Returns the shared client for the given host and port, creating it on
     * first use. Fixed: the previous null-check-then-put sequence could create
     * (and leak) multiple clients under concurrency; computeIfAbsent is atomic.
     */
    public static NFHttpClient getNFHttpClient(String host, int port) {
        MultiKey mk = new MultiKey(host, port);
        return clientMap.computeIfAbsent(mk, key -> new NFHttpClient(host, port));
    }

    /** Returns the named client, creating it with a freshly loaded config if needed. */
    public static NFHttpClient getNamedNFHttpClient(String name) {
        IClientConfig config = ClientConfigFactory.DEFAULT.newConfig();
        config.loadProperties(name);
        return getNamedNFHttpClient(name, config, true);
    }

    public static NFHttpClient getNamedNFHttpClient(String name, IClientConfig config) {
        return getNamedNFHttpClient(name, config, true);
    }

    public static NFHttpClient getNamedNFHttpClient(String name, boolean registerMonitor) {
        IClientConfig config = ClientConfigFactory.DEFAULT.newConfig();
        config.loadProperties(name);
        return getNamedNFHttpClient(name, config, registerMonitor);
    }

    /**
     * Returns the named client, creating it atomically on first use. Replaces
     * the previous explicit double-checked locking on a shared lock object.
     */
    public static NFHttpClient getNamedNFHttpClient(String name, IClientConfig config, boolean registerMonitor) {
        return namedClientMap.computeIfAbsent(name,
            clientName -> new NFHttpClient(clientName, config, registerMonitor));
    }

    public static NFHttpClient getDefaultClient() {
        return defaultClient;
    }

    public static void setDefaultClient(NFHttpClient defaultClient) {
        NFHttpClientFactory.defaultClient = defaultClient;
    }

    /**
     * Shuts down and unregisters the named client. The client is removed from
     * the map first so concurrent lookups cannot obtain an instance that is in
     * the middle of shutting down.
     */
    public static void shutdownNFHttpClient(String name) {
        NFHttpClient c = namedClientMap.remove(name);
        if (c != null) {
            c.shutdown();
            Monitors.unregisterObject(name, c);
        }
    }
}
| 7,213 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4/ConnectionPoolCleaner.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
import com.netflix.client.config.Property;
import org.apache.http.conn.ClientConnectionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
* Class that is responsible to cleanup connections based on a policy
* For e.g. evict all connections from the pool that have been idle for more than x msecs
* @author stonse
*
*/
public class ConnectionPoolCleaner {

    private static final Logger logger = LoggerFactory.getLogger(ConnectionPoolCleaner.class);

    String name = "default";
    ClientConnectionManager connMgr;
    ScheduledExecutorService scheduler;

    // Idle time after which pooled connections are evicted.
    private Property<Integer> connIdleEvictTimeMilliSeconds = Property.of(30*1000);

    volatile boolean enableConnectionPoolCleanerTask = false;
    long connectionCleanerTimerDelay = 10;
    long connectionCleanerRepeatInterval = 30*1000;

    // Handle to the periodic cleanup task; used to cancel it on shutdown.
    private volatile ScheduledFuture<?> scheduledFuture;

    public ConnectionPoolCleaner(String name, ClientConnectionManager connMgr, ScheduledExecutorService scheduler) {
        this.name = name;
        this.connMgr = connMgr;
        this.scheduler = scheduler;
    }

    public Property<Integer> getConnIdleEvictTimeMilliSeconds() {
        return connIdleEvictTimeMilliSeconds;
    }

    public void setConnIdleEvictTimeMilliSeconds(Property<Integer> connIdleEvictTimeMilliSeconds) {
        this.connIdleEvictTimeMilliSeconds = connIdleEvictTimeMilliSeconds;
    }

    public boolean isEnableConnectionPoolCleanerTask() {
        return enableConnectionPoolCleanerTask;
    }

    public void setEnableConnectionPoolCleanerTask(
            boolean enableConnectionPoolCleanerTask) {
        this.enableConnectionPoolCleanerTask = enableConnectionPoolCleanerTask;
    }

    public long getConnectionCleanerTimerDelay() {
        return connectionCleanerTimerDelay;
    }

    public void setConnectionCleanerTimerDelay(long connectionCleanerTimerDelay) {
        this.connectionCleanerTimerDelay = connectionCleanerTimerDelay;
    }

    public long getConnectionCleanerRepeatInterval() {
        return connectionCleanerRepeatInterval;
    }

    public void setConnectionCleanerRepeatInterval(
            long connectionCleanerRepeatInterval) {
        this.connectionCleanerRepeatInterval = connectionCleanerRepeatInterval;
    }

    /**
     * Schedules the periodic cleanup task if enabled. Idempotent: a repeated
     * call is a no-op while a task is already scheduled. Previously a second
     * call overwrote {@code scheduledFuture}, leaving the first periodic task
     * running with no way for {@link #shutdown()} to cancel it.
     */
    public void initTask() {
        if (!enableConnectionPoolCleanerTask) {
            return;
        }
        if (scheduledFuture != null) {
            return;
        }
        scheduledFuture = scheduler.scheduleWithFixedDelay(new Runnable() {
            public void run() {
                try {
                    if (enableConnectionPoolCleanerTask) {
                        logger.debug("Connection pool clean up started for client {}", name);
                        cleanupConnections();
                    } else if (scheduledFuture != null) {
                        // Flag flipped off after scheduling: cancel ourselves.
                        scheduledFuture.cancel(true);
                    }
                } catch (Throwable e) {
                    // Swallow everything: an escaping exception would silently
                    // kill the recurring task.
                    logger.error("Exception in ConnectionPoolCleanerThread", e);
                }
            }
        }, connectionCleanerTimerDelay, connectionCleanerRepeatInterval, TimeUnit.MILLISECONDS);
        logger.info("Initializing ConnectionPoolCleaner for NFHttpClient:{}", name);
    }

    // Evicts expired connections and those idle longer than the configured threshold.
    void cleanupConnections() {
        connMgr.closeExpiredConnections();
        connMgr.closeIdleConnections(connIdleEvictTimeMilliSeconds.getOrDefault(), TimeUnit.MILLISECONDS);
    }

    /** Disables the cleaner and cancels the scheduled task, if any. */
    public void shutdown() {
        enableConnectionPoolCleanerTask = false;
        if (scheduledFuture != null) {
            scheduledFuture.cancel(true);
        }
    }

    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("ConnectionPoolCleaner:").append(name)
          .append(", connIdleEvictTimeMilliSeconds:").append(connIdleEvictTimeMilliSeconds.get())
          .append(", connectionCleanerTimerDelay:").append(connectionCleanerTimerDelay)
          .append(", connectionCleanerRepeatInterval:").append(connectionCleanerRepeatInterval);
        return sb.toString();
    }
}
| 7,214 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4/NFHttpClient.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.client.config.ClientConfigFactory;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.client.config.IClientConfigKey;
import com.netflix.client.config.Property;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.ResponseHandler;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.ClientPNames;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.client.utils.URIUtils;
import org.apache.http.conn.ClientConnectionManager;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.message.BasicHeader;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpProtocolParams;
import org.apache.http.protocol.HttpContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Netflix extension of Apache 4.0 HttpClient
* Just so we can wrap around some features.
*
* @author stonse
*
*/
public class NFHttpClient extends DefaultHttpClient {
private static final Logger LOGGER = LoggerFactory.getLogger(NFHttpClient.class);
// Per-client config keys; the "%s" placeholder is substituted with the
// client name via key.format(name) in init().
private static IClientConfigKey<Integer> RETRIES = new CommonClientConfigKey<Integer>("%s.nfhttpclient.retries", 3) {};
private static IClientConfigKey<Integer> SLEEP_TIME_FACTOR_MS = new CommonClientConfigKey<Integer>("%s.nfhttpclient.sleepTimeFactorMs", 10) {};
private static IClientConfigKey<Integer> CONN_IDLE_EVICT_TIME_MILLIS = new CommonClientConfigKey<Integer>("%s.nfhttpclient.connIdleEvictTimeMilliSeconds", 30*1000) {};
protected static final String EXECUTE_TRACER = "HttpClient-ExecuteTimer";
// Scheduler shared by all clients for their connection-pool cleanup tasks;
// initialized once in the static block with daemon threads.
private static ScheduledExecutorService connectionPoolCleanUpScheduler;
// Fixed target host/route; only non-null when built via the (host, port)
// constructor, in which case execute() always targets this host.
private HttpHost httpHost = null;
private HttpRoute httpRoute = null;
private static AtomicInteger numNonNamedHttpClients = new AtomicInteger();
private final String name;
ConnectionPoolCleaner connPoolCleaner;
Property<Integer> connIdleEvictTimeMilliSeconds;
private Property<Integer> retriesProperty;
private Property<Integer> sleepTimeFactorMsProperty;
// Servo timer measuring wall-clock time of each execute() call.
private Timer tracer;
private Property<Integer> maxTotalConnectionProperty;
private Property<Integer> maxConnectionPerHostProperty;
static {
ThreadFactory factory = (new ThreadFactoryBuilder()).setDaemon(true)
.setNameFormat("Connection pool clean up thread")
.build();
connectionPoolCleanUpScheduler = Executors.newScheduledThreadPool(2, factory);
}
// Anonymous client bound to a fixed host:port; monitors are NOT registered.
protected NFHttpClient(String host, int port){
super(new ThreadSafeClientConnManager());
this.name = "UNNAMED_" + numNonNamedHttpClients.incrementAndGet();
httpHost = new HttpHost(host, port);
httpRoute = new HttpRoute(httpHost);
init(createDefaultConfig(), false);
}
// Anonymous client with no fixed target; monitors are NOT registered.
protected NFHttpClient(){
super(new ThreadSafeClientConnManager());
this.name = "UNNAMED_" + numNonNamedHttpClients.incrementAndGet();
init(createDefaultConfig(), false);
}
// Builds a config populated from the "default" property namespace.
private static IClientConfig createDefaultConfig() {
IClientConfig config = ClientConfigFactory.DEFAULT.newConfig();
config.loadProperties("default");
return config;
}
protected NFHttpClient(String name) {
this(name, createDefaultConfig(), true);
}
protected NFHttpClient(String name, IClientConfig config) {
this(name, config, true);
}
// Named clients get a MonitoredConnectionManager so pool stats are exported.
protected NFHttpClient(String name, IClientConfig config, boolean registerMonitor) {
super(new MonitoredConnectionManager(name));
this.name = name;
init(config, registerMonitor);
}
/**
* Shared post-construction setup: default headers, retry handler, tracer,
* pool-cleaner wiring, and dynamic pool-size properties. Called exactly
* once from every constructor.
*/
void init(IClientConfig config, boolean registerMonitor) {
HttpParams params = getParams();
HttpProtocolParams.setContentCharset(params, "UTF-8");
params.setParameter(ClientPNames.CONNECTION_MANAGER_FACTORY_CLASS_NAME,
ThreadSafeClientConnManager.class.getName());
HttpClientParams.setRedirecting(params, config.get(CommonClientConfigKey.FollowRedirects, true));
// set up default headers
List<Header> defaultHeaders = new ArrayList<Header>();
defaultHeaders.add(new BasicHeader("Netflix.NFHttpClient.Version", "1.0"));
defaultHeaders.add(new BasicHeader("X-netflix-httpclientname", name));
params.setParameter(ClientPNames.DEFAULT_HEADERS, defaultHeaders);
connPoolCleaner = new ConnectionPoolCleaner(name, this.getConnectionManager(), connectionPoolCleanUpScheduler);
this.retriesProperty = config.getGlobalProperty(RETRIES.format(name));
this.sleepTimeFactorMsProperty = config.getGlobalProperty(SLEEP_TIME_FACTOR_MS.format(name));
setHttpRequestRetryHandler(
new NFHttpMethodRetryHandler(this.name, this.retriesProperty.getOrDefault(), false,
this.sleepTimeFactorMsProperty.getOrDefault()));
tracer = Monitors.newTimer(EXECUTE_TRACER + "-" + name, TimeUnit.MILLISECONDS);
if (registerMonitor) {
Monitors.registerObject(name, this);
}
// Pool-size limits track their dynamic properties at runtime.
maxTotalConnectionProperty = config.getDynamicProperty(CommonClientConfigKey.MaxTotalHttpConnections);
maxTotalConnectionProperty.onChange(newValue ->
((ThreadSafeClientConnManager) getConnectionManager()).setMaxTotal(newValue)
);
maxConnectionPerHostProperty = config.getDynamicProperty(CommonClientConfigKey.MaxHttpConnectionsPerHost);
maxConnectionPerHostProperty.onChange(newValue ->
((ThreadSafeClientConnManager) getConnectionManager()).setDefaultMaxPerRoute(newValue)
);
connIdleEvictTimeMilliSeconds = config.getGlobalProperty(CONN_IDLE_EVICT_TIME_MILLIS.format(name));
}
// Starts the periodic connection-pool cleanup task. NOTE(review): not
// invoked from init(); callers appear responsible for calling this —
// confirm against call sites before relying on automatic cleanup.
public void initConnectionCleanerTask(){
//set the Properties
connPoolCleaner.setConnIdleEvictTimeMilliSeconds(getConnIdleEvictTimeMilliSeconds());// set FastProperty reference
// for this named httpclient - so we can override it later if we want to
//init the Timer Task
//note that we can change the idletime settings after the start of the Thread
connPoolCleaner.initTask();
}
@Monitor(name = "HttpClient-ConnPoolCleaner", type = DataSourceType.INFORMATIONAL)
public ConnectionPoolCleaner getConnPoolCleaner() {
return connPoolCleaner;
}
@Monitor(name = "HttpClient-ConnIdleEvictTimeMilliSeconds", type = DataSourceType.INFORMATIONAL)
public Property<Integer> getConnIdleEvictTimeMilliSeconds() {
return connIdleEvictTimeMilliSeconds;
}
// Gauge of live pooled connections; assumes a ThreadSafeClientConnManager
// (all constructors install one or a subclass of it).
@Monitor(name="HttpClient-ConnectionsInPool", type = DataSourceType.GAUGE)
public int getConnectionsInPool() {
ClientConnectionManager connectionManager = this.getConnectionManager();
if (connectionManager != null) {
return ((ThreadSafeClientConnManager)connectionManager).getConnectionsInPool();
} else {
return 0;
}
}
// NOTE(review): method name carries a typo ("Connnections") but is public
// API and annotated for monitoring — renaming would change the metric name.
@Monitor(name = "HttpClient-MaxTotalConnections", type = DataSourceType.INFORMATIONAL)
public int getMaxTotalConnnections() {
ClientConnectionManager connectionManager = this.getConnectionManager();
if (connectionManager != null) {
return ((ThreadSafeClientConnManager)connectionManager).getMaxTotal();
} else {
return 0;
}
}
// Per-host cap: route-specific when a fixed route exists, default otherwise.
@Monitor(name = "HttpClient-MaxConnectionsPerHost", type = DataSourceType.INFORMATIONAL)
public int getMaxConnectionsPerHost() {
ClientConnectionManager connectionManager = this.getConnectionManager();
if (connectionManager != null) {
if(httpRoute == null)
return ((ThreadSafeClientConnManager)connectionManager).getDefaultMaxPerRoute();
else
return ((ThreadSafeClientConnManager)connectionManager).getMaxForRoute(httpRoute);
} else {
return 0;
}
}
@Monitor(name = "HttpClient-NumRetries", type = DataSourceType.INFORMATIONAL)
public int getNumRetries() {
return this.retriesProperty.getOrDefault();
}
public void setConnIdleEvictTimeMilliSeconds(Property<Integer> connIdleEvictTimeMilliSeconds) {
this.connIdleEvictTimeMilliSeconds = connIdleEvictTimeMilliSeconds;
}
@Monitor(name = "HttpClient-SleepTimeFactorMs", type = DataSourceType.INFORMATIONAL)
public int getSleepTimeFactorMs() {
return this.sleepTimeFactorMsProperty.getOrDefault();
}
// copied from httpclient source code
private static HttpHost determineTarget(HttpUriRequest request) throws ClientProtocolException {
// A null target may be acceptable if there is a default target.
// Otherwise, the null target is detected in the director.
HttpHost target = null;
URI requestURI = request.getURI();
if (requestURI.isAbsolute()) {
target = URIUtils.extractHost(requestURI);
if (target == null) {
throw new ClientProtocolException(
"URI does not specify a valid host name: " + requestURI);
}
}
return target;
}
@Override
public <T> T execute(
final HttpUriRequest request,
final ResponseHandler<? extends T> responseHandler)
throws IOException, ClientProtocolException {
return this.execute(request, responseHandler, null);
}
// All execute overloads funnel into the 4-arg overload below so every
// request is timed by the tracer exactly once.
@Override
public <T> T execute(
final HttpUriRequest request,
final ResponseHandler<? extends T> responseHandler,
final HttpContext context)
throws IOException, ClientProtocolException {
HttpHost target = null;
if(httpHost == null)
target = determineTarget(request);
else
target = httpHost;
return this.execute(target, request, responseHandler, context);
}
@Override
public <T> T execute(
final HttpHost target,
final HttpRequest request,
final ResponseHandler<? extends T> responseHandler)
throws IOException, ClientProtocolException {
return this.execute(target, request, responseHandler, null);
}
@Override
public <T> T execute(
final HttpHost target,
final HttpRequest request,
final ResponseHandler<? extends T> responseHandler,
final HttpContext context)
throws IOException, ClientProtocolException {
Stopwatch sw = tracer.start();
try{
// TODO: replaced method.getQueryString() with request.getRequestLine().getUri()
LOGGER.debug("Executing HTTP method: {}, uri: {}", request.getRequestLine().getMethod(), request.getRequestLine().getUri());
return super.execute(target, request, responseHandler, context);
}finally{
sw.stop();
}
}
// Stops the pool cleaner before the connection manager so the cleanup task
// never touches an already-shut-down manager.
public void shutdown() {
if (connPoolCleaner != null) {
connPoolCleaner.shutdown();
}
getConnectionManager().shutdown();
}
}
| 7,215 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4/ssl/AcceptAllSocketFactory.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4.ssl;
import com.netflix.client.IClientConfigAware;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.conn.ssl.TrustStrategy;
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.UnrecoverableKeyException;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
/**
*
* SSL Socket factory that will accept all remote endpoints.
*
* This should be used only for testing connections/connectivity.
*
* Following similar pattern as load-balancers here, which is to take an IClientConfig
*
* @author jzarfoss
*
*/
public class AcceptAllSocketFactory extends SSLSocketFactory implements IClientConfigAware {

    /**
     * Builds a factory that trusts every certificate chain unconditionally
     * and performs no hostname verification. For test connectivity only.
     */
    public AcceptAllSocketFactory() throws KeyManagementException, UnrecoverableKeyException, NoSuchAlgorithmException, KeyStoreException {
        // TrustStrategy is a single-method interface, so a lambda suffices;
        // the cast disambiguates the SSLSocketFactory constructor overloads.
        super((TrustStrategy) (chain, authType) -> true,
                SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
    }

    /**
     * In the case of this factory the intent is to ensure that a truststore is not set,
     * as this does not make sense in the context of an accept-all policy.
     */
    @Override
    public void initWithNiwsConfig(IClientConfig clientConfig) {
        if (clientConfig != null
                && clientConfig.getOrDefault(CommonClientConfigKey.TrustStore) != null) {
            throw new IllegalArgumentException("Client configured with an AcceptAllSocketFactory cannot utilize a truststore");
        }
    }
}
| 7,216 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/http4/ssl/KeyStoreAwareSocketFactory.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.http4.ssl;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import javax.net.ssl.SSLContext;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.conn.ssl.X509HostnameVerifier;
import com.netflix.client.ssl.AbstractSslContextFactory;
import com.netflix.client.ssl.ClientSslSocketFactoryException;
/**
*
* SocketFactory that remembers what keystore and truststore being used,
* allowing for that information to be queried later.
*
* @author jzarfoss
*
*/
public class KeyStoreAwareSocketFactory extends SSLSocketFactory{
private final KeyStore keyStore;
private final KeyStore trustStore;
public KeyStoreAwareSocketFactory(X509HostnameVerifier hostnameVerifier) throws NoSuchAlgorithmException, KeyStoreException{
super(SSLContext.getDefault(), hostnameVerifier);
this.keyStore = null;
this.trustStore = null;
}
public KeyStoreAwareSocketFactory(final AbstractSslContextFactory abstractFactory) throws ClientSslSocketFactoryException, NoSuchAlgorithmException{
super(abstractFactory == null ? SSLContext.getDefault() : abstractFactory.getSSLContext());
if(abstractFactory == null){
this.keyStore = null;
this.trustStore = null;
}else{
this.keyStore = abstractFactory.getKeyStore();
this.trustStore = abstractFactory.getTrustStore();
}
}
public KeyStoreAwareSocketFactory(final AbstractSslContextFactory abstractFactory, X509HostnameVerifier hostnameVerifier) throws ClientSslSocketFactoryException, NoSuchAlgorithmException{
super(abstractFactory == null ? SSLContext.getDefault() : abstractFactory.getSSLContext(), hostnameVerifier);
if(abstractFactory == null){
this.keyStore = null;
this.trustStore = null;
}else{
this.keyStore = abstractFactory.getKeyStore();
this.trustStore = abstractFactory.getTrustStore();
}
}
public KeyStore getKeyStore(){
return this.keyStore;
}
public KeyStore getTrustStore(){
return this.trustStore;
}
} | 7,217 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client/http/CaseInsensitiveMultiMap.java | package com.netflix.client.http;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.AbstractMap.SimpleEntry;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
public class CaseInsensitiveMultiMap implements HttpHeaders {

    // Keys are lower-cased header names; values retain the caller-supplied
    // (original-case) name alongside each header value.
    Multimap<String, Entry<String, String>> map = ArrayListMultimap.create();

    @Override
    public String getFirstValue(String headerName) {
        Collection<Entry<String, String>> matches = map.get(headerName.toLowerCase());
        return (matches == null || matches.isEmpty())
                ? null
                : matches.iterator().next().getValue();
    }

    @Override
    public List<String> getAllValues(String headerName) {
        List<String> result = Lists.newArrayList();
        Collection<Entry<String, String>> matches = map.get(headerName.toLowerCase());
        if (matches != null) {
            for (Entry<String, String> match : matches) {
                result.add(match.getValue());
            }
        }
        return result;
    }

    @Override
    public List<Entry<String, String>> getAllHeaders() {
        return new ArrayList<Entry<String, String>>(map.values());
    }

    @Override
    public boolean containsHeader(String name) {
        return map.containsKey(name.toLowerCase());
    }

    /** Adds a header, silently skipping exact duplicates of an existing (name, value) pair. */
    public void addHeader(String name, String value) {
        if (!getAllValues(name).contains(value)) {
            map.put(name.toLowerCase(), new SimpleEntry<String, String>(name, value));
        }
    }

    // Flattens to a Map view keyed by the original-case header names.
    Map<String, Collection<String>> asMap() {
        Multimap<String, String> flattened = ArrayListMultimap.create();
        for (Entry<String, String> header : map.values()) {
            flattened.put(header.getKey(), header.getValue());
        }
        return flattened.asMap();
    }
}
| 7,218 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client/http/HttpHeaders.java | package com.netflix.client.http;
import java.util.List;
import java.util.Map.Entry;
/**
 * Read-only view of HTTP headers; implementations treat header-name lookups
 * case-insensitively (see CaseInsensitiveMultiMap).
 */
public interface HttpHeaders {
/** Returns the first value for the given header name, or null if absent. */
public String getFirstValue(String headerName);
/** Returns all values for the given header name; empty list if absent. */
public List<String> getAllValues(String headerName);
/** Returns every (name, value) header pair. */
public List<Entry<String, String>> getAllHeaders();
/** Returns true if at least one value exists for the given header name. */
public boolean containsHeader(String name);
}
| 7,219 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client/http/HttpRequest.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.http;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Map;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import com.netflix.client.ClientRequest;
import com.netflix.client.config.IClientConfig;
/**
* Request for HTTP communication.
*
* @author awang
*
*/
public class HttpRequest extends ClientRequest {
/** Supported HTTP methods; {@link #verb()} yields the wire-format string. */
public enum Verb {
GET("GET"),
PUT("PUT"),
POST("POST"),
DELETE("DELETE"),
OPTIONS("OPTIONS"),
HEAD("HEAD");
private final String verb; // http method
Verb(String verb) {
this.verb = verb;
}
public String verb() {
return verb;
}
}
protected CaseInsensitiveMultiMap httpHeaders = new CaseInsensitiveMultiMap();
protected Multimap<String, String> queryParams = ArrayListMultimap.create();
private Object entity;
protected Verb verb;
// Package-private: instances are built via Builder; defaults to GET.
HttpRequest() {
this.verb = Verb.GET;
}
/**
 * Fluent builder for HttpRequest. NOTE(review): the copy constructor below
 * stores a reference to the passed-in request rather than a copy, so
 * builder mutations also mutate that request — confirm callers of
 * newBuilder(HttpRequest) expect aliasing.
 */
public static class Builder {
private HttpRequest request = new HttpRequest();
public Builder() {
}
public Builder(HttpRequest request) {
this.request = request;
}
public Builder uri(URI uri) {
request.setUri(uri);
return this;
}
// String variant; a malformed URI surfaces as an unchecked RuntimeException.
public Builder uri(String uri) {
try {
request.setUri(new URI(uri));
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
return this;
}
public Builder header(String name, String value) {
request.httpHeaders.addHeader(name, value);
return this;
}
// Package-private: replaces (does not merge) the whole query-param multimap.
Builder queryParams(Multimap<String, String> queryParams) {
request.queryParams = queryParams;
return this;
}
/**
* @deprecated request configuration should be now be passed
* as a method parameter to client's execution API
*/
@Deprecated
public Builder overrideConfig(IClientConfig config) {
request.setOverrideConfig(config);
return this;
}
// Package-private: replaces the whole header map (used by replaceUri).
Builder headers(CaseInsensitiveMultiMap headers) {
request.httpHeaders = headers;
return this;
}
public Builder setRetriable(boolean retriable) {
request.setRetriable(retriable);
return this;
}
/**
* @deprecated see {@link #queryParam(String, String)}
*/
@Deprecated
public Builder queryParams(String name, String value) {
request.queryParams.put(name, value);
return this;
}
public Builder queryParam(String name, String value) {
request.queryParams.put(name, value);
return this;
}
public Builder entity(Object entity) {
request.entity = entity;
return this;
}
public Builder verb(Verb verb) {
request.verb = verb;
return this;
}
public Builder loadBalancerKey(Object loadBalancerKey) {
request.setLoadBalancerKey(loadBalancerKey);
return this;
}
// Returns the built request; no defensive copy is made.
public HttpRequest build() {
return request;
}
}
public Map<String, Collection<String>> getQueryParams() {
return queryParams.asMap();
}
public Verb getVerb() {
return verb;
}
/**
* Replaced by {@link #getHttpHeaders()}
*/
@Deprecated
public Map<String, Collection<String>> getHeaders() {
return httpHeaders.asMap();
}
public HttpHeaders getHttpHeaders() {
return httpHeaders;
}
public Object getEntity() {
return entity;
}
/**
* Test if the request is retriable. If the request is
* a {@link Verb#GET} and {@link Builder#setRetriable(boolean)}
* is not called, returns true. Otherwise, returns value passed in
* {@link Builder#setRetriable(boolean)}
*/
@Override
public boolean isRetriable() {
if (this.verb == Verb.GET && isRetriable == null) {
return true;
}
return super.isRetriable();
}
public static Builder newBuilder() {
return new Builder();
}
public static Builder newBuilder(HttpRequest toCopy) {
return new Builder(toCopy);
}
/**
* Return a new instance of HttpRequest replacing the URI.
*/
@Override
public HttpRequest replaceUri(URI newURI) {
return (new Builder()).uri(newURI)
.headers(this.httpHeaders)
.overrideConfig(this.getOverrideConfig())
.queryParams(this.queryParams)
.setRetriable(this.isRetriable())
.loadBalancerKey(this.getLoadBalancerKey())
.verb(this.getVerb())
.entity(this.entity)
.build();
}
}
| 7,220 |
0 | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client | Create_ds/ribbon/ribbon-httpclient/src/main/java/com/netflix/client/http/HttpResponse.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.client.http;
import com.google.common.reflect.TypeToken;
import com.netflix.client.IResponse;
import java.io.Closeable;
import java.io.InputStream;
import java.lang.reflect.Type;
import java.util.Collection;
import java.util.Map;
/**
* Response for HTTP communication.
*
* @author awang
*
*/
public interface HttpResponse extends IResponse, Closeable {
/**
* Get the HTTP status code.
*/
public int getStatus();
/**
* Get the reason phrase of HTTP status
*/
public String getStatusLine();
/**
* @see #getHttpHeaders()
*/
@Override
@Deprecated
public Map<String, Collection<String>> getHeaders();
/** Returns the response headers as a case-insensitive view. */
public HttpHeaders getHttpHeaders();
/** Releases the underlying connection/stream; overrides Closeable without a checked exception. */
public void close();
/** Returns the raw response body stream; caller is responsible for consuming/closing it. */
public InputStream getInputStream();
/** Returns true if the response carries a body that can be deserialized. */
public boolean hasEntity();
/** Deserializes the body into the given class. */
public <T> T getEntity(Class<T> type) throws Exception;
/** Deserializes the body into the given (possibly generic) type. */
public <T> T getEntity(Type type) throws Exception;
/**
* @deprecated use {@link #getEntity(Type)}
*/
@Deprecated
public <T> T getEntity(TypeToken<T> type) throws Exception;
}
| 7,221 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/EvCacheAnnotationTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy;
import com.netflix.ribbon.CacheProvider;
import com.netflix.ribbon.RibbonRequest;
import com.netflix.ribbon.evache.EvCacheProvider;
import com.netflix.ribbon.http.HttpRequestBuilder;
import com.netflix.ribbon.http.HttpRequestTemplate;
import com.netflix.ribbon.http.HttpRequestTemplate.Builder;
import com.netflix.ribbon.http.HttpResourceGroup;
import com.netflix.ribbon.proxy.processor.AnnotationProcessorsProvider;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.MovieFallbackHandler;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.SampleHttpResponseValidator;
import com.netflix.ribbon.proxy.sample.SampleMovieServiceWithEVCache;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.annotation.Mock;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import static com.netflix.ribbon.proxy.Utils.methodByName;
import static junit.framework.Assert.assertEquals;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.expect;
import static org.powermock.api.easymock.PowerMock.*;
/**
 * Verifies that a {@code @EvCache} annotated proxy method produces the expected
 * sequence of template-builder calls, including registration of an
 * {@link EvCacheProvider} in addition to the plain {@code @CacheProvider}.
 *
 * @author Tomasz Bak
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({MethodTemplateExecutor.class})
@PowerMockIgnore("javax.management.*")
public class EvCacheAnnotationTest {

    // All collaborators of MethodTemplateExecutor are mocked so the test can
    // assert the exact builder invocations derived from the annotations.
    @Mock
    private RibbonRequest ribbonRequestMock = createMock(RibbonRequest.class);

    @Mock
    private HttpRequestBuilder requestBuilderMock = createMock(HttpRequestBuilder.class);

    @Mock
    private Builder httpRequestTemplateBuilderMock = createMock(Builder.class);

    @Mock
    private HttpRequestTemplate httpRequestTemplateMock = createMock(HttpRequestTemplate.class);

    @Mock
    private HttpResourceGroup httpResourceGroupMock = createMock(HttpResourceGroup.class);

    @BeforeClass
    public static void setup() {
        // Ensure annotation processors (including the EVCache one, discovered via
        // ServiceLoader) are registered before any template is built.
        RibbonDynamicProxy.registerAnnotationProcessors(AnnotationProcessorsProvider.DEFAULT);
    }

    @Before
    public void setUp() throws Exception {
        // Common template -> request-builder wiring expected by every test case.
        expect(requestBuilderMock.build()).andReturn(ribbonRequestMock);
        expect(httpRequestTemplateBuilderMock.build()).andReturn(httpRequestTemplateMock);
        expect(httpRequestTemplateMock.requestBuilder()).andReturn(requestBuilderMock);
    }

    @Test
    public void testGetQueryWithDomainObjectResult() throws Exception {
        expectUrlBase("GET", "/movies/{id}");
        expect(requestBuilderMock.withRequestProperty("id", "id123")).andReturn(requestBuilderMock);
        expect(httpResourceGroupMock.newTemplateBuilder("findMovieById")).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withHeader("X-MyHeader1", "value1.1")).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withHeader("X-MyHeader1", "value1.2")).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withHeader("X-MyHeader2", "value2")).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withRequestCacheKey("findMovieById/{id}")).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withFallbackProvider(anyObject(MovieFallbackHandler.class))).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withResponseValidator(anyObject(SampleHttpResponseValidator.class))).andReturn(httpRequestTemplateBuilderMock);
        // Two cache providers are expected: one from @CacheProvider and one from @EvCache.
        expect(httpRequestTemplateBuilderMock.withCacheProvider(anyObject(String.class), anyObject(CacheProvider.class))).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withCacheProvider(anyObject(String.class), anyObject(EvCacheProvider.class))).andReturn(httpRequestTemplateBuilderMock);
        replayAll();
        MethodTemplateExecutor executor = createExecutor(SampleMovieServiceWithEVCache.class, "findMovieById");
        RibbonRequest ribbonRequest = executor.executeFromTemplate(new Object[]{"id123"});
        verifyAll();
        assertEquals(ribbonRequestMock, ribbonRequest);
    }

    // Expects the HTTP method / URI template calls produced by the @Http annotation.
    private void expectUrlBase(String method, String path) {
        expect(httpRequestTemplateBuilderMock.withMethod(method)).andReturn(httpRequestTemplateBuilderMock);
        expect(httpRequestTemplateBuilderMock.withUriTemplate(path)).andReturn(httpRequestTemplateBuilderMock);
    }

    // Builds an executor for the named method of the given proxy interface.
    private MethodTemplateExecutor createExecutor(Class<?> clientInterface, String methodName) {
        MethodTemplate methodTemplate = new MethodTemplate(methodByName(clientInterface, methodName));
        return new MethodTemplateExecutor(httpResourceGroupMock, methodTemplate, AnnotationProcessorsProvider.DEFAULT);
    }
}
| 7,222 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/SampleCacheProviderFactory.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.sample;
import com.netflix.ribbon.CacheProvider;
import com.netflix.ribbon.CacheProviderFactory;
import rx.Observable;
import java.util.Map;
/**
 * Test-fixture {@link CacheProviderFactory} that hands out {@link SampleCacheProvider}
 * instances for the {@code @CacheProvider} annotation tests.
 *
 * @author Tomasz Bak
 */
public class SampleCacheProviderFactory implements CacheProviderFactory<Object> {

    @Override
    public CacheProvider<Object> createCacheProvider() {
        // A fresh provider per call; the stub keeps no state.
        SampleCacheProvider provider = new SampleCacheProvider();
        return provider;
    }

    /**
     * Stub provider: every lookup yields {@code null} (no real cache behind it).
     */
    public static class SampleCacheProvider implements CacheProvider<Object> {
        @Override
        public Observable<Object> get(String key, Map requestProperties) {
            return null;
        }
    }
}
| 7,223 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/ResourceGroupClasses.java | package com.netflix.ribbon.proxy.sample;
import com.netflix.ribbon.http.HttpResourceGroup;
/**
 * Resource-group test fixtures referenced from {@code @ResourceGroup} annotations.
 *
 * @author Tomasz Bak
 */
public class ResourceGroupClasses {
    /**
     * Minimal {@link HttpResourceGroup} bound to the logical client name
     * {@code "myTestGroup"}.
     */
    public static class SampleHttpResourceGroup extends HttpResourceGroup {
        public SampleHttpResourceGroup() {
            super("myTestGroup");
        }
    }
}
| 7,224 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/MovieTransformer.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.sample;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.reactivex.netty.channel.ContentTransformer;
/**
 * Content-transformer test fixture referenced from {@code @ContentTransformerClass}
 * annotations; no real serialization is performed.
 *
 * @author Tomasz Bak
 */
public class MovieTransformer implements ContentTransformer<MovieTransformer> {
    @Override
    public ByteBuf call(MovieTransformer toTransform, ByteBufAllocator byteBufAllocator) {
        // Stub: the tests only check that this class is wired in, not its output.
        return null;
    }
}
| 7,225 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/MovieServiceInterfaces.java | package com.netflix.ribbon.proxy.sample;
import com.netflix.ribbon.RibbonRequest;
import com.netflix.ribbon.proxy.annotation.*;
import com.netflix.ribbon.proxy.annotation.ClientProperties.Property;
import com.netflix.ribbon.proxy.annotation.Http.Header;
import com.netflix.ribbon.proxy.annotation.Http.HttpMethod;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.MovieFallbackHandler;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.SampleHttpResponseValidator;
import io.netty.buffer.ByteBuf;
import rx.Observable;
import java.util.concurrent.atomic.AtomicReference;
import static com.netflix.ribbon.proxy.sample.ResourceGroupClasses.SampleHttpResourceGroup;
/**
 * Collection of Ribbon proxy interface definitions - both valid ones and
 * deliberately broken ones - used as fixtures by the annotation-processing tests.
 *
 * @author Tomasz Bak
 */
public class MovieServiceInterfaces {

    // Fully-featured service: client properties, headers, Hystrix and cache providers.
    @ClientProperties(properties = {
            @Property(name="ReadTimeout", value="2000"),
            @Property(name="ConnectTimeout", value="1000"),
            @Property(name="MaxAutoRetriesNextServer", value="2")
    }, exportToArchaius = true)
    public static interface SampleMovieService {

        @TemplateName("findMovieById")
        @Http(
                method = HttpMethod.GET,
                uri = "/movies/{id}",
                headers = {
                        @Header(name = "X-MyHeader1", value = "value1.1"),
                        @Header(name = "X-MyHeader1", value = "value1.2"),
                        @Header(name = "X-MyHeader2", value = "value2")
                })
        @Hystrix(
                cacheKey = "findMovieById/{id}",
                validator = SampleHttpResponseValidator.class,
                fallbackHandler = MovieFallbackHandler.class)
        @CacheProvider(key = "findMovieById_{id}", provider = SampleCacheProviderFactory.class)
        RibbonRequest<ByteBuf> findMovieById(@Var("id") String id);

        @TemplateName("findRawMovieById")
        @Http(method = HttpMethod.GET, uri = "/rawMovies/{id}")
        RibbonRequest<ByteBuf> findRawMovieById(@Var("id") String id);

        @TemplateName("findMovie")
        @Http(method = HttpMethod.GET, uri = "/movies?name={name}&author={author}")
        RibbonRequest<ByteBuf> findMovie(@Var("name") String name, @Var("author") String author);

        @TemplateName("registerMovie")
        @Http(method = HttpMethod.POST, uri = "/movies")
        @Hystrix(cacheKey = "registerMovie", fallbackHandler = MovieFallbackHandler.class)
        @ContentTransformerClass(MovieTransformer.class)
        RibbonRequest<ByteBuf> registerMovie(@Content Movie movie);

        // No @TemplateName: template name is expected to be derived from the method name.
        @Http(method = HttpMethod.PUT, uri = "/movies/{id}")
        @ContentTransformerClass(MovieTransformer.class)
        RibbonRequest<ByteBuf> updateMovie(@Var("id") String id, @Content Movie movie);

        @Http(method = HttpMethod.PATCH, uri = "/movies/{id}")
        @ContentTransformerClass(MovieTransformer.class)
        RibbonRequest<ByteBuf> updateMoviePartial(@Var("id") String id, @Content Movie movie);

        @TemplateName("registerTitle")
        @Http(method = HttpMethod.POST, uri = "/titles")
        @Hystrix(cacheKey = "registerTitle", fallbackHandler = MovieFallbackHandler.class)
        RibbonRequest<ByteBuf> registerTitle(@Content String title);

        @TemplateName("registerByteBufBinary")
        @Http(method = HttpMethod.POST, uri = "/binaries/byteBuf")
        @Hystrix(cacheKey = "registerByteBufBinary", fallbackHandler = MovieFallbackHandler.class)
        RibbonRequest<ByteBuf> registerByteBufBinary(@Content ByteBuf binary);

        @TemplateName("registerByteArrayBinary")
        @Http(method = HttpMethod.POST, uri = "/binaries/byteArray")
        @Hystrix(cacheKey = "registerByteArrayBinary", fallbackHandler = MovieFallbackHandler.class)
        RibbonRequest<ByteBuf> registerByteArrayBinary(@Content byte[] binary);

        @TemplateName("deleteMovie")
        @Http(method = HttpMethod.DELETE, uri = "/movies/{id}")
        RibbonRequest<ByteBuf> deleteMovie(@Var("id") String id);
    }

    // Minimal variant; note both methods share the same template name.
    public static interface ShortMovieService {
        @TemplateName("findMovieById")
        @Http(method = HttpMethod.GET, uri = "/movies/{id}")
        RibbonRequest<ByteBuf> findMovieById(@Var("id") String id);

        @TemplateName("findMovieById")
        @Http(method = HttpMethod.GET, uri = "/movies")
        RibbonRequest<ByteBuf> findAll();
    }

    // Each method here violates a proxy contract; used to test error reporting.
    public static interface BrokenMovieService {

        @Http(method = HttpMethod.GET)
        Movie returnTypeNotRibbonRequest();

        Movie missingHttpAnnotation();

        @Http(method = HttpMethod.GET)
        RibbonRequest<ByteBuf> multipleContentParameters(@Content Movie content1, @Content Movie content2);
    }

    @ResourceGroup(name = "testResourceGroup")
    public static interface SampleMovieServiceWithResourceGroupNameAnnotation {
    }

    @ResourceGroup(resourceGroupClass = SampleHttpResourceGroup.class)
    public static interface SampleMovieServiceWithResourceGroupClassAnnotation {
    }

    // Invalid: name and resourceGroupClass are mutually exclusive.
    @ResourceGroup(name = "testResourceGroup", resourceGroupClass = SampleHttpResourceGroup.class)
    public static interface BrokenMovieServiceWithResourceGroupNameAndClassAnnotation {
    }

    @ResourceGroup(name = "testResourceGroup")
    public static interface TemplateNameDerivedFromMethodName {
        @Http(method = HttpMethod.GET, uri = "/template")
        RibbonRequest<ByteBuf> myTemplateName();
    }

    // Exercises @Hystrix with each optional element supplied in isolation.
    @ResourceGroup(name = "testResourceGroup")
    public static interface HystrixOptionalAnnotationValues {

        @TemplateName("hystrix1")
        @Http(method = HttpMethod.GET, uri = "/hystrix/1")
        @Hystrix(cacheKey = "findMovieById/{id}")
        RibbonRequest<ByteBuf> hystrixWithCacheKeyOnly();

        @TemplateName("hystrix2")
        @Http(method = HttpMethod.GET, uri = "/hystrix/2")
        @Hystrix(validator = SampleHttpResponseValidator.class)
        RibbonRequest<ByteBuf> hystrixWithValidatorOnly();

        @TemplateName("hystrix3")
        @Http(method = HttpMethod.GET, uri = "/hystrix/3")
        @Hystrix(fallbackHandler = MovieFallbackHandler.class)
        RibbonRequest<ByteBuf> hystrixWithFallbackHandlerOnly();
    }

    // Covers the supported @Content parameter types (and one unsupported case).
    @ResourceGroup(name = "testResourceGroup")
    public static interface PostsWithDifferentContentTypes {

        @TemplateName("rawContentSource")
        @Http(method = HttpMethod.POST, uri = "/content/rawContentSource")
        @ContentTransformerClass(MovieTransformer.class)
        RibbonRequest<ByteBuf> postwithRawContentSource(AtomicReference<Object> arg1, int arg2, @Content Observable<Movie> movie);

        @TemplateName("byteBufContent")
        @Http(method = HttpMethod.POST, uri = "/content/byteBufContent")
        RibbonRequest<ByteBuf> postwithByteBufContent(@Content ByteBuf byteBuf);

        @TemplateName("byteArrayContent")
        @Http(method = HttpMethod.POST, uri = "/content/byteArrayContent")
        RibbonRequest<ByteBuf> postwithByteArrayContent(@Content byte[] bytes);

        @TemplateName("stringContent")
        @Http(method = HttpMethod.POST, uri = "/content/stringContent")
        RibbonRequest<ByteBuf> postwithStringContent(@Content String content);

        @TemplateName("movieContent")
        @Http(method = HttpMethod.POST, uri = "/content/movieContent")
        @ContentTransformerClass(MovieTransformer.class)
        RibbonRequest<ByteBuf> postwithMovieContent(@Content Movie movie);

        // Custom type without a @ContentTransformerClass - presumably rejected; see tests.
        @TemplateName("movieContentBroken")
        @Http(method = HttpMethod.POST, uri = "/content/movieContentBroken")
        RibbonRequest<ByteBuf> postwithMovieContentBroken(@Content Movie movie);
    }
}
| 7,226 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/EvCacheClasses.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.sample;
import com.netflix.evcache.EVCacheTranscoder;
import net.spy.memcached.CachedData;
/**
 * EVCache-related test fixtures.
 *
 * @author Tomasz Bak
 */
public class EvCacheClasses {
    /**
     * Transcoder stub referenced from {@code @EvCache(transcoder = ...)};
     * performs no real encoding or decoding.
     */
    public static class SampleEVCacheTranscoder implements EVCacheTranscoder<Object> {
        @Override
        public boolean asyncDecode(CachedData d) {
            return false;
        }

        @Override
        public CachedData encode(Object o) {
            return null;
        }

        @Override
        public Object decode(CachedData d) {
            return null;
        }

        @Override
        public int getMaxSize() {
            return 0;
        }
    }
}
| 7,227 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/HystrixHandlers.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.sample;
import com.netflix.hystrix.HystrixInvokableInfo;
import com.netflix.ribbon.ServerError;
import com.netflix.ribbon.UnsuccessfulResponseException;
import com.netflix.ribbon.http.HttpResponseValidator;
import com.netflix.ribbon.hystrix.FallbackHandler;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClientResponse;
import rx.Observable;
import java.util.Map;
/**
 * Hystrix-related test fixtures: a no-op response validator and a fallback
 * handler stub.
 *
 * @author Tomasz Bak
 */
public class HystrixHandlers {
    /** Validator stub: accepts every response (never throws). */
    public static class SampleHttpResponseValidator implements HttpResponseValidator {
        @Override
        public void validate(HttpClientResponse<ByteBuf> response) throws UnsuccessfulResponseException, ServerError {
            // Intentionally empty - any response is considered valid in tests.
        }
    }

    /** Fallback stub: yields no real fallback observable. */
    public static class MovieFallbackHandler implements FallbackHandler<Movie> {
        @Override
        public Observable<Movie> getFallback(HystrixInvokableInfo<?> hystrixInfo, Map<String, Object> requestProperties) {
            return null;
        }
    }
}
| 7,228 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/SampleMovieServiceWithEVCache.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.sample;
import com.netflix.ribbon.RibbonRequest;
import com.netflix.ribbon.proxy.annotation.CacheProvider;
import com.netflix.ribbon.proxy.annotation.EvCache;
import com.netflix.ribbon.proxy.annotation.Http;
import com.netflix.ribbon.proxy.annotation.Http.Header;
import com.netflix.ribbon.proxy.annotation.Http.HttpMethod;
import com.netflix.ribbon.proxy.annotation.Hystrix;
import com.netflix.ribbon.proxy.annotation.TemplateName;
import com.netflix.ribbon.proxy.annotation.Var;
import com.netflix.ribbon.proxy.sample.EvCacheClasses.SampleEVCacheTranscoder;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.MovieFallbackHandler;
import com.netflix.ribbon.proxy.sample.HystrixHandlers.SampleHttpResponseValidator;
import com.netflix.ribbon.proxy.sample.MovieServiceInterfaces.SampleMovieService;
import io.netty.buffer.ByteBuf;
/**
 * Variant of {@link SampleMovieService} whose {@code findMovieById} additionally
 * carries an {@link EvCache} annotation, so the template ends up with both a
 * plain {@code @CacheProvider} and an EVCache-backed provider.
 *
 * @author Allen Wang
 */
public interface SampleMovieServiceWithEVCache extends SampleMovieService {

    @TemplateName("findMovieById")
    @Http(
            method = HttpMethod.GET,
            uri = "/movies/{id}",
            headers = {
                    @Header(name = "X-MyHeader1", value = "value1.1"),
                    @Header(name = "X-MyHeader1", value = "value1.2"),
                    @Header(name = "X-MyHeader2", value = "value2")
            })
    @Hystrix(
            cacheKey = "findMovieById/{id}",
            validator = SampleHttpResponseValidator.class,
            fallbackHandler = MovieFallbackHandler.class)
    @CacheProvider(key = "findMovieById_{id}", provider = SampleCacheProviderFactory.class)
    @EvCache(name = "movie-cache", appName = "movieService", key = "movie-{id}", ttl = 50,
            enableZoneFallback = true, transcoder = SampleEVCacheTranscoder.class)
    RibbonRequest<ByteBuf> findMovieById(@Var("id") String id);
}
| 7,229 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/proxy/sample/Movie.java | package com.netflix.ribbon.proxy.sample;
/**
 * Minimal domain object used as a request/response payload in the proxy tests;
 * intentionally has no state or behavior.
 *
 * @author Tomasz Bak
 */
public class Movie {
}
| 7,230 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/evache/ServiceLoaderTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
import com.netflix.ribbon.proxy.processor.AnnotationProcessor;
import com.netflix.ribbon.proxy.processor.AnnotationProcessorsProvider;
import com.netflix.ribbon.proxy.processor.EVCacheAnnotationProcessor;
import org.junit.Test;
import java.util.List;
import static junit.framework.TestCase.assertTrue;
/**
* @author Allen Wang
*/
public class ServiceLoaderTest {
@Test
public void testServiceLoader() {
AnnotationProcessorsProvider annotations = AnnotationProcessorsProvider.DEFAULT;
List<AnnotationProcessor> processors = annotations.getProcessors();
boolean hasEVCacheProcessor = false;
for (AnnotationProcessor processor: processors) {
Class<?> clazz = processor.getClass();
if (clazz.equals(EVCacheAnnotationProcessor.class)) {
hasEVCacheProcessor = true;
break;
}
}
assertTrue(hasEVCacheProcessor);
}
}
| 7,231 |
0 | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/test/java/com/netflix/ribbon/evache/EvCacheProviderTest.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.ribbon.testutils.TestUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.api.easymock.annotation.Mock;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import rx.Notification;
import rx.Observable;
import rx.Subscription;
import rx.functions.Func0;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static junit.framework.Assert.assertEquals;
import static junit.framework.Assert.assertTrue;
import static org.easymock.EasyMock.*;
import static org.powermock.api.easymock.PowerMock.*;
/**
 * Unit tests for {@link EvCacheProvider}. PowerMock intercepts construction of
 * {@link EVCacheImpl} so the provider's internally-built cache is replaced with
 * a mock, letting each test script the cache future's behavior.
 *
 * @author Tomasz Bak
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({EVCache.Builder.class, EVCacheImpl.class})
public class EvCacheProviderTest {

    @Mock
    private EVCacheImpl evCacheImplMock;

    @Mock
    private Future<String> cacheFutureMock;

    @Mock
    private EVCacheTranscoder<String> transcoderMock;

    @Before
    public void setUp() throws Exception {
        // Any `new EVCacheImpl(...)` performed inside EvCacheProvider returns the mock.
        PowerMock.mockStatic(EVCacheImpl.class);
        expectNew(EVCacheImpl.class,
                new Class[]{String.class, String.class, int.class, EVCacheTranscoder.class, boolean.class},
                anyObject(String.class), anyObject(String.class), anyInt(), anyObject(EVCacheTranscoder.class), anyBoolean()
        ).andReturn(evCacheImplMock);
    }

    // Completed future holding a value -> observable emits that value.
    @Test
    public void testAsynchronousAccessFromCache() throws Exception {
        expect(evCacheImplMock.<String>getAsynchronous("test1")).andReturn(cacheFutureMock);
        expect(cacheFutureMock.isDone()).andReturn(true);
        expect(cacheFutureMock.isCancelled()).andReturn(false);
        expect(cacheFutureMock.get()).andReturn("value1");
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, null, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        assertEquals("value1", cacheValue.toBlocking().first());
    }

    // Same as above, but a transcoder is configured and must be passed through.
    @Test
    public void testAsynchronousAccessWithTranscoderFromCache() throws Exception {
        expect(evCacheImplMock.getAsynchronous("test1", transcoderMock)).andReturn(cacheFutureMock);
        expect(cacheFutureMock.isDone()).andReturn(true);
        expect(cacheFutureMock.isCancelled()).andReturn(false);
        expect(cacheFutureMock.get()).andReturn("value1");
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, transcoderMock, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        assertEquals("value1", cacheValue.toBlocking().first());
    }

    // Future completes with null -> observable errors with CacheMissException.
    @Test
    public void testCacheMiss() throws Exception {
        expect(evCacheImplMock.<String>getAsynchronous("test1")).andReturn(cacheFutureMock);
        expect(cacheFutureMock.isDone()).andReturn(true);
        expect(cacheFutureMock.isCancelled()).andReturn(false);
        expect(cacheFutureMock.get()).andReturn(null);
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, null, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        assertTrue(cacheValue.materialize().toBlocking().first().getThrowable() instanceof CacheMissException);
    }

    // EVCacheException on submission -> observable errors with CacheFaultException.
    @Test
    public void testFailedAsynchronousAccessFromCache() throws Exception {
        expect(evCacheImplMock.<String>getAsynchronous("test1")).andThrow(new EVCacheException("cache error"));
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, null, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        Notification<Object> notification = cacheValue.materialize().toBlocking().first();
        assertTrue(notification.getThrowable() instanceof CacheFaultException);
    }

    // Cancelled future -> observable errors with CacheFaultException.
    @Test
    public void testCanceledFuture() throws Exception {
        expect(evCacheImplMock.getAsynchronous("test1", transcoderMock)).andReturn(cacheFutureMock);
        expect(cacheFutureMock.isDone()).andReturn(true);
        expect(cacheFutureMock.isCancelled()).andReturn(true);
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, transcoderMock, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        assertTrue(cacheValue.materialize().toBlocking().first().getThrowable() instanceof CacheFaultException);
    }

    // Future.get() throwing ExecutionException -> cause surfaces as RuntimeException.
    @Test
    public void testExceptionResultInFuture() throws Exception {
        expect(evCacheImplMock.getAsynchronous("test1", transcoderMock)).andReturn(cacheFutureMock);
        expect(cacheFutureMock.isDone()).andReturn(true);
        expect(cacheFutureMock.isCancelled()).andReturn(false);
        expect(cacheFutureMock.get()).andThrow(new ExecutionException(new RuntimeException("operation failed")));
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, transcoderMock, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        assertTrue(cacheValue.materialize().toBlocking().first().getThrowable() instanceof RuntimeException);
    }

    // Unsubscribing before completion must cancel the underlying future; the
    // verification is polled because cancellation happens asynchronously.
    @Test
    public void testUnsubscribedBeforeFutureCompletes() throws Exception {
        expect(evCacheImplMock.getAsynchronous("test1", transcoderMock)).andReturn(cacheFutureMock);
        expect(cacheFutureMock.cancel(true)).andReturn(true);
        replayAll();
        EvCacheOptions options = new EvCacheOptions("testApp", "test-cache", true, 100, transcoderMock, "test{id}");
        EvCacheProvider<Object> cacheProvider = new EvCacheProvider<Object>(options);
        Observable<Object> cacheValue = cacheProvider.get("test1", null);
        Subscription subscription = cacheValue.subscribe();
        subscription.unsubscribe();
        TestUtils.waitUntilTrueOrTimeout(10000, new Func0<Boolean>() {
            @Override
            public Boolean call() {
                try {
                    verifyAll();
                    return true;
                } catch (Throwable e) {
                    e.printStackTrace();
                    return false;
                }
            }
        });
    }
}
| 7,232 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/proxy/processor/EVCacheAnnotationProcessor.java | package com.netflix.ribbon.proxy.processor;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.ribbon.ResourceGroup.GroupBuilder;
import com.netflix.ribbon.ResourceGroup.TemplateBuilder;
import com.netflix.ribbon.RibbonResourceFactory;
import com.netflix.ribbon.evache.EvCacheOptions;
import com.netflix.ribbon.evache.EvCacheProvider;
import com.netflix.ribbon.proxy.ProxyAnnotationException;
import com.netflix.ribbon.proxy.Utils;
import com.netflix.ribbon.proxy.annotation.EvCache;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
/**
 * Annotation processor that wires {@link EvCache}-annotated proxy methods to an
 * {@link EvCacheProvider}-backed cache. Providers are pooled per
 * (appName, cacheName) pair so methods hitting the same cache share one provider.
 *
 * @author Allen Wang
 */
public class EVCacheAnnotationProcessor implements AnnotationProcessor<GroupBuilder, TemplateBuilder> {

    /** Pool key identifying an EVCache by application name plus cache name. */
    private static final class CacheId {
        private final String appName;
        private final String cacheName;

        CacheId(String appName, String cacheName) {
            this.appName = appName;
            this.cacheName = cacheName;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            CacheId cacheId = (CacheId) o;
            if (!appName.equals(cacheId.appName)) {
                return false;
            }
            return cacheName.equals(cacheId.cacheName);
        }

        @Override
        public int hashCode() {
            int result = appName.hashCode();
            result = 31 * result + cacheName.hashCode();
            return result;
        }
    }

    // Shared providers keyed by (appName, cacheName); avoids building a new
    // provider for every annotated method that targets the same cache.
    private final Map<CacheId, EvCacheProvider<?>> evCacheProviderPool =
            new HashMap<CacheId, EvCacheProvider<?>>();

    /**
     * Registers an EVCache-backed cache provider on the template when the method
     * carries an {@link EvCache} annotation; no-op otherwise.
     *
     * @throws ProxyAnnotationException if more than one transcoder class is declared
     */
    @Override
    public void process(String templateName, TemplateBuilder templateBuilder, Method method) {
        EvCache annotation = method.getAnnotation(EvCache.class);
        if (annotation == null) {
            return;
        }
        Class<? extends EVCacheTranscoder<?>>[] transcoderClasses = annotation.transcoder();
        EVCacheTranscoder<?> transcoder;
        if (transcoderClasses.length == 0) {
            transcoder = null;
        } else if (transcoderClasses.length > 1) {
            throw new ProxyAnnotationException("Multiple transcoders defined on method " + method.getName());
        } else {
            transcoder = Utils.newInstance(transcoderClasses[0]);
        }
        EvCacheOptions evCacheOptions = new EvCacheOptions(
                annotation.appName(),
                annotation.name(),
                annotation.enableZoneFallback(),
                annotation.ttl(),
                transcoder,
                annotation.key());
        // NOTE: the original guarded this with `if (evCacheOptions != null)`, which is
        // always true right after `new`; the dead check has been removed.
        CacheId cacheId = new CacheId(evCacheOptions.getAppName(), evCacheOptions.getCacheName());
        EvCacheProvider<?> provider = evCacheProviderPool.get(cacheId);
        if (provider == null) {
            // Parameterized construction (was raw) to avoid an unchecked warning.
            provider = new EvCacheProvider<Object>(evCacheOptions);
            evCacheProviderPool.put(cacheId, provider);
        }
        templateBuilder.withCacheProvider(evCacheOptions.getCacheKeyTemplate(), provider);
    }

    @Override
    public void process(String groupName, GroupBuilder groupBuilder, RibbonResourceFactory factory, Class<?> interfaceClass) {
        // EVCache configuration is method-scoped; nothing to do at the group level.
    }
}
| 7,233 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/proxy | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/proxy/annotation/EvCache.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.proxy.annotation;
import com.netflix.evcache.EVCacheTranscoder;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a proxy interface method as backed by an EVCache cache. The
 * annotation processor turns these values into {@code EvCacheOptions} and
 * attaches a cache provider to the method's resource template.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface EvCache {
    /** Cache name within the EVCache application. */
    String name();

    /** EVCache application name. */
    String appName();

    /** Cache key template used to look up values for this method. */
    String key();

    /** Time to live for cached values; defaults to 100. */
    int ttl() default 100;

    /** Whether to enable zone fallback on the EVCache client; defaults to true. */
    boolean enableZoneFallback() default true;

    /**
     * Optional transcoder used to (de)serialize cached values. At most one
     * class may be supplied; more than one is rejected at processing time.
     */
    Class<? extends EVCacheTranscoder<?>>[] transcoder() default {};
}
| 7,234 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/evache/CacheMissException.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
/**
 * Thrown when a cache lookup completes normally but no value exists for the
 * requested key (a cache miss), as opposed to {@link CacheFaultException}
 * which signals an abnormal cache failure.
 *
 * @author Tomasz Bak
 */
public class CacheMissException extends RuntimeException {
    // Explicit serialVersionUID for consistency with CacheFaultException;
    // avoids relying on a compiler-generated value.
    private static final long serialVersionUID = 1L;
}
| 7,235 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/evache/CacheFaultException.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
/**
 * Thrown when a cache operation fails abnormally (for example an underlying
 * EVCache error or a canceled request), as opposed to a plain cache miss.
 *
 * @author Tomasz Bak
 */
public class CacheFaultException extends RuntimeException {
    private static final long serialVersionUID = -1672764803141328757L;

    /** @param message description of the failure */
    public CacheFaultException(String message) {
        super(message);
    }

    /**
     * @param message description of the failure
     * @param cause   underlying error that triggered the fault
     */
    public CacheFaultException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,236 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/evache/EvCacheProvider.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.ribbon.CacheProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
/**
 * {@link CacheProvider} implementation backed by Netflix EVCache.
 * Asynchronous gets are bridged to rx {@link Observable}s via a single shared
 * daemon thread that polls the outstanding EVCache futures.
 *
 * @author Tomasz Bak
 */
public class EvCacheProvider<T> implements CacheProvider<T> {
    private static final Logger LOGGER = LoggerFactory.getLogger(EvCacheProvider.class);

    // Poll period, in milliseconds, between scans of the watched futures.
    private static final long WATCH_INTERVAL = 1;

    // Single daemon thread shared by all provider instances; started once
    // when the class is loaded.
    private static final FutureObserver FUTURE_OBSERVER;
    static {
        FUTURE_OBSERVER = new FutureObserver();
        FUTURE_OBSERVER.start();
    }

    private final EvCacheOptions options;
    private final EVCache evCache;

    /**
     * Builds the underlying {@link EVCache} client from the supplied options
     * (app/cache name, default TTL, optional zone fallback).
     */
    public EvCacheProvider(EvCacheOptions options) {
        this.options = options;
        EVCache.Builder builder = new EVCache.Builder();
        if (options.isEnableZoneFallback()) {
            builder.enableZoneFallback();
        }
        builder.setDefaultTTL(options.getTimeToLive());
        builder.setAppName(options.getAppName());
        builder.setCacheName(options.getCacheName());
        evCache = builder.build();
    }

    /**
     * Asynchronously fetches the value for {@code key}. The observable emits
     * the cached value and completes, errors with {@link CacheMissException}
     * when no value exists, or errors with {@link CacheFaultException} on an
     * EVCache failure. {@code requestProperties} is accepted but not used here.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Observable<T> get(final String key, Map<String, Object> requestProperties) {
        return Observable.create(new OnSubscribe<T>() {
            @Override
            public void call(Subscriber<? super T> subscriber) {
                Future<T> getFuture;
                try {
                    if (options.getTranscoder() == null) {
                        getFuture = evCache.getAsynchronous(key);
                    } else {
                        getFuture = (Future<T>) evCache.getAsynchronous(key, options.getTranscoder());
                    }
                    // Completion is delivered later by the shared observer thread.
                    FUTURE_OBSERVER.watchFuture(getFuture, subscriber);
                } catch (EVCacheException e) {
                    subscriber.onError(new CacheFaultException("EVCache exception when getting value for key " + key, e));
                }
            }
        });
    }

    /**
     * Daemon thread that busy-polls registered futures every
     * {@link #WATCH_INTERVAL} ms and pushes each outcome to its subscriber.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    static final class FutureObserver extends Thread {
        private final Map<Future, Subscriber> futureMap = new ConcurrentHashMap<Future, Subscriber>();

        FutureObserver() {
            super("EvCache-Future-Observer");
            setDaemon(true); // must not keep the JVM alive
        }

        @Override
        public void run() {
            while (true) {
                for (Map.Entry<Future, Subscriber> f : futureMap.entrySet()) {
                    Future<?> future = f.getKey();
                    Subscriber subscriber = f.getValue();
                    if (subscriber.isUnsubscribed()) {
                        // Caller lost interest; cancel the pending request.
                        future.cancel(true);
                        futureMap.remove(future);
                    } else if (future.isDone()) {
                        try {
                            handleCompletedFuture(future, subscriber);
                        } catch (InterruptedException e) {
                            // NOTE(review): rethrowing here terminates the shared
                            // observer thread and strands all remaining watched
                            // futures — at odds with the "never terminate" intent
                            // below. Confirm whether this should log and continue.
                            Thread.currentThread().interrupt();
                            throw new RuntimeException(e);
                        } finally {
                            futureMap.remove(future);
                        }
                    }
                }
                try {
                    Thread.sleep(WATCH_INTERVAL);
                } catch (InterruptedException e) {
                    // Never terminate
                }
            }
        }

        /**
         * Pushes a finished future's outcome to its subscriber: cancellation
         * becomes a CacheFaultException, an execution failure propagates its
         * cause, a null value is reported as a cache miss, and a non-null
         * value is emitted followed by completion.
         */
        private static void handleCompletedFuture(Future future, Subscriber subscriber) throws InterruptedException {
            if (future.isCancelled()) {
                subscriber.onError(new CacheFaultException("cache get request canceled"));
            } else {
                try {
                    // Only called when future.isDone(), so get() should not block.
                    Object value = future.get();
                    if (value == null) {
                        subscriber.onError(new CacheMissException());
                    } else {
                        subscriber.onNext(value);
                        subscriber.onCompleted();
                    }
                } catch (ExecutionException e) {
                    subscriber.onError(e.getCause());
                }
            }
        }

        /** Registers a future to be observed until completion or unsubscription. */
        void watchFuture(Future future, Subscriber<?> subscriber) {
            futureMap.put(future, subscriber);
        }
    }
}
| 7,237 |
0 | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon | Create_ds/ribbon/ribbon-evcache/src/main/java/com/netflix/ribbon/evache/EvCacheOptions.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ribbon.evache;
import com.netflix.evcache.EVCacheTranscoder;
/**
 * Immutable value object bundling the configuration needed to build an
 * EVCache client: application and cache identity, zone fallback flag, TTL,
 * an optional transcoder and the cache key template.
 *
 * @author Tomasz Bak
 */
public class EvCacheOptions {
    private final String application;
    private final String cache;
    private final boolean zoneFallbackEnabled;
    private final int ttl;
    private final EVCacheTranscoder<?> customTranscoder;
    private final String keyTemplate;

    public EvCacheOptions(String appName, String cacheName, boolean enableZoneFallback, int timeToLive,
                          EVCacheTranscoder<?> transcoder, String cacheKeyTemplate) {
        this.application = appName;
        this.cache = cacheName;
        this.zoneFallbackEnabled = enableZoneFallback;
        this.ttl = timeToLive;
        this.customTranscoder = transcoder;
        this.keyTemplate = cacheKeyTemplate;
    }

    /** EVCache application name. */
    public String getAppName() {
        return application;
    }

    /** Cache name within the EVCache application. */
    public String getCacheName() {
        return cache;
    }

    /** Whether zone fallback should be enabled on the EVCache client. */
    public boolean isEnableZoneFallback() {
        return zoneFallbackEnabled;
    }

    /** Default time to live for cached values. */
    public int getTimeToLive() {
        return ttl;
    }

    /** Optional transcoder; may be null, in which case the cache default applies. */
    public EVCacheTranscoder<?> getTranscoder() {
        return customTranscoder;
    }

    /** Template used to derive cache keys for requests. */
    public String getCacheKeyTemplate() {
        return keyTemplate;
    }
}
| 7,238 |
0 | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss/server/EdgeHealthCheckHandler.java | package com.netflix.recipes.rss.server;
import com.netflix.karyon.spi.HealthCheckHandler;
public class EdgeHealthCheckHandler implements HealthCheckHandler {
public int getStatus() {
return 200;
}
}
| 7,239 |
0 | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss/server/EdgeServer.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.server;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConfigurationManager;
import com.netflix.karyon.spi.PropertyNames;
/**
* Edge Server
*
* @author Chris Fregly (chris@fregly.com)
*/
public class EdgeServer extends BaseJettyServer {

    private static final Logger logger = LoggerFactory
            .getLogger(EdgeServer.class);

    public EdgeServer() {
    }

    /**
     * Bootstraps the edge server: sets the archaius/karyon bootstrap
     * properties, propagates the deployment context to the eureka properties
     * and starts the embedded Jetty server.
     */
    public static void main(final String[] args) throws Exception {
        System.setProperty("archaius.deployment.applicationId", "edge");
        System.setProperty(PropertyNames.SERVER_BOOTSTRAP_BASE_PACKAGES_OVERRIDE, "com.netflix");
        String appId = ConfigurationManager.getDeploymentContext().getApplicationId();
        String env = ConfigurationManager.getDeploymentContext().getDeploymentEnvironment();
        // populate the eureka-specific properties
        System.setProperty("eureka.client.props", appId);
        if (env != null) {
            System.setProperty("eureka.environment", env);
        }
        // Use the previously-unused logger to record the startup context.
        logger.info("Starting edge server: applicationId={}, environment={}", appId, env);
        EdgeServer edgeServer = new EdgeServer();
        edgeServer.start();
    }
}
| 7,240 |
0 | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss/hystrix/DeleteRSSCommand.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.hystrix;
import com.google.common.base.Charsets;
import com.netflix.client.ClientFactory;
import com.netflix.hystrix.HystrixCommand;
import com.netflix.hystrix.HystrixCommandGroupKey;
import com.netflix.hystrix.HystrixCommandKey;
import com.netflix.hystrix.HystrixThreadPoolKey;
import com.netflix.niws.client.http.HttpClientRequest;
import com.netflix.niws.client.http.HttpClientRequest.Verb;
import com.netflix.niws.client.http.HttpClientResponse;
import com.netflix.niws.client.http.RestClient;
import com.netflix.recipes.rss.RSSConstants;
import org.apache.commons.io.IOUtils;
import java.net.URI;
/**
* Calls the middle tier Delete RSS entry point
*/
public class DeleteRSSCommand extends HystrixCommand<String> {
// RSS Feed Url (encoded)
private final String url;
public DeleteRSSCommand(String url) {
super (
Setter.withGroupKey(
HystrixCommandGroupKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_MUTATIONS_GROUP))
.andCommandKey(HystrixCommandKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_DEL_COMMAND_KEY))
.andThreadPoolKey(HystrixThreadPoolKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_THREAD_POOL)
)
);
this.url = url;
}
@Override
protected String run() {
try {
// The named client param must match the prefix for the ribbon
// configuration specified in the edge.properties file
RestClient client = (RestClient) ClientFactory.getNamedClient(RSSConstants.MIDDLETIER_REST_CLIENT);
HttpClientRequest request = HttpClientRequest
.newBuilder()
.setVerb(Verb.DELETE)
.setUri(new URI("/"
+ RSSConstants.MIDDLETIER_WEB_RESOURCE_ROOT_PATH
+ RSSConstants.RSS_ENTRY_POINT
+ "?url=" + url)
)
.build();
HttpClientResponse response = client.executeWithLoadBalancer(request);
return IOUtils.toString(response.getRawEntity(), Charsets.UTF_8);
} catch (Exception exc) {
throw new RuntimeException("Exception", exc);
}
}
@Override
protected String getFallback() {
// Empty json
return "{}";
}
} | 7,241 |
0 | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss/hystrix/GetRSSCommand.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.hystrix;
import com.google.common.base.Charsets;
import com.netflix.client.ClientFactory;
import com.netflix.hystrix.HystrixCommand;
import com.netflix.hystrix.HystrixCommandGroupKey;
import com.netflix.hystrix.HystrixCommandKey;
import com.netflix.hystrix.HystrixThreadPoolKey;
import com.netflix.niws.client.http.HttpClientRequest;
import com.netflix.niws.client.http.HttpClientRequest.Verb;
import com.netflix.niws.client.http.HttpClientResponse;
import com.netflix.niws.client.http.RestClient;
import com.netflix.recipes.rss.RSSConstants;
import org.apache.commons.io.IOUtils;
import java.net.URI;
/**
* Calls the middle tier Get RSS entry point
*/
public class GetRSSCommand extends HystrixCommand<String> {
public GetRSSCommand() {
super (
Setter.withGroupKey(
HystrixCommandGroupKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_GET_GROUP))
.andCommandKey(HystrixCommandKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_GET_COMMAND_KEY))
.andThreadPoolKey(HystrixThreadPoolKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_THREAD_POOL)
)
);
}
@Override
protected String run() {
try {
// The named client param must match the prefix for the ribbon
// configuration specified in the edge.properties file
RestClient client = (RestClient) ClientFactory.getNamedClient(RSSConstants.MIDDLETIER_REST_CLIENT);
HttpClientRequest request = HttpClientRequest
.newBuilder()
.setVerb(Verb.GET)
.setUri(new URI("/"
+ RSSConstants.MIDDLETIER_WEB_RESOURCE_ROOT_PATH
+ RSSConstants.RSS_ENTRY_POINT)
)
.build();
HttpClientResponse response = client.executeWithLoadBalancer(request);
return IOUtils.toString(response.getRawEntity(), Charsets.UTF_8);
} catch (Exception exc) {
throw new RuntimeException("Exception", exc);
}
}
@Override
protected String getFallback() {
// Empty json
return "{}";
}
} | 7,242 |
0 | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-edge/src/main/java/com/netflix/recipes/rss/hystrix/AddRSSCommand.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.hystrix;
import com.google.common.base.Charsets;
import com.netflix.client.ClientFactory;
import com.netflix.hystrix.HystrixCommand;
import com.netflix.hystrix.HystrixCommandGroupKey;
import com.netflix.hystrix.HystrixCommandKey;
import com.netflix.hystrix.HystrixThreadPoolKey;
import com.netflix.niws.client.http.HttpClientRequest;
import com.netflix.niws.client.http.HttpClientRequest.Verb;
import com.netflix.niws.client.http.HttpClientResponse;
import com.netflix.niws.client.http.RestClient;
import com.netflix.recipes.rss.RSSConstants;
import org.apache.commons.io.IOUtils;
import java.net.URI;
/**
* Calls the middle tier Add RSS entry point
*/
public class AddRSSCommand extends HystrixCommand<String> {
// RSS Feed Url (encoded)
private final String url;
public AddRSSCommand(String url) {
super (
Setter.withGroupKey(
HystrixCommandGroupKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_MUTATIONS_GROUP))
.andCommandKey(HystrixCommandKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_ADD_COMMAND_KEY))
.andThreadPoolKey(HystrixThreadPoolKey.Factory.asKey(RSSConstants.HYSTRIX_RSS_THREAD_POOL)
)
);
this.url = url;
}
@Override
protected String run() {
try {
/*
* The named client param must match the prefix for the ribbon
* configuration specified in the edge.properties file
*/
RestClient client = (RestClient) ClientFactory.getNamedClient(RSSConstants.MIDDLETIER_REST_CLIENT);
HttpClientRequest request = HttpClientRequest
.newBuilder()
.setVerb(Verb.POST)
.setUri(new URI("/"
+ RSSConstants.MIDDLETIER_WEB_RESOURCE_ROOT_PATH
+ RSSConstants.RSS_ENTRY_POINT
+ "?url=" + url))
.build();
HttpClientResponse response = client.executeWithLoadBalancer(request);
return IOUtils.toString(response.getRawEntity(), Charsets.UTF_8);
} catch (Exception exc) {
throw new RuntimeException("Exception occurred when adding a RSS feed", exc);
}
}
@Override
protected String getFallback() {
// Empty json
return "{}";
}
} | 7,243 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/Subscriptions.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
import java.util.List;
/**
* Represents a User and a list of subscribed feeds
*
* @author ppadmanabhan
*
*/
public interface Subscriptions {
    /**
     * @return UUID of the user
     */
    String getUser();

    /**
     * @return the RSS feeds the user is subscribed to, each with its contents
     */
    List<RSS> getSubscriptions();
}
| 7,244 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/RSS.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
import java.util.List;
/**
* Represents a RSS feed
*
* @author ppadmanabhan
*
*/
public interface RSS {
    /**
     * @return the url of the RSS feed
     */
    String getUrl();

    /**
     * @return the title of the RSS feed
     */
    String getTitle();

    /**
     * @return the list of items (stories) of the RSS feed
     */
    List<RSSItem> getItems();
}
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/RSSItem.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
/*
* Represents each item (story) within a RSS feed
*/
public interface RSSItem {
    /**
     * @return the title of the item
     */
    String getTitle();

    /**
     * @return the link to the actual story
     */
    String getLink();

    /**
     * @return the description of the item
     */
    String getDescription();
}
| 7,246 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/RSSStore.java | package com.netflix.recipes.rss;
import java.util.List;
/**
 * Persistence abstraction for a user's RSS feed subscriptions. Implemented by
 * a Cassandra-backed store and an in-memory store.
 */
public interface RSSStore {
    /**
     * Returns the RSS feed urls the given user has subscribed to.
     *
     * @param userId id of the user
     * @return the subscribed feed urls
     * @throws Exception if the backing store cannot be read
     */
    List<String> getSubscribedUrls(String userId) throws Exception;

    /**
     * Subscribes the user to the given feed url.
     *
     * @throws Exception if the backing store cannot be updated
     */
    void subscribeUrl(String userId, String url) throws Exception;

    /**
     * Removes the user's subscription to the given feed url.
     *
     * @throws Exception if the backing store cannot be updated
     */
    void unsubscribeUrl(String userId, String url) throws Exception;
}
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/impl/RSSItemImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.impl;
import com.netflix.recipes.rss.RSSItem;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
public class RSSItemImpl implements RSSItem {

    private final String itemTitle;
    private final String itemLink;
    private final String itemDescription;

    /** No-arg constructor with all-null fields (presumably required by JAXB). */
    public RSSItemImpl() {
        this(null, null, null);
    }

    public RSSItemImpl(String title, String link, String description) {
        this.itemTitle = title;
        this.itemLink = link;
        this.itemDescription = description;
    }

    /** Title of the item, serialized as the "title" element. */
    @XmlElement(name="title")
    public String getTitle() {
        return itemTitle;
    }

    /** Link to the full story, serialized as the "link" element. */
    @XmlElement(name="link")
    public String getLink() {
        return itemLink;
    }

    /** Description of the item, serialized as the "description" element. */
    @XmlElement(name="description")
    public String getDescription() {
        return itemDescription;
    }
}
| 7,248 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/impl/CassandraStoreImpl.java | package com.netflix.recipes.rss.impl;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.recipes.rss.RSSConstants;
import com.netflix.recipes.rss.RSSStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
public class CassandraStoreImpl implements RSSStore {
private static final Logger logger = LoggerFactory.getLogger(CassandraStoreImpl.class);
// Cassandra keyspace
private static Keyspace ks;
// Data model is documented in the wiki
private static final ColumnFamily<String, String> CF_SUBSCRIPTIONS = new ColumnFamily<String, String>("Subscriptions", StringSerializer.get(), StringSerializer.get());
/**
* Get the feed urls from Cassandra
*/
@Override
public List<String> getSubscribedUrls(String userId) throws Exception{
OperationResult<ColumnList<String>> response;
try {
response = getKeyspace().prepareQuery(CF_SUBSCRIPTIONS).getKey(userId).execute();
} catch (NotFoundException e) {
logger.error("No record found for this user: " + userId);
throw e;
} catch (Exception t) {
logger.error("Exception occurred when fetching from Cassandra: " + t);
throw t;
}
final List<String> items = new ArrayList<String>();
if (response != null) {
final ColumnList<String> columns = response.getResult();
for (Column<String> column : columns) {
items.add(column.getName());
}
}
return items;
}
/**
* Add feed url into Cassandra
*/
@Override
public void subscribeUrl(String userId, String url) throws Exception{
try {
OperationResult<Void> opr = getKeyspace().prepareColumnMutation(CF_SUBSCRIPTIONS, userId, url)
.putValue("1", null).execute();
logger.info("Time taken to add to Cassandra (in ms): " + opr.getLatency(TimeUnit.MILLISECONDS));
} catch (Exception e) {
logger.error("Exception occurred when writing to Cassandra: " + e);
throw e;
}
}
/**
* Delete feed url from Cassandra
*/
@Override
public void unsubscribeUrl(String userId, String url) throws Exception{
try {
OperationResult<Void> opr = getKeyspace().prepareColumnMutation(CF_SUBSCRIPTIONS, userId, url)
.deleteColumn().execute();
logger.info("Time taken to delete from Cassandra (in ms): " + opr.getLatency(TimeUnit.MILLISECONDS));
} catch (Exception e) {
logger.error("Exception occurred when writing to Cassandra: " + e);
throw e;
}
}
/**
* Connect to Cassandra
*/
private static Keyspace getKeyspace() throws Exception{
if (ks == null) {
try {
AstyanaxContext<Keyspace> context = new AstyanaxContext.Builder()
.forKeyspace(DynamicPropertyFactory.getInstance().getStringProperty(RSSConstants.CASSANDRA_KEYSPACE, null).get())
.withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
.setDiscoveryType(NodeDiscoveryType.RING_DESCRIBE)
)
.withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("MyConnectionPool")
.setPort(DynamicPropertyFactory.getInstance().getIntProperty(RSSConstants.CASSANDRA_PORT, 0).get())
.setMaxConnsPerHost(DynamicPropertyFactory.getInstance().getIntProperty(RSSConstants.CASSANDRA_MAXCONNSPERHOST, 1).get())
.setSeeds(DynamicPropertyFactory.getInstance().getStringProperty(RSSConstants.CASSANDRA_HOST, "").get() + ":" +
DynamicPropertyFactory.getInstance().getIntProperty(RSSConstants.CASSANDRA_PORT, 0).get()
)
)
.withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
.buildKeyspace(ThriftFamilyFactory.getInstance());
context.start();
ks = context.getEntity();
} catch (Exception e) {
logger.error("Exception occurred when initializing Cassandra keyspace: " + e);
throw e;
}
}
return ks;
}
} | 7,249 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/impl/RSSImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.impl;
import com.netflix.recipes.rss.RSS;
import com.netflix.recipes.rss.RSSItem;
import javax.xml.bind.annotation.*;
import java.util.List;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
public class RSSImpl implements RSS {

    private final String url;
    private final String title;
    private final List<RSSItem> items;

    // No-arg constructor with all-null fields; presumably required by JAXB — confirm.
    public RSSImpl() {
        this.url = null;
        this.title = null;
        this.items = null;
    }

    public RSSImpl(String url, String title, List<RSSItem> items) {
        this.url = url;
        this.title = title;
        this.items = items;
    }

    /** Feed url, serialized as the "url" element. */
    @XmlElement(name="url")
    public String getUrl() {
        return url;
    }

    /** Feed title, serialized as the "title" element. */
    @XmlElement(name="title")
    public String getTitle() {
        return title;
    }

    /** Feed items, serialized as "items" elements (concrete type RSSItemImpl). */
    @XmlElements({@XmlElement(name="items", type=RSSItemImpl.class)})
    public List<RSSItem> getItems() {
        return items;
    }
}
| 7,250 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/impl/InMemoryStoreImpl.java | package com.netflix.recipes.rss.impl;
import com.netflix.recipes.rss.RSSStore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * In-memory {@link RSSStore} mirroring the Cassandra data model:
 * userId -&gt; (feed url -&gt; marker value).
 */
public class InMemoryStoreImpl implements RSSStore {

    // Maintain the same data model as Cassandra.
    // NOTE(review): shared static state with no synchronization; confirm this
    // store is only used single-threaded, or switch to ConcurrentHashMap.
    private static final Map<String, HashMap<String, String>> map = new HashMap<String, HashMap<String, String>>();

    /** Returns the user's subscribed feed urls; empty list if the user is unknown. */
    @Override
    public List<String> getSubscribedUrls(String userId) {
        List<String> urls = new ArrayList<String>();
        HashMap<String, String> feeds = map.get(userId);
        if (feeds != null) {
            // Urls are the keys; copying keySet() replaces the original
            // entrySet() loop that discarded the values anyway.
            urls.addAll(feeds.keySet());
        }
        return urls;
    }

    @Override
    public void subscribeUrl(String userId, String url) {
        HashMap<String, String> feeds = map.get(userId);
        if (feeds == null) {
            feeds = new HashMap<String, String>(1);
            map.put(userId, feeds);
        }
        feeds.put(url, "1");
    }

    /** Removes the subscription; a no-op for unknown users (previously threw NPE). */
    @Override
    public void unsubscribeUrl(String userId, String url) {
        Map<String, String> feeds = map.get(userId);
        if (feeds != null) {
            feeds.remove(url);
        }
    }
}
| 7,251 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/impl/SubscriptionsImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.impl;
import com.netflix.recipes.rss.RSS;
import com.netflix.recipes.rss.Subscriptions;
import javax.xml.bind.annotation.*;
import java.util.List;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
public class SubscriptionsImpl implements Subscriptions {

    // Made final for immutability, consistent with RSSImpl and RSSItemImpl.
    private final String user;
    private final List<RSS> subscriptions;

    // No-arg constructor with all-null fields; presumably required by JAXB — confirm.
    public SubscriptionsImpl() {
        this.user = null;
        this.subscriptions = null;
    }

    public SubscriptionsImpl(String user, List<RSS> subscriptions) {
        this.user = user;
        this.subscriptions = subscriptions;
    }

    /** UUID of the user, serialized as the "user" element. */
    @XmlElement(name="user")
    public String getUser() {
        return user;
    }

    /** Subscribed feeds, serialized as "subscriptions" elements (concrete type RSSImpl). */
    @XmlElements({@XmlElement(name="subscriptions", type=RSSImpl.class)})
    public List<RSS> getSubscriptions() {
        return subscriptions;
    }
}
| 7,252 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/jersey | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/jersey/resources/MiddleTierResource.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.jersey.resources;
import com.netflix.recipes.rss.Subscriptions;
import com.netflix.recipes.rss.manager.RSSManager;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.*;
import com.netflix.servo.stats.StatsConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.net.URLDecoder;
import java.util.concurrent.TimeUnit;
/**
* Rest entry points for fetching/adding/deleting RSS feeds.
* RSS Edge service will be calling these APIs
*/
@Path("/middletier")
public class MiddleTierResource {
private static final Logger logger = LoggerFactory.getLogger(MiddleTierResource.class);
// JMX: com.netflix.servo.COUNTER.MiddleTierRSS_*
private static final Counter getRSSRequestCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_getRequestCounter").build());
private static final Counter addRSSRequestCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_addRequestCounter").build());
private static final Counter delRSSRequestCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_delRequestCounter").build());
// JMX: com.netflix.servo.COUNTER.MiddleTierRSS_*
private static final Counter getRSSErrorCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_getErrorCounter").build());
private static final Counter addRSSErrorCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_addErrorCounter").build());
private static final Counter delRSSErrorCounter = new BasicCounter(MonitorConfig.builder("MiddleTierRSS_delErrorCounter").build());
// JMX: com.netflix.servo.COUNTER.MiddleTierRSS_*
// JMX: com.netflix.servo.MiddleTierRSS_* (95th and 99th percentile)
private static final StatsTimer getRSSStatsTimer = new StatsTimer(MonitorConfig.builder("MiddleTierRSS_getStatsTimer").build(), new StatsConfig.Builder().build());
private static final StatsTimer addRSSStatsTimer = new StatsTimer(MonitorConfig.builder("MiddleTierRSS_addStatsTimer").build(), new StatsConfig.Builder().build());
private static final StatsTimer delRSSStatsTimer = new StatsTimer(MonitorConfig.builder("MiddleTierRSS_delStatsTimer").build(), new StatsConfig.Builder().build());
static {
DefaultMonitorRegistry.getInstance().register(getRSSRequestCounter);
DefaultMonitorRegistry.getInstance().register(addRSSRequestCounter);
DefaultMonitorRegistry.getInstance().register(delRSSRequestCounter);
DefaultMonitorRegistry.getInstance().register(getRSSErrorCounter);
DefaultMonitorRegistry.getInstance().register(addRSSErrorCounter);
DefaultMonitorRegistry.getInstance().register(delRSSErrorCounter);
DefaultMonitorRegistry.getInstance().register(getRSSStatsTimer);
DefaultMonitorRegistry.getInstance().register(addRSSStatsTimer);
DefaultMonitorRegistry.getInstance().register(delRSSStatsTimer);
}
public MiddleTierResource() {
}
@GET
@Path("/rss/user/{user}")
@Produces({MediaType.APPLICATION_JSON})
public Response fetchSubscriptions (final @PathParam("user") String user) {
// Start timer
Stopwatch stopwatch = getRSSStatsTimer.start();
try {
getRSSRequestCounter.increment();
Subscriptions subscriptions = RSSManager.getInstance().getSubscriptions(user);
return Response.ok(subscriptions).build();
} catch (Exception e) {
logger.error("Exception occurred when fetching subscriptions", e);
getRSSErrorCounter.increment();
return Response.serverError().build();
} finally {
stopwatch.stop();
getRSSStatsTimer.record(stopwatch.getDuration(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
}
}
@POST
@Path("/rss/user/{user}")
@Produces({MediaType.APPLICATION_JSON})
public Response subscribe (
final @QueryParam("url") String url,
final @PathParam("user") String user) {
// Start timer
Stopwatch stopwatch = addRSSStatsTimer.start();
try {
addRSSRequestCounter.increment();
String decodedUrl = URLDecoder.decode(url, "UTF-8");
RSSManager.getInstance().addSubscription(user, decodedUrl);
return Response.ok().build();
} catch (Exception e) {
logger.error("Exception occurred during subscription", e);
addRSSErrorCounter.increment();
return Response.serverError().build();
} finally {
stopwatch.stop();
addRSSStatsTimer.record(stopwatch.getDuration(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
}
}
@DELETE
@Path("/rss/user/{user}")
@Produces({MediaType.APPLICATION_JSON})
public Response unsubscribe (
final @QueryParam("url") String url,
final @PathParam("user") String user) {
// Start timer
Stopwatch stopwatch = delRSSStatsTimer.start();
try {
delRSSRequestCounter.increment();
String decodedUrl = URLDecoder.decode(url, "UTF-8");
RSSManager.getInstance().deleteSubscription(user, decodedUrl);
return Response.ok().build();
} catch (Exception e) {
logger.error("Exception occurred during un-subscription", e);
delRSSErrorCounter.increment();
return Response.serverError().build();
} finally {
stopwatch.stop();
delRSSStatsTimer.record(stopwatch.getDuration(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
}
}
} | 7,253 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/test | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/test/utils/EmbeddedMiddleTierForTests.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.test.utils;
import com.google.common.io.Closeables;
import com.google.inject.Injector;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.recipes.rss.server.MiddleTierServer;
import com.netflix.recipes.rss.util.RSSModule;
/**
 * Boots an embedded middle tier (Governator lifecycle + Netty server) for
 * integration tests, and tears both down again.
 */
public class EmbeddedMiddleTierForTests {

    public MiddleTierServer middleTierServer;

    // Retained so tearDown() can stop what setUp() started; the original
    // started the lifecycle but never closed it, leaking it between tests.
    private LifecycleManager lifecycleManager;

    public void setUp() throws Exception {
        System.setProperty("archaius.deployment.applicationId", "middletier");
        System.setProperty("archaius.deployment.environment", "ci");

        Injector injector = LifecycleInjector.builder().withModules(new RSSModule()).createInjector();

        lifecycleManager = injector.getInstance(LifecycleManager.class);
        lifecycleManager.start();

        middleTierServer = injector.getInstance(MiddleTierServer.class);
        middleTierServer.start();
    }

    public void tearDown() throws Exception {
        Closeables.closeQuietly(middleTierServer);
        if (lifecycleManager != null) {
            lifecycleManager.close();
        }
    }
}
| 7,254 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/server/MiddleTierServer.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.server;
import com.netflix.karyon.spi.PropertyNames;
/**
 * Entry point for the RSS middle tier Netty server.
 *
 * @author Chris Fregly (chris@fregly.com)
 */
public class MiddleTierServer extends BaseNettyServer {

    public MiddleTierServer() {
    }

    public static void main(String[] args) throws Exception {
        // Identify this process to Archaius so middletier-*.properties are loaded.
        System.setProperty("archaius.deployment.applicationId", "middletier");
        // Restrict Karyon's bootstrap classpath scanning to Netflix packages.
        System.setProperty(PropertyNames.SERVER_BOOTSTRAP_BASE_PACKAGES_OVERRIDE, "com.netflix");

        new MiddleTierServer().start();
    }
}
| 7,255 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/manager/MiddleTierHealthCheckHandler.java | package com.netflix.recipes.rss.manager;
import com.netflix.karyon.spi.HealthCheckHandler;
public class MiddleTierHealthCheckHandler implements HealthCheckHandler {
public int getStatus() {
return RSSManager.getInstance().getStatus();
}
}
| 7,256 |
0 | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-middletier/src/main/java/com/netflix/recipes/rss/manager/RSSManager.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.manager;
import java.io.IOException;
import java.io.StringReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import com.netflix.client.ClientFactory;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.karyon.spi.HealthCheckHandler;
import com.netflix.niws.client.http.HttpClientRequest;
import com.netflix.niws.client.http.HttpClientResponse;
import com.netflix.niws.client.http.RestClient;
import com.netflix.recipes.rss.RSS;
import com.netflix.recipes.rss.RSSConstants;
import com.netflix.recipes.rss.RSSItem;
import com.netflix.recipes.rss.RSSStore;
import com.netflix.recipes.rss.Subscriptions;
import com.netflix.recipes.rss.impl.CassandraStoreImpl;
import com.netflix.recipes.rss.impl.InMemoryStoreImpl;
import com.netflix.recipes.rss.impl.RSSImpl;
import com.netflix.recipes.rss.impl.RSSItemImpl;
import com.netflix.recipes.rss.impl.SubscriptionsImpl;
/**
 * RSS Manager that
 *  1) Fetches content from RSS feeds using Ribbon
 *  2) Parses RSS feeds
 *  3) Persists feed urls into
 *     a) Cassandra using Astyanax (or)
 *     b) InMemoryStore
 *
 * Eagerly-initialized singleton; also serves as the Karyon health check.
 */
public class RSSManager implements HealthCheckHandler {

    // Backing store for subscriptions; chosen once, at construction time.
    private RSSStore store;

    private static final Logger logger = LoggerFactory.getLogger(RSSManager.class);

    private static final RSSManager instance = new RSSManager();

    private RSSManager() {
        // The "rss.store" dynamic property selects the store; Cassandra is the
        // default. Note the choice is fixed for the process lifetime even
        // though the property is dynamic.
        if (RSSConstants.RSS_STORE_CASSANDRA.equals(
            DynamicPropertyFactory.getInstance().getStringProperty(RSSConstants.RSS_STORE, RSSConstants.RSS_STORE_CASSANDRA).get())) {
            store = new CassandraStoreImpl();
        } else {
            store = new InMemoryStoreImpl();
        }
    }

    public static RSSManager getInstance() {
        return instance;
    }

    /**
     * Fetches the User subscriptions: the stored feed urls plus the fetched
     * and parsed content of each feed. Feeds that fail to fetch/parse come
     * back with no items and are silently dropped from the result.
     */
    public Subscriptions getSubscriptions(String userId) throws Exception {
        List<String> feedUrls = store.getSubscribedUrls(userId);
        List<RSS> feeds = new ArrayList<RSS>(feedUrls.size());
        for (String feedUrl: feedUrls) {
            RSS rss = RSSManager.getInstance().fetchRSSFeed(feedUrl);
            // Empty item list signals a fetch/parse failure (see parseRSS).
            if (rss.getItems() != null && !rss.getItems().isEmpty()) {
                feeds.add(rss);
            }
        }
        return new SubscriptionsImpl(userId, feeds);
    }

    /**
     * Add subscription.
     *
     * @param decodedUrl already URL-decoded feed url; must not be null
     * @throws IllegalArgumentException when decodedUrl is null
     */
    public void addSubscription(String user, String decodedUrl) throws Exception {
        if (decodedUrl == null) throw new IllegalArgumentException("url cannot be null");
        store.subscribeUrl(user, decodedUrl);
    }

    /**
     * Delete subscription.
     *
     * @param decodedUrl already URL-decoded feed url; must not be null
     * @throws IllegalArgumentException when decodedUrl is null
     */
    public void deleteSubscription(String user, String decodedUrl) throws Exception {
        if (decodedUrl == null) throw new IllegalArgumentException("url cannot be null");
        store.unsubscribeUrl(user, decodedUrl);
    }

    /**
     * Fetch the RSS feed content using Ribbon.
     * Never throws: any fetch failure is logged and surfaces as an empty RSS
     * object (via parseRSS(url, null)).
     */
    private RSS fetchRSSFeed(String url) {
        RestClient client = (RestClient) ClientFactory.getNamedClient(RSSConstants.MIDDLETIER_REST_CLIENT);
        HttpClientResponse response;
        String rssData = null;
        try {
            HttpClientRequest request = HttpClientRequest.newBuilder().setUri(new URI(url)).build();
            response = client.execute(request);
            if (response != null) {
                // NOTE(review): the response is never explicitly released —
                // confirm this ribbon client does not require
                // releaseResources()/close() to avoid connection leaks.
                rssData = IOUtils.toString(response.getRawEntity(), Charsets.UTF_8);
                logger.info("Status code for " + response.getRequestedURI() + " : " + response.getStatus());
            }
        } catch (URISyntaxException e) {
            logger.error("Exception occurred when setting the URI", e);
        } catch (Exception e) {
            logger.error("Exception occurred when executing the HTTP request", e);
        }
        return parseRSS(url, rssData);
    }

    /**
     * Parses the RSS feeds and return back a POJO.
     * Any parse failure yields an empty RSSImpl rather than an exception.
     */
    private RSS parseRSS(String url, String rss) {
        // Error case
        if (rss == null) return new RSSImpl();

        RSS rssItems = null;
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        // Disallowing DOCTYPE declarations blocks XXE / entity-expansion
        // attacks from untrusted feed content.
        String FEATURE = "http://apache.org/xml/features/disallow-doctype-decl";
        try {
            dbf.setFeature(FEATURE, true);
            DocumentBuilder db = dbf.newDocumentBuilder();
            try {
                InputSource is = new InputSource(new StringReader(rss));
                Document dom = db.parse(is);
                Element docEle = dom.getDocumentElement();
                List<RSSItem> items = new ArrayList<RSSItem>();
                // NOTE(review): item(0) NPEs if a feed/item lacks a <title>,
                // <link> or <description> element; the outer catch blocks do
                // not cover NPE, so a malformed feed could propagate one.
                String title = docEle.getElementsByTagName("title").item(0).getTextContent();
                NodeList nl = docEle.getElementsByTagName("item");
                if (nl != null && nl.getLength() > 0) {
                    for (int i = 0 ; i < nl.getLength(); i++) {
                        Element el = (Element) nl.item(i);
                        items.add(new RSSItemImpl(el.getElementsByTagName("title").item(0).getTextContent(), el.getElementsByTagName("link").item(0).getTextContent(), el.getElementsByTagName("description").item(0).getTextContent()));
                    }
                }
                rssItems = new RSSImpl(url, title, items);
            } catch (SAXException e) {
                logger.error("Exception occurred during parsing the RSS feed", e);
            } catch (IOException e) {
                logger.error("Exception occurred during fetching the RSS feed", e);
            }
        } catch (ParserConfigurationException e) {
            logger.error("Exception occurred during parsing the RSS feed", e);
        }
        // Fall back to an empty feed so callers never receive null.
        if (rssItems == null) {
            rssItems = new RSSImpl();
        }
        return rssItems;
    }

    // Health check: 200 when the store was constructed, 500 otherwise.
    public int getStatus() {
        return store == null ? 500 : 200;
    }
}
| 7,257 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/RSSConstants.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
/**
 * Application-wide configuration keys and constant values shared by the RSS
 * edge and middle tier services.
 */
public class RSSConstants {

    // Edge constants
    public static final String EDGE_WEB_RESOURCE_ROOT_PATH = "edge";
    public static final String EDGE_WEB_RESOURCE_GET_PATH = "get";

    // Middletier constants
    public static final String MIDDLETIER_EUREKA_SERVICE_NAME = "middletier";

    // Hystrix
    public static final String MIDDLETIER_HYSTRIX_THREAD_POOL = "MiddleTierThreadPool";
    public static final String HYSTRIX_RSS_THREAD_POOL = "RSSThreadPool";
    public static final String HYSTRIX_RSS_MUTATIONS_GROUP = "RSSMutationsGroup";
    public static final String HYSTRIX_RSS_GET_GROUP = "RSSGetGroup";
    public static final String HYSTRIX_RSS_ADD_COMMAND_KEY = "RSSAdd";
    public static final String HYSTRIX_RSS_DEL_COMMAND_KEY = "RSSDel";
    public static final String HYSTRIX_RSS_GET_COMMAND_KEY = "RSSGet";
    public static final String HYSTRIX_STREAM_PATH = "/hystrix.stream";

    public static final String MIDDLETIER_WEB_RESOURCE_ROOT_PATH = "middletier";
    public static final String MIDDLETIER_WEB_RESOURCE_GET_PATH = "get";

    // Rest Client
    public static final String MIDDLETIER_REST_CLIENT = "middletier-client";

    // Default user name
    public static final String DEFAULT_USER = "default";

    /**
     * @deprecated misspelled; use {@link #DEFAULT_USER}. Kept as an alias so
     *             existing callers continue to compile and link.
     */
    @Deprecated
    public static final String DEFUALT_USER = DEFAULT_USER;

    // REST Entry points
    public static final String RSS_ENTRY_POINT = "/rss/user/" + DEFAULT_USER;

    // RSS Store ("rss.store" selects cassandra vs. inmemory)
    public static final String RSS_STORE = "rss.store";
    public static final String RSS_STORE_CASSANDRA = "cassandra";
    public static final String RSS_STORE_INMEMORY = "inmemory";

    // Cassandra meta data
    public static final String CASSANDRA_HOST = "cassandra.host";
    public static final String CASSANDRA_PORT = "cassandra.port";
    public static final String CASSANDRA_MAXCONNSPERHOST = "cassandra.maxConnectionsPerHost";
    public static final String CASSANDRA_KEYSPACE = "cassandra.keyspace";
    public static final String CASSANDRA_COLUMNFAMILY = "cassandra.columnfamily";

    // Jetty
    public static final String JETTY_HTTP_PORT = "jetty.http.port";
    public static final String WEBAPPS_DIR = "rss-edge/webapp";
}
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/RSSConfiguration.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.inject.Singleton;
import com.netflix.config.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import java.io.IOException;
import com.netflix.governator.annotations.AutoBindSingleton;
/**
 * RSSConfiguration follows a hierarchy as follows: <appId>-<env>.properties
 * (optional: <env>=local|dev|qa|prod) <appId>.properties (default values)
 * System Properties (-D)
 *
 * JMX: All properties can be viewed and updated (on a per instance basis) here:
 * Config-com.netflix.config.jmx.BaseConfigMBean
 *
 * @author Chris Fregly (chris@fregly.com)
 */
@AutoBindSingleton(AppConfiguration.class)
public class RSSConfiguration implements AppConfiguration {

    private static final Logger logger = LoggerFactory.getLogger(RSSConfiguration.class);

    // Guards setOverrideProperty(). The original initialized this to false and
    // never flipped it, so every call to setOverrideProperty() threw
    // IllegalStateException.
    private boolean initialized = false;

    public RSSConfiguration() {
        // Archaius is configured statically via ConfigurationManager, so the
        // instance is usable as soon as it is constructed.
        initialized = true;
    }

    /** Returns the dynamic string property for {@code key}, or {@code defaultValue} when unset. */
    public String getString(String key, String defaultValue) {
        final DynamicStringProperty property = DynamicPropertyFactory.getInstance().getStringProperty(key, defaultValue);
        return property.get();
    }

    /** Returns the dynamic int property for {@code key}, or {@code defaultValue} when unset. */
    public int getInt(String key, int defaultValue) {
        final DynamicIntProperty property = DynamicPropertyFactory.getInstance().getIntProperty(key, defaultValue);
        return property.get();
    }

    /** Returns the dynamic long property for {@code key}, or {@code defaultValue} when unset. */
    public long getLong(String key, int defaultValue) {
        final DynamicLongProperty property = DynamicPropertyFactory.getInstance().getLongProperty(key, defaultValue);
        return property.get();
    }

    /** Returns the dynamic boolean property for {@code key}, or {@code defaultValue} when unset. */
    public boolean getBoolean(String key, boolean defaultValue) {
        final DynamicBooleanProperty property = DynamicPropertyFactory.getInstance().getBooleanProperty(key, defaultValue);
        return property.get();
    }

    /**
     * Sets an instance-level override that trumps dynamic and system
     * properties. Useful for tests.
     */
    @VisibleForTesting
    public void setOverrideProperty(String key, Object value) {
        Preconditions.checkState(initialized, "Must initialize RSSConfiguration before use.");
        ((ConcurrentCompositeConfiguration) ConfigurationManager
                .getConfigInstance()).setOverrideProperty(key, value);
    }

    // Nothing to release; required by the Closeable-extending interface.
    public void close() {
    }
}
| 7,259 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/AppConfiguration.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss;
import java.io.Closeable;
/**
 * Read-only view of application configuration backed by dynamic properties,
 * plus a test-only override hook.
 *
 * @author Chris Fregly (chris@fregly.com)
 */
public interface AppConfiguration extends Closeable {

    /** Returns the string value for {@code key}, or {@code defaultValue} when unset. */
    public String getString(String key, String defaultValue);

    /** Returns the int value for {@code key}, or {@code defaultValue} when unset. */
    public int getInt(String key, int defaultValue);

    // NOTE(review): defaultValue is declared int, not long — callers cannot
    // supply a default outside the int range. Confirm whether this is
    // intentional before widening (widening would break implementers).
    public long getLong(String key, int defaultValue);

    /** Returns the boolean value for {@code key}, or {@code defaultValue} when unset. */
    public boolean getBoolean(String key, boolean defaultValue);

    /**
     * Sets an instance-level override. This will trump everything including
     * dynamic properties and system properties. Useful for tests.
     *
     * @param key   property name to override
     * @param value override value
     */
    public void setOverrideProperty(String key, Object value);
}
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/util/DescriptiveThreadFactory.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.util;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Adds descriptive thread names for debugging purposes. Allows priority and
 * daemon to be set, as well. Threads are named "description-N" with N
 * starting at 1.
 *
 * @author Chris Fregly (chris@fregly.com)
 */
public class DescriptiveThreadFactory implements ThreadFactory {

    private final String description;
    private final int priority;
    private final boolean daemon;
    // Per-factory counter appended to each thread name.
    private final AtomicInteger n = new AtomicInteger(1);

    /** Non-daemon, normal-priority factory. */
    public DescriptiveThreadFactory(String description) {
        this(description, Thread.NORM_PRIORITY, false);
    }

    /**
     * @param description prefix for every thread name created by this factory
     * @param priority    thread priority (Thread.MIN_PRIORITY..MAX_PRIORITY)
     * @param daemon      whether created threads are daemons
     */
    public DescriptiveThreadFactory(String description, int priority,
            boolean daemon) {
        this.description = description;
        this.priority = priority;
        this.daemon = daemon;
    }

    @Override
    public Thread newThread(Runnable runnable) {
        String threadDescription = description + "-" + n.getAndIncrement();
        Thread thread = new Thread(runnable, threadDescription);
        thread.setPriority(priority);
        thread.setDaemon(daemon);
        return thread;
    }
}
| 7,261 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/util/RSSModule.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.util;
import com.netflix.recipes.rss.AppConfiguration;
import com.netflix.recipes.rss.RSSConfiguration;
import com.google.inject.AbstractModule;
/**
 * Guice bindings for the RSS apps: exposes {@link RSSConfiguration} as the
 * {@link AppConfiguration} implementation.
 */
public class RSSModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(AppConfiguration.class).to(RSSConfiguration.class);
    }
}
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/server/BaseJettyServer.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.server;
import java.io.Closeable;
import org.apache.jasper.servlet.JspServlet;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Injector;
import com.netflix.blitz4j.LoggingConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.hystrix.contrib.metrics.eventstream.HystrixMetricsStreamServlet;
import com.netflix.karyon.server.KaryonServer;
import com.netflix.recipes.rss.RSSConstants;
/**
 * Base Jetty Server: boots Karyon alongside an embedded Jetty serving JSPs
 * and the hystrix.stream servlet.
 *
 * @author Chris Fregly (chris@fregly.com)
 */
public class BaseJettyServer implements Closeable {

    static {
        LoggingConfiguration.getInstance().configure();
    }

    private static final Logger logger = LoggerFactory.getLogger(BaseJettyServer.class);

    // Assigned in start(). The original constructed one Server in the
    // constructor, started a *different* Server(port) in start(), and then
    // stop()ped the idle one in close() — leaking the listening server.
    private Server jettyServer;
    private final KaryonServer karyonServer;
    protected final Injector injector;

    public BaseJettyServer() {
        // Must be set before karyonServer.initialize() so archaius properties
        // are visible in JMX/jconsole.
        System.setProperty(DynamicPropertyFactory.ENABLE_JMX, "true");

        this.karyonServer = new KaryonServer();
        this.injector = karyonServer.initialize();
    }

    public void start() {
        // Integer.MIN_VALUE sentinel: startup fails loudly if the port
        // property is missing rather than binding an arbitrary port.
        final int port = ConfigurationManager.getConfigInstance().getInt(RSSConstants.JETTY_HTTP_PORT, Integer.MIN_VALUE);

        jettyServer = new Server(port);

        final Context context = new Context(jettyServer, "/", Context.SESSIONS);
        context.setResourceBase(RSSConstants.WEBAPPS_DIR);
        context.setClassLoader(Thread.currentThread().getContextClassLoader());
        context.addServlet(JspServlet.class, "*.jsp");

        // Enable hystrix.stream
        context.addServlet(HystrixMetricsStreamServlet.class, RSSConstants.HYSTRIX_STREAM_PATH);

        jettyServer.setHandler(context);

        try {
            karyonServer.start();
            jettyServer.start();
        } catch (Exception exc) {
            throw new RuntimeException("Cannot start karyon server ...", exc);
        }
    }

    public void close() {
        try {
            // Guard against close() before start().
            if (jettyServer != null) {
                jettyServer.stop();
            }
            karyonServer.close();
        } catch (Exception exc) {
            logger.error("Error stopping jetty ...", exc);
        }
        LoggingConfiguration.getInstance().stop();
    }
}
| 7,263 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/server/BaseNettyServer.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.server;
import java.io.Closeable;
import com.google.common.io.Closeables;
import com.google.inject.Injector;
import com.netflix.blitz4j.LoggingConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.karyon.server.KaryonServer;
import com.netflix.recipes.rss.netty.NettyHandlerContainer;
import com.netflix.recipes.rss.netty.NettyServer;
import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.PackagesResourceConfig;
/**
 * Base Netty server: boots Karyon and serves Jersey resources over a Netty
 * HTTP pipeline.
 *
 * @author Chris Fregly (chris@fregly.com)
 */
public class BaseNettyServer implements Closeable {

    static {
        LoggingConfiguration.getInstance().configure();
    }

    // Assigned in start(); null until then.
    public NettyServer nettyServer;
    public final KaryonServer karyonServer;
    // Bind address/port, read from configuration in start().
    public String host;
    public int port;
    protected final Injector injector;

    public BaseNettyServer() {
        // This must be set before karyonServer.initialize() otherwise the
        // archaius properties will not be available in JMX/jconsole
        System.setProperty(DynamicPropertyFactory.ENABLE_JMX, "true");

        this.karyonServer = new KaryonServer();
        this.injector = karyonServer.initialize();
    }

    /**
     * Reads host/port and the Jersey resource package from configuration,
     * builds and binds the Netty server, then starts Karyon.
     * Note the sentinel defaults ("not-found-in-configuration",
     * Integer.MIN_VALUE) surface missing configuration as a startup failure
     * rather than silently binding a wrong endpoint.
     */
    public void start() {
        this.host = ConfigurationManager.getConfigInstance().getString("netty.http.host", "not-found-in-configuration");
        this.port = ConfigurationManager.getConfigInstance().getInt("netty.http.port", Integer.MIN_VALUE);

        final PackagesResourceConfig rcf = new PackagesResourceConfig(ConfigurationManager.getConfigInstance().getString("jersey.resources.package","not-found-in-configuration"));

        nettyServer = NettyServer
                .builder()
                .host(host)
                .port(port)
                .addHandler(
                        "jerseyHandler",
                        ContainerFactory.createContainer(
                                NettyHandlerContainer.class, rcf))
                // Boss threads accept connections; workers do the I/O.
                .numBossThreads(NettyServer.cpus)
                .numWorkerThreads(NettyServer.cpus * 4).build();

        try {
            karyonServer.start();
        } catch (Exception exc) {
            throw new RuntimeException("Cannot start karyon server.", exc);
        }
    }

    /** Stops Netty and Karyon quietly, then shuts down logging. */
    public void close() {
        Closeables.closeQuietly(nettyServer);
        Closeables.closeQuietly(karyonServer);
        LoggingConfiguration.getInstance().stop();
    }
}
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/netty/JerseyContainerProvider.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.netty;
import com.sun.jersey.api.container.ContainerException;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.spi.container.ContainerProvider;
import com.sun.jersey.spi.container.WebApplication;
/**
* This class is referenced in the following jersey configuration file:
*
* src/main/resources/META-INF/services/com.sun.jersey.spi.container.
* ContainerProvider
*
* @author Chris Fregly (chris@fregly.com)
*/
public class JerseyContainerProvider implements ContainerProvider<NettyHandlerContainer> {
public NettyHandlerContainer createContainer(Class<NettyHandlerContainer> clazz, ResourceConfig config,WebApplication webApp)
throws ContainerException {
if (clazz != NettyHandlerContainer.class) {
return null;
}
return new NettyHandlerContainer(webApp, config);
}
} | 7,265 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/netty/NettyHandlerContainer.java | /**
* The MIT License
*
* Copyright (c) 2009 Carl Bystrom
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Snagged from the following RenderShark component:
* https://code.google.com/p/rendershark
* /source/browse/trunk/rendershark/src/main
* /java/com/sun/jersey/server/impl/container/netty/NettyHandlerContainer.java
*
*/
package com.netflix.recipes.rss.netty;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.core.header.InBoundHeaders;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseWriter;
import com.sun.jersey.spi.container.WebApplication;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBufferInputStream;
import org.jboss.netty.buffer.ChannelBufferOutputStream;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.*;
import org.jboss.netty.channel.ChannelHandler.Sharable;
import org.jboss.netty.handler.codec.http.*;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@Sharable
public class NettyHandlerContainer extends SimpleChannelUpstreamHandler {
    // Optional ResourceConfig property that pins the base URI used for routing;
    // when absent, the base URI is derived from the request's Host header.
    public static final String PROPERTY_BASE_URI = "com.sun.jersey.server.impl.container.netty.baseUri";
    private final WebApplication application;
    private final String baseUri;
    NettyHandlerContainer(WebApplication application, ResourceConfig resourceConfig) {
        this.application = application;
        this.baseUri = (String) resourceConfig.getProperty(PROPERTY_BASE_URI);
    }
    /**
     * Bridges Jersey's response-writing contract onto a Netty channel. The
     * entire response body is buffered in memory and written in one shot.
     */
    private final static class Writer implements ContainerResponseWriter {
        private final Channel channel;
        private HttpResponse response;
        private Writer(Channel channel) {
            this.channel = channel;
        }
        // Builds the HTTP/1.0 response line and headers, and returns a stream
        // backed by a dynamic Netty buffer that Jersey writes the entity into.
        public OutputStream writeStatusAndHeaders(long contentLength, ContainerResponse cResponse) throws IOException {
            response = new DefaultHttpResponse(HttpVersion.HTTP_1_0, HttpResponseStatus.valueOf(cResponse.getStatus()));
            for (Map.Entry<String, List<Object>> e : cResponse.getHttpHeaders().entrySet()) {
                List<String> values = new ArrayList<String>();
                for (Object v : e.getValue())
                    values.add(ContainerResponse.getHeaderValue(v));
                response.setHeader(e.getKey(), values);
            }
            ChannelBuffer buffer = ChannelBuffers.dynamicBuffer();
            response.setContent(buffer);
            return new ChannelBufferOutputStream(buffer);
        }
        public void finish() throws IOException {
            // Streaming is not supported. Entire response will be written
            // downstream once finish() is called. The channel is closed after
            // the write completes (HTTP/1.0-style connection handling).
            channel.write(response).addListener(ChannelFutureListener.CLOSE);
        }
    }
    /**
     * Entry point for inbound HTTP requests: converts the Netty request into a
     * Jersey ContainerRequest and dispatches it to the web application, which
     * replies through the {@link Writer} above.
     */
    @Override
    public void messageReceived(ChannelHandlerContext context, MessageEvent e) throws Exception {
        HttpRequest request = (HttpRequest) e.getMessage();
        String base = getBaseUri(request);
        URI baseUri = new URI(base);
        // base ends with "/", so strip it before appending the request URI
        // (which itself starts with "/") to avoid a doubled slash.
        URI requestUri = new URI(base.substring(0, base.length() - 1) + request.getUri());
        ContainerRequest cRequest = new ContainerRequest(application, request
                .getMethod().getName(), baseUri, requestUri,
                getHeaders(request), new ChannelBufferInputStream(
                request.getContent()));
        application.handleRequest(cRequest, new Writer(e.getChannel()));
    }
    // Configured base URI wins; otherwise build one from the Host header.
    // NOTE(review): assumes the Host header is present — absent Host yields
    // "http://null/"; confirm upstream validation if that matters.
    private String getBaseUri(HttpRequest request) {
        if (baseUri != null) {
            return baseUri;
        }
        return "http://" + request.getHeader(HttpHeaders.Names.HOST) + "/";
    }
    // Copies all request headers (multi-valued) into Jersey's header map.
    private InBoundHeaders getHeaders(HttpRequest request) {
        InBoundHeaders headers = new InBoundHeaders();
        for (String name : request.getHeaderNames()) {
            headers.put(name, request.getHeaders(name));
        }
        return headers;
    }
}
| 7,266 |
0 | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss | Create_ds/recipes-rss/rss-core/src/main/java/com/netflix/recipes/rss/netty/NettyServer.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.recipes.rss.netty;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.recipes.rss.util.DescriptiveThreadFactory;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.*;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.DefaultChannelGroup;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
import org.jboss.netty.handler.execution.ExecutionHandler;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.logging.Slf4JLoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.lang.Thread.UncaughtExceptionHandler;
import java.net.InetSocketAddress;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* NettyServer and Builder
*
* @author Chris Fregly (chris@fregly.com)
*/
public final class NettyServer implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(NettyServer.class);
public static final int cpus = Runtime.getRuntime().availableProcessors();
private ChannelGroup channelGroup = new DefaultChannelGroup();
static {
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
Thread.setDefaultUncaughtExceptionHandler(new UncaughtExceptionHandler() {
public void uncaughtException(Thread thread, Throwable exc) {
logger.error("Thread Exc {}", thread.getName(), exc);
for (Throwable exc2 = exc; exc2 != null; exc2 = exc2.getCause()) {
if (exc2 instanceof OutOfMemoryError)
throw new RuntimeException("OutOfMemoryError");
}
}
});
}
public String getListenHost() {
return ((InetSocketAddress) channelGroup.find(1).getLocalAddress()).getHostName();
}
public int getListenPort() {
return ((InetSocketAddress) channelGroup.find(1).getLocalAddress()).getPort();
}
public void addChannel(Channel channel) {
channelGroup.add(channel);
}
/**
* @return Builder object which will help build the client and server
*/
public static Builder builder() {
return new Builder();
}
public static class Builder {
private String host;
private int port = 0; // default is any port
private Map<String, ChannelHandler> handlers = Maps.newHashMap();
private ChannelHandler encoder = new HttpResponseEncoder();
private ChannelHandler decoder = new HttpRequestDecoder();
private int numBossThreads = cpus; // IO boss threads
private int numWorkerThreads = cpus * 4; // worker threads
public Builder host(String host) {
this.host = host;
return this;
}
public Builder port(int port) {
this.port = port;
return this;
}
public Builder addHandler(String name, ChannelHandler handler) {
Preconditions.checkNotNull(handler);
handlers.put(name, handler);
return this;
}
public Builder encoder(ChannelHandler encoder) {
this.encoder = encoder;
return this;
}
public Builder decoder(ChannelHandler decoder) {
this.decoder = decoder;
return this;
}
public Builder numBossThreads(int numBossThreads) {
this.numBossThreads = numBossThreads;
return this;
}
public Builder numWorkerThreads(int numWorkerThreads) {
this.numWorkerThreads = numWorkerThreads;
return this;
}
/**
* Builds and starts netty
*/
public NettyServer build() {
PipelineFactory factory = new PipelineFactory(handlers, encoder,
decoder, numBossThreads);
ThreadPoolExecutor bossPool = new ThreadPoolExecutor(
numBossThreads, numBossThreads, 60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
new DescriptiveThreadFactory("Boss-Thread"));
ThreadPoolExecutor workerPool = new ThreadPoolExecutor(
numWorkerThreads, numWorkerThreads, 60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
new DescriptiveThreadFactory("Worker-Thread"));
ChannelFactory nioServer = new NioServerSocketChannelFactory(
bossPool, workerPool, numWorkerThreads);
ServerBootstrap serverBootstrap = new ServerBootstrap(nioServer);
serverBootstrap.setOption("reuseAddress", true);
serverBootstrap.setOption("keepAlive", true);
serverBootstrap.setPipelineFactory(factory);
Channel serverChannel = serverBootstrap.bind(new InetSocketAddress(
host, port));
logger.info("Started netty server {}:{}", host, port);
NettyServer server = new NettyServer();
server.addChannel(serverChannel);
return server;
}
}
public static class PipelineFactory implements ChannelPipelineFactory {
static final String CHANNEL_HANDLERS = "channelHandlers";
static final String ENCODER_NAME = "encoder";
static final String DECODER_NAME = "decoder";
final ChannelHandler executionHandler;
final Map<String, ChannelHandler> handlers;
final ChannelHandler encoder;
final ChannelHandler decoder;
public PipelineFactory(Map<String, ChannelHandler> handlers,
ChannelHandler encoder, ChannelHandler decoder, int numThreads) {
this.handlers = handlers;
this.encoder = encoder;
this.decoder = decoder;
if (numThreads != 0) {
ThreadPoolExecutor executorThreadPool = new ThreadPoolExecutor(
NettyServer.cpus, NettyServer.cpus * 4, 60,
TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
new DescriptiveThreadFactory("Executor-Thread"));
this.executionHandler = new ExecutionHandler(executorThreadPool);
} else {
this.executionHandler = null;
}
}
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("executionHandler", executionHandler);
pipeline.addLast(DECODER_NAME, decoder);
pipeline.addLast(ENCODER_NAME, encoder);
for (Entry<String, ChannelHandler> handler : handlers.entrySet()) {
pipeline.addLast(handler.getKey(), handler.getValue());
}
return pipeline;
}
}
public static class ClientPipelineFactory extends PipelineFactory {
public ClientPipelineFactory(Map<String, ChannelHandler> handlers,
ChannelHandler encoder, ChannelHandler decoder) {
super(handlers, encoder, decoder, 0);
}
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast(DECODER_NAME, decoder);
pipeline.addLast(ENCODER_NAME, encoder);
for (Entry<String, ChannelHandler> handler : handlers.entrySet()) {
pipeline.addLast(handler.getKey(), handler.getValue());
}
return pipeline;
}
}
public void close() {
channelGroup.close();
}
private NettyServer() {
}
} | 7,267 |
0 | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark/thrift/TestEnum.java | /**
* Autogenerated by Thrift Compiler (0.9.2)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.spark.thrift;
import java.util.Map;
import java.util.HashMap;
import org.apache.thrift.TEnum;
// NOTE(review): Thrift-generated code — do not hand-edit logic; regenerate
// from the IDL instead. Comments below only clarify the generated contract.
public enum TestEnum implements org.apache.thrift.TEnum {
    TWEET(0),
    RETWEET(2),
    DM(10),
    REPLY(11);

    // Wire value as declared in the Thrift IDL (note the gaps: 0, 2, 10, 11).
    private final int value;

    private TestEnum(int value) {
        this.value = value;
    }

    /**
     * Get the integer value of this enum value, as defined in the Thrift IDL.
     */
    public int getValue() {
        return value;
    }

    /**
     * Find the enum type by its integer value, as defined in the Thrift IDL.
     * @return null if the value is not found.
     */
    public static TestEnum findByValue(int value) {
        switch (value) {
            case 0:
                return TWEET;
            case 2:
                return RETWEET;
            case 10:
                return DM;
            case 11:
                return REPLY;
            default:
                return null;
        }
    }
}
| 7,268 |
0 | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark/thrift/StructSimple.java | /**
* Autogenerated by Thrift Compiler (0.9.2)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.spark.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-4-21")
public class StructSimple implements org.apache.thrift.TBase<StructSimple, StructSimple._Fields>, java.io.Serializable, Cloneable, Comparable<StructSimple> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StructSimple");
private static final org.apache.thrift.protocol.TField ID16_FIELD_DESC = new org.apache.thrift.protocol.TField("id16", org.apache.thrift.protocol.TType.I16, (short)1);
private static final org.apache.thrift.protocol.TField ID32_FIELD_DESC = new org.apache.thrift.protocol.TField("id32", org.apache.thrift.protocol.TType.I32, (short)2);
private static final org.apache.thrift.protocol.TField ID64_FIELD_DESC = new org.apache.thrift.protocol.TField("id64", org.apache.thrift.protocol.TType.I64, (short)3);
private static final org.apache.thrift.protocol.TField BIN1_FIELD_DESC = new org.apache.thrift.protocol.TField("bin1", org.apache.thrift.protocol.TType.STRING, (short)4);
private static final org.apache.thrift.protocol.TField B1_FIELD_DESC = new org.apache.thrift.protocol.TField("b1", org.apache.thrift.protocol.TType.BOOL, (short)5);
private static final org.apache.thrift.protocol.TField D1_FIELD_DESC = new org.apache.thrift.protocol.TField("d1", org.apache.thrift.protocol.TType.DOUBLE, (short)6);
private static final org.apache.thrift.protocol.TField STR1_FIELD_DESC = new org.apache.thrift.protocol.TField("str1", org.apache.thrift.protocol.TType.STRING, (short)7);
private static final org.apache.thrift.protocol.TField L1_FIELD_DESC = new org.apache.thrift.protocol.TField("l1", org.apache.thrift.protocol.TType.LIST, (short)8);
private static final org.apache.thrift.protocol.TField M1_FIELD_DESC = new org.apache.thrift.protocol.TField("m1", org.apache.thrift.protocol.TType.MAP, (short)9);
private static final org.apache.thrift.protocol.TField S1_FIELD_DESC = new org.apache.thrift.protocol.TField("s1", org.apache.thrift.protocol.TType.SET, (short)10);
private static final org.apache.thrift.protocol.TField F1_FIELD_DESC = new org.apache.thrift.protocol.TField("f1", org.apache.thrift.protocol.TType.STRUCT, (short)11);
private static final org.apache.thrift.protocol.TField FOO_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("fooList", org.apache.thrift.protocol.TType.LIST, (short)12);
private static final org.apache.thrift.protocol.TField FOO_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("fooMap", org.apache.thrift.protocol.TType.MAP, (short)13);
private static final org.apache.thrift.protocol.TField OPTION_STR_FIELD_DESC = new org.apache.thrift.protocol.TField("option_str", org.apache.thrift.protocol.TType.STRING, (short)14);
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.I32, (short)15);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new StructSimpleStandardSchemeFactory());
schemes.put(TupleScheme.class, new StructSimpleTupleSchemeFactory());
}
public short id16; // required
public int id32; // required
public long id64; // required
public ByteBuffer bin1; // required
public boolean b1; // required
public double d1; // required
public String str1; // required
public List<Long> l1; // required
public Map<String,Boolean> m1; // required
public Set<Double> s1; // required
public Foo f1; // required
public List<Foo> fooList; // required
public Map<String,Foo> fooMap; // required
public String option_str; // optional
public TestEnum e; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ID16((short)1, "id16"),
ID32((short)2, "id32"),
ID64((short)3, "id64"),
BIN1((short)4, "bin1"),
B1((short)5, "b1"),
D1((short)6, "d1"),
STR1((short)7, "str1"),
L1((short)8, "l1"),
M1((short)9, "m1"),
S1((short)10, "s1"),
F1((short)11, "f1"),
FOO_LIST((short)12, "fooList"),
FOO_MAP((short)13, "fooMap"),
OPTION_STR((short)14, "option_str"),
E((short)15, "e");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // ID16
return ID16;
case 2: // ID32
return ID32;
case 3: // ID64
return ID64;
case 4: // BIN1
return BIN1;
case 5: // B1
return B1;
case 6: // D1
return D1;
case 7: // STR1
return STR1;
case 8: // L1
return L1;
case 9: // M1
return M1;
case 10: // S1
return S1;
case 11: // F1
return F1;
case 12: // FOO_LIST
return FOO_LIST;
case 13: // FOO_MAP
return FOO_MAP;
case 14: // OPTION_STR
return OPTION_STR;
case 15: // E
return E;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
private static final int __ID16_ISSET_ID = 0;
private static final int __ID32_ISSET_ID = 1;
private static final int __ID64_ISSET_ID = 2;
private static final int __B1_ISSET_ID = 3;
private static final int __D1_ISSET_ID = 4;
private byte __isset_bitfield = 0;
private static final _Fields optionals[] = {_Fields.OPTION_STR};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.ID16, new org.apache.thrift.meta_data.FieldMetaData("id16", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)));
tmpMap.put(_Fields.ID32, new org.apache.thrift.meta_data.FieldMetaData("id32", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.ID64, new org.apache.thrift.meta_data.FieldMetaData("id64", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.BIN1, new org.apache.thrift.meta_data.FieldMetaData("bin1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
tmpMap.put(_Fields.B1, new org.apache.thrift.meta_data.FieldMetaData("b1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
tmpMap.put(_Fields.D1, new org.apache.thrift.meta_data.FieldMetaData("d1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
tmpMap.put(_Fields.STR1, new org.apache.thrift.meta_data.FieldMetaData("str1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.L1, new org.apache.thrift.meta_data.FieldMetaData("l1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
tmpMap.put(_Fields.M1, new org.apache.thrift.meta_data.FieldMetaData("m1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))));
tmpMap.put(_Fields.S1, new org.apache.thrift.meta_data.FieldMetaData("s1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))));
tmpMap.put(_Fields.F1, new org.apache.thrift.meta_data.FieldMetaData("f1", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "Foo")));
tmpMap.put(_Fields.FOO_LIST, new org.apache.thrift.meta_data.FieldMetaData("fooList", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "Foo"))));
tmpMap.put(_Fields.FOO_MAP, new org.apache.thrift.meta_data.FieldMetaData("fooMap", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "Foo"))));
tmpMap.put(_Fields.OPTION_STR, new org.apache.thrift.meta_data.FieldMetaData("option_str", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.ENUM , "TestEnum")));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StructSimple.class, metaDataMap);
}
public StructSimple() {
}
public StructSimple(
short id16,
int id32,
long id64,
ByteBuffer bin1,
boolean b1,
double d1,
String str1,
List<Long> l1,
Map<String,Boolean> m1,
Set<Double> s1,
Foo f1,
List<Foo> fooList,
Map<String,Foo> fooMap,
TestEnum e)
{
this();
this.id16 = id16;
setId16IsSet(true);
this.id32 = id32;
setId32IsSet(true);
this.id64 = id64;
setId64IsSet(true);
this.bin1 = org.apache.thrift.TBaseHelper.copyBinary(bin1);
this.b1 = b1;
setB1IsSet(true);
this.d1 = d1;
setD1IsSet(true);
this.str1 = str1;
this.l1 = l1;
this.m1 = m1;
this.s1 = s1;
this.f1 = f1;
this.fooList = fooList;
this.fooMap = fooMap;
this.e = e;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public StructSimple(StructSimple other) {
__isset_bitfield = other.__isset_bitfield;
this.id16 = other.id16;
this.id32 = other.id32;
this.id64 = other.id64;
if (other.isSetBin1()) {
this.bin1 = org.apache.thrift.TBaseHelper.copyBinary(other.bin1);
}
this.b1 = other.b1;
this.d1 = other.d1;
if (other.isSetStr1()) {
this.str1 = other.str1;
}
if (other.isSetL1()) {
List<Long> __this__l1 = new ArrayList<Long>(other.l1);
this.l1 = __this__l1;
}
if (other.isSetM1()) {
Map<String,Boolean> __this__m1 = new HashMap<String,Boolean>(other.m1);
this.m1 = __this__m1;
}
if (other.isSetS1()) {
Set<Double> __this__s1 = new HashSet<Double>(other.s1);
this.s1 = __this__s1;
}
if (other.isSetF1()) {
this.f1 = other.f1;
}
if (other.isSetFooList()) {
List<Foo> __this__fooList = new ArrayList<Foo>(other.fooList.size());
for (Foo other_element : other.fooList) {
__this__fooList.add(other_element);
}
this.fooList = __this__fooList;
}
if (other.isSetFooMap()) {
Map<String,Foo> __this__fooMap = new HashMap<String,Foo>(other.fooMap.size());
for (Map.Entry<String, Foo> other_element : other.fooMap.entrySet()) {
String other_element_key = other_element.getKey();
Foo other_element_value = other_element.getValue();
String __this__fooMap_copy_key = other_element_key;
Foo __this__fooMap_copy_value = other_element_value;
__this__fooMap.put(__this__fooMap_copy_key, __this__fooMap_copy_value);
}
this.fooMap = __this__fooMap;
}
if (other.isSetOption_str()) {
this.option_str = other.option_str;
}
if (other.isSetE()) {
this.e = other.e;
}
}
public StructSimple deepCopy() {
return new StructSimple(this);
}
@Override
public void clear() {
setId16IsSet(false);
this.id16 = 0;
setId32IsSet(false);
this.id32 = 0;
setId64IsSet(false);
this.id64 = 0;
this.bin1 = null;
setB1IsSet(false);
this.b1 = false;
setD1IsSet(false);
this.d1 = 0.0;
this.str1 = null;
this.l1 = null;
this.m1 = null;
this.s1 = null;
this.f1 = null;
this.fooList = null;
this.fooMap = null;
this.option_str = null;
this.e = null;
}
public short getId16() {
return this.id16;
}
public StructSimple setId16(short id16) {
this.id16 = id16;
setId16IsSet(true);
return this;
}
public void unsetId16() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID16_ISSET_ID);
}
/** Returns true if field id16 is set (has been assigned a value) and false otherwise */
public boolean isSetId16() {
return EncodingUtils.testBit(__isset_bitfield, __ID16_ISSET_ID);
}
public void setId16IsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID16_ISSET_ID, value);
}
public int getId32() {
return this.id32;
}
public StructSimple setId32(int id32) {
this.id32 = id32;
setId32IsSet(true);
return this;
}
public void unsetId32() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID32_ISSET_ID);
}
/** Returns true if field id32 is set (has been assigned a value) and false otherwise */
public boolean isSetId32() {
return EncodingUtils.testBit(__isset_bitfield, __ID32_ISSET_ID);
}
public void setId32IsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID32_ISSET_ID, value);
}
public long getId64() {
return this.id64;
}
public StructSimple setId64(long id64) {
this.id64 = id64;
setId64IsSet(true);
return this;
}
public void unsetId64() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID64_ISSET_ID);
}
/** Returns true if field id64 is set (has been assigned a value) and false otherwise */
public boolean isSetId64() {
return EncodingUtils.testBit(__isset_bitfield, __ID64_ISSET_ID);
}
public void setId64IsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID64_ISSET_ID, value);
}
public byte[] getBin1() {
setBin1(org.apache.thrift.TBaseHelper.rightSize(bin1));
return bin1 == null ? null : bin1.array();
}
public ByteBuffer bufferForBin1() {
return org.apache.thrift.TBaseHelper.copyBinary(bin1);
}
public StructSimple setBin1(byte[] bin1) {
this.bin1 = bin1 == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bin1, bin1.length));
return this;
}
public StructSimple setBin1(ByteBuffer bin1) {
this.bin1 = org.apache.thrift.TBaseHelper.copyBinary(bin1);
return this;
}
public void unsetBin1() {
this.bin1 = null;
}
/** Returns true if field bin1 is set (has been assigned a value) and false otherwise */
public boolean isSetBin1() {
return this.bin1 != null;
}
public void setBin1IsSet(boolean value) {
if (!value) {
this.bin1 = null;
}
}
public boolean isB1() {
return this.b1;
}
public StructSimple setB1(boolean b1) {
this.b1 = b1;
setB1IsSet(true);
return this;
}
public void unsetB1() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __B1_ISSET_ID);
}
/** Returns true if field b1 is set (has been assigned a value) and false otherwise */
public boolean isSetB1() {
return EncodingUtils.testBit(__isset_bitfield, __B1_ISSET_ID);
}
public void setB1IsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __B1_ISSET_ID, value);
}
public double getD1() {
return this.d1;
}
public StructSimple setD1(double d1) {
this.d1 = d1;
setD1IsSet(true);
return this;
}
public void unsetD1() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __D1_ISSET_ID);
}
/** Returns true if field d1 is set (has been assigned a value) and false otherwise */
public boolean isSetD1() {
return EncodingUtils.testBit(__isset_bitfield, __D1_ISSET_ID);
}
public void setD1IsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __D1_ISSET_ID, value);
}
/** Returns str1 (may be null when unset). */
public String getStr1() {
return this.str1;
}
/** Sets str1 (stored by reference, no copy). Returns this for chaining. */
public StructSimple setStr1(String str1) {
this.str1 = str1;
return this;
}
/** Clears str1; null represents "unset" for reference-typed fields. */
public void unsetStr1() {
this.str1 = null;
}
/** Returns true if field str1 is set (has been assigned a value) and false otherwise */
public boolean isSetStr1() {
return this.str1 != null;
}
/** Marks str1 unset when value is false; true is a no-op (presence == non-null). */
public void setStr1IsSet(boolean value) {
if (!value) {
this.str1 = null;
}
}
/** Returns the number of elements in l1, or 0 when the list is unset (null). */
public int getL1Size() {
return (this.l1 == null) ? 0 : this.l1.size();
}
/** Returns an iterator over l1, or null when the list is unset. */
public java.util.Iterator<Long> getL1Iterator() {
return (this.l1 == null) ? null : this.l1.iterator();
}
/** Appends an element to l1, lazily creating the backing ArrayList on first add. */
public void addToL1(long elem) {
if (this.l1 == null) {
this.l1 = new ArrayList<Long>();
}
this.l1.add(elem);
}
/** Returns the live backing list (may be null; callers share the reference). */
public List<Long> getL1() {
return this.l1;
}
/** Sets l1 (stored by reference, no defensive copy). Returns this for chaining. */
public StructSimple setL1(List<Long> l1) {
this.l1 = l1;
return this;
}
/** Clears l1; null represents "unset". */
public void unsetL1() {
this.l1 = null;
}
/** Returns true if field l1 is set (has been assigned a value) and false otherwise */
public boolean isSetL1() {
return this.l1 != null;
}
/** Marks l1 unset when value is false; true is a no-op. */
public void setL1IsSet(boolean value) {
if (!value) {
this.l1 = null;
}
}
/** Returns the number of entries in m1, or 0 when the map is unset (null). */
public int getM1Size() {
return (this.m1 == null) ? 0 : this.m1.size();
}
/** Puts an entry into m1, lazily creating the backing HashMap on first put. */
public void putToM1(String key, boolean val) {
if (this.m1 == null) {
this.m1 = new HashMap<String,Boolean>();
}
this.m1.put(key, val);
}
/** Returns the live backing map (may be null; callers share the reference). */
public Map<String,Boolean> getM1() {
return this.m1;
}
/** Sets m1 (stored by reference, no defensive copy). Returns this for chaining. */
public StructSimple setM1(Map<String,Boolean> m1) {
this.m1 = m1;
return this;
}
/** Clears m1; null represents "unset". */
public void unsetM1() {
this.m1 = null;
}
/** Returns true if field m1 is set (has been assigned a value) and false otherwise */
public boolean isSetM1() {
return this.m1 != null;
}
/** Marks m1 unset when value is false; true is a no-op. */
public void setM1IsSet(boolean value) {
if (!value) {
this.m1 = null;
}
}
/** Returns the number of elements in s1, or 0 when the set is unset (null). */
public int getS1Size() {
return (this.s1 == null) ? 0 : this.s1.size();
}
/** Returns an iterator over s1, or null when the set is unset. */
public java.util.Iterator<Double> getS1Iterator() {
return (this.s1 == null) ? null : this.s1.iterator();
}
/** Adds an element to s1, lazily creating the backing HashSet on first add. */
public void addToS1(double elem) {
if (this.s1 == null) {
this.s1 = new HashSet<Double>();
}
this.s1.add(elem);
}
/** Returns the live backing set (may be null; callers share the reference). */
public Set<Double> getS1() {
return this.s1;
}
/** Sets s1 (stored by reference, no defensive copy). Returns this for chaining. */
public StructSimple setS1(Set<Double> s1) {
this.s1 = s1;
return this;
}
/** Clears s1; null represents "unset". */
public void unsetS1() {
this.s1 = null;
}
/** Returns true if field s1 is set (has been assigned a value) and false otherwise */
public boolean isSetS1() {
return this.s1 != null;
}
/** Marks s1 unset when value is false; true is a no-op. */
public void setS1IsSet(boolean value) {
if (!value) {
this.s1 = null;
}
}
/** Returns the nested Foo struct f1 (may be null when unset). */
public Foo getF1() {
return this.f1;
}
/** Sets f1 (stored by reference, no copy). Returns this for chaining. */
public StructSimple setF1(Foo f1) {
this.f1 = f1;
return this;
}
/** Clears f1; null represents "unset". */
public void unsetF1() {
this.f1 = null;
}
/** Returns true if field f1 is set (has been assigned a value) and false otherwise */
public boolean isSetF1() {
return this.f1 != null;
}
/** Marks f1 unset when value is false; true is a no-op. */
public void setF1IsSet(boolean value) {
if (!value) {
this.f1 = null;
}
}
/** Returns the number of elements in fooList, or 0 when the list is unset (null). */
public int getFooListSize() {
return (this.fooList == null) ? 0 : this.fooList.size();
}
/** Returns an iterator over fooList, or null when the list is unset. */
public java.util.Iterator<Foo> getFooListIterator() {
return (this.fooList == null) ? null : this.fooList.iterator();
}
/** Appends an element to fooList, lazily creating the backing ArrayList on first add. */
public void addToFooList(Foo elem) {
if (this.fooList == null) {
this.fooList = new ArrayList<Foo>();
}
this.fooList.add(elem);
}
/** Returns the live backing list (may be null; callers share the reference). */
public List<Foo> getFooList() {
return this.fooList;
}
/** Sets fooList (stored by reference, no defensive copy). Returns this for chaining. */
public StructSimple setFooList(List<Foo> fooList) {
this.fooList = fooList;
return this;
}
/** Clears fooList; null represents "unset". */
public void unsetFooList() {
this.fooList = null;
}
/** Returns true if field fooList is set (has been assigned a value) and false otherwise */
public boolean isSetFooList() {
return this.fooList != null;
}
/** Marks fooList unset when value is false; true is a no-op. */
public void setFooListIsSet(boolean value) {
if (!value) {
this.fooList = null;
}
}
/** Returns the number of entries in fooMap, or 0 when the map is unset (null). */
public int getFooMapSize() {
return (this.fooMap == null) ? 0 : this.fooMap.size();
}
/** Puts an entry into fooMap, lazily creating the backing HashMap on first put. */
public void putToFooMap(String key, Foo val) {
if (this.fooMap == null) {
this.fooMap = new HashMap<String,Foo>();
}
this.fooMap.put(key, val);
}
/** Returns the live backing map (may be null; callers share the reference). */
public Map<String,Foo> getFooMap() {
return this.fooMap;
}
/** Sets fooMap (stored by reference, no defensive copy). Returns this for chaining. */
public StructSimple setFooMap(Map<String,Foo> fooMap) {
this.fooMap = fooMap;
return this;
}
/** Clears fooMap; null represents "unset". */
public void unsetFooMap() {
this.fooMap = null;
}
/** Returns true if field fooMap is set (has been assigned a value) and false otherwise */
public boolean isSetFooMap() {
return this.fooMap != null;
}
/** Marks fooMap unset when value is false; true is a no-op. */
public void setFooMapIsSet(boolean value) {
if (!value) {
this.fooMap = null;
}
}
/** Returns option_str (optional field; may be null when unset). */
public String getOption_str() {
return this.option_str;
}
/** Sets option_str. Returns this for chaining. */
public StructSimple setOption_str(String option_str) {
this.option_str = option_str;
return this;
}
/** Clears option_str; null represents "unset". */
public void unsetOption_str() {
this.option_str = null;
}
/** Returns true if field option_str is set (has been assigned a value) and false otherwise */
public boolean isSetOption_str() {
return this.option_str != null;
}
/** Marks option_str unset when value is false; true is a no-op. */
public void setOption_strIsSet(boolean value) {
if (!value) {
this.option_str = null;
}
}
/** Returns the enum field e (may be null when unset). */
public TestEnum getE() {
return this.e;
}
/** Sets e. Returns this for chaining. */
public StructSimple setE(TestEnum e) {
this.e = e;
return this;
}
/** Clears e; null represents "unset". */
public void unsetE() {
this.e = null;
}
/** Returns true if field e is set (has been assigned a value) and false otherwise */
public boolean isSetE() {
return this.e != null;
}
/** Marks e unset when value is false; true is a no-op. */
public void setEIsSet(boolean value) {
if (!value) {
this.e = null;
}
}
/**
 * Generic TBase setter: dispatches on the _Fields id and delegates to the typed
 * setter, or to the matching unsetXxx() when value is null.
 * Note: the casts are unchecked by design — callers must pass the boxed type
 * matching the field (e.g. Short for ID16, List&lt;Long&gt; for L1).
 */
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case ID16:
if (value == null) {
unsetId16();
} else {
setId16((Short)value);
}
break;
case ID32:
if (value == null) {
unsetId32();
} else {
setId32((Integer)value);
}
break;
case ID64:
if (value == null) {
unsetId64();
} else {
setId64((Long)value);
}
break;
case BIN1:
if (value == null) {
unsetBin1();
} else {
setBin1((ByteBuffer)value);
}
break;
case B1:
if (value == null) {
unsetB1();
} else {
setB1((Boolean)value);
}
break;
case D1:
if (value == null) {
unsetD1();
} else {
setD1((Double)value);
}
break;
case STR1:
if (value == null) {
unsetStr1();
} else {
setStr1((String)value);
}
break;
case L1:
if (value == null) {
unsetL1();
} else {
setL1((List<Long>)value);
}
break;
case M1:
if (value == null) {
unsetM1();
} else {
setM1((Map<String,Boolean>)value);
}
break;
case S1:
if (value == null) {
unsetS1();
} else {
setS1((Set<Double>)value);
}
break;
case F1:
if (value == null) {
unsetF1();
} else {
setF1((Foo)value);
}
break;
case FOO_LIST:
if (value == null) {
unsetFooList();
} else {
setFooList((List<Foo>)value);
}
break;
case FOO_MAP:
if (value == null) {
unsetFooMap();
} else {
setFooMap((Map<String,Foo>)value);
}
break;
case OPTION_STR:
if (value == null) {
unsetOption_str();
} else {
setOption_str((String)value);
}
break;
case E:
if (value == null) {
unsetE();
} else {
setE((TestEnum)value);
}
break;
}
}
/**
 * Generic TBase getter: returns the field's value boxed as Object.
 * Primitives are boxed explicitly; reference fields are returned as-is (may be null).
 * Throws IllegalStateException for an unrecognized field (unreachable for a valid enum).
 */
public Object getFieldValue(_Fields field) {
switch (field) {
case ID16:
return Short.valueOf(getId16());
case ID32:
return Integer.valueOf(getId32());
case ID64:
return Long.valueOf(getId64());
case BIN1:
return getBin1();
case B1:
return Boolean.valueOf(isB1());
case D1:
return Double.valueOf(getD1());
case STR1:
return getStr1();
case L1:
return getL1();
case M1:
return getM1();
case S1:
return getS1();
case F1:
return getF1();
case FOO_LIST:
return getFooList();
case FOO_MAP:
return getFooMap();
case OPTION_STR:
return getOption_str();
case E:
return getE();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
// Null field is a programming error, not a query about an absent field.
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case ID16:
return isSetId16();
case ID32:
return isSetId32();
case ID64:
return isSetId64();
case BIN1:
return isSetBin1();
case B1:
return isSetB1();
case D1:
return isSetD1();
case STR1:
return isSetStr1();
case L1:
return isSetL1();
case M1:
return isSetM1();
case S1:
return isSetS1();
case F1:
return isSetF1();
case FOO_LIST:
return isSetFooList();
case FOO_MAP:
return isSetFooMap();
case OPTION_STR:
return isSetOption_str();
case E:
return isSetE();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
  // `instanceof` is false for null, so one combined check covers both the
  // null guard and the type test before delegating to the typed overload.
  return (that instanceof StructSimple) && this.equals((StructSimple) that);
}
/**
 * Field-by-field equality against another StructSimple.
 * Primitive fields (id16/id32/id64/b1/d1) are always treated as "present" and
 * compared directly; reference fields compare presence (isSetXxx) first and then
 * value via equals(). Returns false on null.
 */
public boolean equals(StructSimple that) {
if (that == null)
return false;
boolean this_present_id16 = true;
boolean that_present_id16 = true;
if (this_present_id16 || that_present_id16) {
if (!(this_present_id16 && that_present_id16))
return false;
if (this.id16 != that.id16)
return false;
}
boolean this_present_id32 = true;
boolean that_present_id32 = true;
if (this_present_id32 || that_present_id32) {
if (!(this_present_id32 && that_present_id32))
return false;
if (this.id32 != that.id32)
return false;
}
boolean this_present_id64 = true;
boolean that_present_id64 = true;
if (this_present_id64 || that_present_id64) {
if (!(this_present_id64 && that_present_id64))
return false;
if (this.id64 != that.id64)
return false;
}
boolean this_present_bin1 = true && this.isSetBin1();
boolean that_present_bin1 = true && that.isSetBin1();
if (this_present_bin1 || that_present_bin1) {
if (!(this_present_bin1 && that_present_bin1))
return false;
if (!this.bin1.equals(that.bin1))
return false;
}
boolean this_present_b1 = true;
boolean that_present_b1 = true;
if (this_present_b1 || that_present_b1) {
if (!(this_present_b1 && that_present_b1))
return false;
if (this.b1 != that.b1)
return false;
}
boolean this_present_d1 = true;
boolean that_present_d1 = true;
if (this_present_d1 || that_present_d1) {
if (!(this_present_d1 && that_present_d1))
return false;
// NOTE(review): != on double treats NaN as unequal to itself — generated
// Thrift behavior, kept as-is.
if (this.d1 != that.d1)
return false;
}
boolean this_present_str1 = true && this.isSetStr1();
boolean that_present_str1 = true && that.isSetStr1();
if (this_present_str1 || that_present_str1) {
if (!(this_present_str1 && that_present_str1))
return false;
if (!this.str1.equals(that.str1))
return false;
}
boolean this_present_l1 = true && this.isSetL1();
boolean that_present_l1 = true && that.isSetL1();
if (this_present_l1 || that_present_l1) {
if (!(this_present_l1 && that_present_l1))
return false;
if (!this.l1.equals(that.l1))
return false;
}
boolean this_present_m1 = true && this.isSetM1();
boolean that_present_m1 = true && that.isSetM1();
if (this_present_m1 || that_present_m1) {
if (!(this_present_m1 && that_present_m1))
return false;
if (!this.m1.equals(that.m1))
return false;
}
boolean this_present_s1 = true && this.isSetS1();
boolean that_present_s1 = true && that.isSetS1();
if (this_present_s1 || that_present_s1) {
if (!(this_present_s1 && that_present_s1))
return false;
if (!this.s1.equals(that.s1))
return false;
}
boolean this_present_f1 = true && this.isSetF1();
boolean that_present_f1 = true && that.isSetF1();
if (this_present_f1 || that_present_f1) {
if (!(this_present_f1 && that_present_f1))
return false;
if (!this.f1.equals(that.f1))
return false;
}
boolean this_present_fooList = true && this.isSetFooList();
boolean that_present_fooList = true && that.isSetFooList();
if (this_present_fooList || that_present_fooList) {
if (!(this_present_fooList && that_present_fooList))
return false;
if (!this.fooList.equals(that.fooList))
return false;
}
boolean this_present_fooMap = true && this.isSetFooMap();
boolean that_present_fooMap = true && that.isSetFooMap();
if (this_present_fooMap || that_present_fooMap) {
if (!(this_present_fooMap && that_present_fooMap))
return false;
if (!this.fooMap.equals(that.fooMap))
return false;
}
boolean this_present_option_str = true && this.isSetOption_str();
boolean that_present_option_str = true && that.isSetOption_str();
if (this_present_option_str || that_present_option_str) {
if (!(this_present_option_str && that_present_option_str))
return false;
if (!this.option_str.equals(that.option_str))
return false;
}
boolean this_present_e = true && this.isSetE();
boolean that_present_e = true && that.isSetE();
if (this_present_e || that_present_e) {
if (!(this_present_e && that_present_e))
return false;
if (!this.e.equals(that.e))
return false;
}
return true;
}
/**
 * Hash consistent with equals(StructSimple): builds a list of
 * (presence-flag, value) pairs in field order and delegates to List.hashCode().
 * Primitive fields are always "present"; the enum contributes its wire value.
 */
@Override
public int hashCode() {
List<Object> list = new ArrayList<Object>();
boolean present_id16 = true;
list.add(present_id16);
if (present_id16)
list.add(id16);
boolean present_id32 = true;
list.add(present_id32);
if (present_id32)
list.add(id32);
boolean present_id64 = true;
list.add(present_id64);
if (present_id64)
list.add(id64);
boolean present_bin1 = true && (isSetBin1());
list.add(present_bin1);
if (present_bin1)
list.add(bin1);
boolean present_b1 = true;
list.add(present_b1);
if (present_b1)
list.add(b1);
boolean present_d1 = true;
list.add(present_d1);
if (present_d1)
list.add(d1);
boolean present_str1 = true && (isSetStr1());
list.add(present_str1);
if (present_str1)
list.add(str1);
boolean present_l1 = true && (isSetL1());
list.add(present_l1);
if (present_l1)
list.add(l1);
boolean present_m1 = true && (isSetM1());
list.add(present_m1);
if (present_m1)
list.add(m1);
boolean present_s1 = true && (isSetS1());
list.add(present_s1);
if (present_s1)
list.add(s1);
boolean present_f1 = true && (isSetF1());
list.add(present_f1);
if (present_f1)
list.add(f1);
boolean present_fooList = true && (isSetFooList());
list.add(present_fooList);
if (present_fooList)
list.add(fooList);
boolean present_fooMap = true && (isSetFooMap());
list.add(present_fooMap);
if (present_fooMap)
list.add(fooMap);
boolean present_option_str = true && (isSetOption_str());
list.add(present_option_str);
if (present_option_str)
list.add(option_str);
boolean present_e = true && (isSetE());
list.add(present_e);
if (present_e)
// Enum hashed by its thrift wire value, not by Enum.hashCode() (stable across JVMs).
list.add(e.getValue());
return list.hashCode();
}
/**
 * Total ordering over StructSimple instances: classes compare by name first,
 * then each field in declaration order — presence flag (unset &lt; set) before
 * value, values via TBaseHelper.compareTo.
 */
@Override
public int compareTo(StructSimple other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetId16()).compareTo(other.isSetId16());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetId16()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id16, other.id16);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetId32()).compareTo(other.isSetId32());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetId32()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id32, other.id32);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetId64()).compareTo(other.isSetId64());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetId64()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id64, other.id64);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetBin1()).compareTo(other.isSetBin1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetBin1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bin1, other.bin1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetB1()).compareTo(other.isSetB1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetB1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.b1, other.b1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetD1()).compareTo(other.isSetD1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetD1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.d1, other.d1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetStr1()).compareTo(other.isSetStr1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetStr1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.str1, other.str1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetL1()).compareTo(other.isSetL1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetL1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.l1, other.l1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetM1()).compareTo(other.isSetM1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetM1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.m1, other.m1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetS1()).compareTo(other.isSetS1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetS1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.s1, other.s1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetF1()).compareTo(other.isSetF1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetF1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f1, other.f1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetFooList()).compareTo(other.isSetFooList());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetFooList()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fooList, other.fooList);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetFooMap()).compareTo(other.isSetFooMap());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetFooMap()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fooMap, other.fooMap);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetOption_str()).compareTo(other.isSetOption_str());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetOption_str()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.option_str, other.option_str);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetE()).compareTo(other.isSetE());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetE()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.e, other.e);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
/** Maps a thrift field id to its _Fields constant (null for unknown ids, per findByThriftId). */
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
/** Deserializes this struct from the protocol, using the scheme registered for the protocol type. */
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
/** Serializes this struct to the protocol, using the scheme registered for the protocol type. */
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
/**
 * Human-readable dump of all fields in declaration order; null reference fields
 * print as "null". The optional field option_str is only included when set.
 * The `first` flag dance is generated boilerplate: after the first field it is
 * always false, so every subsequent field is preceded by ", ".
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder("StructSimple(");
boolean first = true;
sb.append("id16:");
sb.append(this.id16);
first = false;
if (!first) sb.append(", ");
sb.append("id32:");
sb.append(this.id32);
first = false;
if (!first) sb.append(", ");
sb.append("id64:");
sb.append(this.id64);
first = false;
if (!first) sb.append(", ");
sb.append("bin1:");
if (this.bin1 == null) {
sb.append("null");
} else {
// Binary rendered via TBaseHelper (hex-style dump) instead of ByteBuffer.toString().
org.apache.thrift.TBaseHelper.toString(this.bin1, sb);
}
first = false;
if (!first) sb.append(", ");
sb.append("b1:");
sb.append(this.b1);
first = false;
if (!first) sb.append(", ");
sb.append("d1:");
sb.append(this.d1);
first = false;
if (!first) sb.append(", ");
sb.append("str1:");
if (this.str1 == null) {
sb.append("null");
} else {
sb.append(this.str1);
}
first = false;
if (!first) sb.append(", ");
sb.append("l1:");
if (this.l1 == null) {
sb.append("null");
} else {
sb.append(this.l1);
}
first = false;
if (!first) sb.append(", ");
sb.append("m1:");
if (this.m1 == null) {
sb.append("null");
} else {
sb.append(this.m1);
}
first = false;
if (!first) sb.append(", ");
sb.append("s1:");
if (this.s1 == null) {
sb.append("null");
} else {
sb.append(this.s1);
}
first = false;
if (!first) sb.append(", ");
sb.append("f1:");
if (this.f1 == null) {
sb.append("null");
} else {
sb.append(this.f1);
}
first = false;
if (!first) sb.append(", ");
sb.append("fooList:");
if (this.fooList == null) {
sb.append("null");
} else {
sb.append(this.fooList);
}
first = false;
if (!first) sb.append(", ");
sb.append("fooMap:");
if (this.fooMap == null) {
sb.append("null");
} else {
sb.append(this.fooMap);
}
first = false;
// option_str is optional: omitted entirely from the dump when unset.
if (isSetOption_str()) {
if (!first) sb.append(", ");
sb.append("option_str:");
if (this.option_str == null) {
sb.append("null");
} else {
sb.append(this.option_str);
}
first = false;
}
if (!first) sb.append(", ");
sb.append("e:");
if (this.e == null) {
sb.append("null");
} else {
sb.append(this.e);
}
first = false;
sb.append(")");
return sb.toString();
}
/**
 * Verifies that all required reference-typed fields are non-null.
 * Required primitive fields (id16/id32/id64/b1/d1) cannot be checked here
 * (no null sentinel); the standard-scheme read() checks their isSet bits instead.
 * @throws org.apache.thrift.TException if a required field is missing
 */
public void validate() throws org.apache.thrift.TException {
// check for required fields
// alas, we cannot check 'id16' because it's a primitive and you chose the non-beans generator.
// alas, we cannot check 'id32' because it's a primitive and you chose the non-beans generator.
// alas, we cannot check 'id64' because it's a primitive and you chose the non-beans generator.
if (bin1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'bin1' was not present! Struct: " + toString());
}
// alas, we cannot check 'b1' because it's a primitive and you chose the non-beans generator.
// alas, we cannot check 'd1' because it's a primitive and you chose the non-beans generator.
if (str1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'str1' was not present! Struct: " + toString());
}
if (l1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'l1' was not present! Struct: " + toString());
}
if (m1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'm1' was not present! Struct: " + toString());
}
if (s1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 's1' was not present! Struct: " + toString());
}
if (f1 == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'f1' was not present! Struct: " + toString());
}
if (fooList == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'fooList' was not present! Struct: " + toString());
}
if (fooMap == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'fooMap' was not present! Struct: " + toString());
}
if (e == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'e' was not present! Struct: " + toString());
}
// check for sub-struct validity
}
/** Java-serialization hook: delegates to thrift compact-protocol encoding over the object stream. */
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
// Wrap thrift failures as IOException, the only checked type writeObject may throw.
throw new java.io.IOException(te);
}
}
/** Java-serialization hook: decodes thrift compact-protocol data from the object stream. */
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
/** Factory registered with the schemes map; produces a fresh standard (field-tagged) scheme per call. */
private static class StructSimpleStandardSchemeFactory implements SchemeFactory {
public StructSimpleStandardScheme getScheme() {
return new StructSimpleStandardScheme();
}
}
/**
 * Standard (field-tagged) wire scheme for StructSimple: reads/writes fields with
 * explicit id/type headers, tolerating unknown or mistyped fields by skipping them.
 *
 * Fix vs. generated code: the required-field error messages in read() previously
 * called bare toString(), which inside this static nested class resolves to the
 * *scheme* object's default Object.toString() (e.g. "...$StructSimpleStandardScheme@1a2b3c")
 * rather than the struct being deserialized. They now call struct.toString() so the
 * diagnostic actually shows the partially-read struct contents.
 */
private static class StructSimpleStandardScheme extends StandardScheme<StructSimple> {
/** Populates struct from iprot, skipping unknown fields, then validates required fields. */
public void read(org.apache.thrift.protocol.TProtocol iprot, StructSimple struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // ID16
if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
struct.id16 = iprot.readI16();
struct.setId16IsSet(true);
} else {
// Type mismatch: skip the value to stay aligned on the wire.
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // ID32
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.id32 = iprot.readI32();
struct.setId32IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 3: // ID64
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.id64 = iprot.readI64();
struct.setId64IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 4: // BIN1
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.bin1 = iprot.readBinary();
struct.setBin1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 5: // B1
if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
struct.b1 = iprot.readBool();
struct.setB1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 6: // D1
if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
struct.d1 = iprot.readDouble();
struct.setD1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 7: // STR1
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.str1 = iprot.readString();
struct.setStr1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 8: // L1
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
struct.l1 = new ArrayList<Long>(_list0.size);
long _elem1;
for (int _i2 = 0; _i2 < _list0.size; ++_i2)
{
_elem1 = iprot.readI64();
struct.l1.add(_elem1);
}
iprot.readListEnd();
}
struct.setL1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 9: // M1
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
org.apache.thrift.protocol.TMap _map3 = iprot.readMapBegin();
struct.m1 = new HashMap<String,Boolean>(2*_map3.size);
String _key4;
boolean _val5;
for (int _i6 = 0; _i6 < _map3.size; ++_i6)
{
_key4 = iprot.readString();
_val5 = iprot.readBool();
struct.m1.put(_key4, _val5);
}
iprot.readMapEnd();
}
struct.setM1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 10: // S1
if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
{
org.apache.thrift.protocol.TSet _set7 = iprot.readSetBegin();
struct.s1 = new HashSet<Double>(2*_set7.size);
double _elem8;
for (int _i9 = 0; _i9 < _set7.size; ++_i9)
{
_elem8 = iprot.readDouble();
struct.s1.add(_elem8);
}
iprot.readSetEnd();
}
struct.setS1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 11: // F1
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.f1 = new Foo();
struct.f1.read(iprot);
struct.setF1IsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 12: // FOO_LIST
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list10 = iprot.readListBegin();
struct.fooList = new ArrayList<Foo>(_list10.size);
Foo _elem11;
for (int _i12 = 0; _i12 < _list10.size; ++_i12)
{
_elem11 = new Foo();
_elem11.read(iprot);
struct.fooList.add(_elem11);
}
iprot.readListEnd();
}
struct.setFooListIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 13: // FOO_MAP
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
org.apache.thrift.protocol.TMap _map13 = iprot.readMapBegin();
struct.fooMap = new HashMap<String,Foo>(2*_map13.size);
String _key14;
Foo _val15;
for (int _i16 = 0; _i16 < _map13.size; ++_i16)
{
_key14 = iprot.readString();
_val15 = new Foo();
_val15.read(iprot);
struct.fooMap.put(_key14, _val15);
}
iprot.readMapEnd();
}
struct.setFooMapIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 14: // OPTION_STR
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.option_str = iprot.readString();
struct.setOption_strIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 15: // E
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
// Unknown enum values map to null via findByValue.
struct.e = com.airbnb.spark.thrift.TestEnum.findByValue(iprot.readI32());
struct.setEIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
if (!struct.isSetId16()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'id16' was not found in serialized data! Struct: " + struct.toString());
}
if (!struct.isSetId32()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'id32' was not found in serialized data! Struct: " + struct.toString());
}
if (!struct.isSetId64()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'id64' was not found in serialized data! Struct: " + struct.toString());
}
if (!struct.isSetB1()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'b1' was not found in serialized data! Struct: " + struct.toString());
}
if (!struct.isSetD1()) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'd1' was not found in serialized data! Struct: " + struct.toString());
}
struct.validate();
}
/** Writes struct to oprot with field headers; reference fields are only written when non-null. */
public void write(org.apache.thrift.protocol.TProtocol oprot, StructSimple struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldBegin(ID16_FIELD_DESC);
oprot.writeI16(struct.id16);
oprot.writeFieldEnd();
oprot.writeFieldBegin(ID32_FIELD_DESC);
oprot.writeI32(struct.id32);
oprot.writeFieldEnd();
oprot.writeFieldBegin(ID64_FIELD_DESC);
oprot.writeI64(struct.id64);
oprot.writeFieldEnd();
if (struct.bin1 != null) {
oprot.writeFieldBegin(BIN1_FIELD_DESC);
oprot.writeBinary(struct.bin1);
oprot.writeFieldEnd();
}
oprot.writeFieldBegin(B1_FIELD_DESC);
oprot.writeBool(struct.b1);
oprot.writeFieldEnd();
oprot.writeFieldBegin(D1_FIELD_DESC);
oprot.writeDouble(struct.d1);
oprot.writeFieldEnd();
if (struct.str1 != null) {
oprot.writeFieldBegin(STR1_FIELD_DESC);
oprot.writeString(struct.str1);
oprot.writeFieldEnd();
}
if (struct.l1 != null) {
oprot.writeFieldBegin(L1_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.l1.size()));
for (long _iter17 : struct.l1)
{
oprot.writeI64(_iter17);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
if (struct.m1 != null) {
oprot.writeFieldBegin(M1_FIELD_DESC);
{
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL, struct.m1.size()));
for (Map.Entry<String, Boolean> _iter18 : struct.m1.entrySet())
{
oprot.writeString(_iter18.getKey());
oprot.writeBool(_iter18.getValue());
}
oprot.writeMapEnd();
}
oprot.writeFieldEnd();
}
if (struct.s1 != null) {
oprot.writeFieldBegin(S1_FIELD_DESC);
{
oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.DOUBLE, struct.s1.size()));
for (double _iter19 : struct.s1)
{
oprot.writeDouble(_iter19);
}
oprot.writeSetEnd();
}
oprot.writeFieldEnd();
}
if (struct.f1 != null) {
oprot.writeFieldBegin(F1_FIELD_DESC);
struct.f1.write(oprot);
oprot.writeFieldEnd();
}
if (struct.fooList != null) {
oprot.writeFieldBegin(FOO_LIST_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fooList.size()));
for (Foo _iter20 : struct.fooList)
{
_iter20.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
if (struct.fooMap != null) {
oprot.writeFieldBegin(FOO_MAP_FIELD_DESC);
{
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.fooMap.size()));
for (Map.Entry<String, Foo> _iter21 : struct.fooMap.entrySet())
{
oprot.writeString(_iter21.getKey());
_iter21.getValue().write(oprot);
}
oprot.writeMapEnd();
}
oprot.writeFieldEnd();
}
if (struct.option_str != null) {
// Optional field: additionally gated on isSet (generated pattern).
if (struct.isSetOption_str()) {
oprot.writeFieldBegin(OPTION_STR_FIELD_DESC);
oprot.writeString(struct.option_str);
oprot.writeFieldEnd();
}
}
if (struct.e != null) {
oprot.writeFieldBegin(E_FIELD_DESC);
oprot.writeI32(struct.e.getValue());
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
/** Factory registered with the schemes map; produces a fresh tuple (compact, positional) scheme per call. */
private static class StructSimpleTupleSchemeFactory implements SchemeFactory {
public StructSimpleTupleScheme getScheme() {
return new StructSimpleTupleScheme();
}
}
// Tuple-protocol (de)serializer for StructSimple. Unlike the standard scheme,
// the tuple scheme writes no per-field headers: required fields travel in
// fixed declaration order, followed by a BitSet announcing which optional
// fields are appended. read() must consume exactly what write() produced, in
// the same order -- do not reorder statements here.
private static class StructSimpleTupleScheme extends TupleScheme<StructSimple> {
  @Override
  public void write(org.apache.thrift.protocol.TProtocol prot, StructSimple struct) throws org.apache.thrift.TException {
    TTupleProtocol oprot = (TTupleProtocol) prot;
    // Required scalar fields, in declaration order.
    oprot.writeI16(struct.id16);
    oprot.writeI32(struct.id32);
    oprot.writeI64(struct.id64);
    oprot.writeBinary(struct.bin1);
    oprot.writeBool(struct.b1);
    oprot.writeDouble(struct.d1);
    oprot.writeString(struct.str1);
    // Containers are encoded as an i32 size prefix followed by raw elements.
    {
      oprot.writeI32(struct.l1.size());
      for (long _iter22 : struct.l1)
      {
        oprot.writeI64(_iter22);
      }
    }
    {
      oprot.writeI32(struct.m1.size());
      for (Map.Entry<String, Boolean> _iter23 : struct.m1.entrySet())
      {
        oprot.writeString(_iter23.getKey());
        oprot.writeBool(_iter23.getValue());
      }
    }
    {
      oprot.writeI32(struct.s1.size());
      for (double _iter24 : struct.s1)
      {
        oprot.writeDouble(_iter24);
      }
    }
    // Nested structs serialize themselves through their own scheme.
    struct.f1.write(oprot);
    {
      oprot.writeI32(struct.fooList.size());
      for (Foo _iter25 : struct.fooList)
      {
        _iter25.write(oprot);
      }
    }
    {
      oprot.writeI32(struct.fooMap.size());
      for (Map.Entry<String, Foo> _iter26 : struct.fooMap.entrySet())
      {
        oprot.writeString(_iter26.getKey());
        _iter26.getValue().write(oprot);
      }
    }
    // Enums travel as their i32 value.
    oprot.writeI32(struct.e.getValue());
    // Optional fields: bit 0 flags whether option_str follows.
    BitSet optionals = new BitSet();
    if (struct.isSetOption_str()) {
      optionals.set(0);
    }
    oprot.writeBitSet(optionals, 1);
    if (struct.isSetOption_str()) {
      oprot.writeString(struct.option_str);
    }
  }
  @Override
  public void read(org.apache.thrift.protocol.TProtocol prot, StructSimple struct) throws org.apache.thrift.TException {
    TTupleProtocol iprot = (TTupleProtocol) prot;
    // Required scalars, consumed in the same order write() emitted them.
    struct.id16 = iprot.readI16();
    struct.setId16IsSet(true);
    struct.id32 = iprot.readI32();
    struct.setId32IsSet(true);
    struct.id64 = iprot.readI64();
    struct.setId64IsSet(true);
    struct.bin1 = iprot.readBinary();
    struct.setBin1IsSet(true);
    struct.b1 = iprot.readBool();
    struct.setB1IsSet(true);
    struct.d1 = iprot.readDouble();
    struct.setD1IsSet(true);
    struct.str1 = iprot.readString();
    struct.setStr1IsSet(true);
    // Containers: the element count arrives as an i32 read inline; the
    // temporary TList/TMap/TSet only carries element type + size metadata.
    {
      org.apache.thrift.protocol.TList _list27 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
      struct.l1 = new ArrayList<Long>(_list27.size);
      long _elem28;
      for (int _i29 = 0; _i29 < _list27.size; ++_i29)
      {
        _elem28 = iprot.readI64();
        struct.l1.add(_elem28);
      }
    }
    struct.setL1IsSet(true);
    {
      org.apache.thrift.protocol.TMap _map30 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL, iprot.readI32());
      struct.m1 = new HashMap<String,Boolean>(2*_map30.size);
      String _key31;
      boolean _val32;
      for (int _i33 = 0; _i33 < _map30.size; ++_i33)
      {
        _key31 = iprot.readString();
        _val32 = iprot.readBool();
        struct.m1.put(_key31, _val32);
      }
    }
    struct.setM1IsSet(true);
    {
      org.apache.thrift.protocol.TSet _set34 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());
      struct.s1 = new HashSet<Double>(2*_set34.size);
      double _elem35;
      for (int _i36 = 0; _i36 < _set34.size; ++_i36)
      {
        _elem35 = iprot.readDouble();
        struct.s1.add(_elem35);
      }
    }
    struct.setS1IsSet(true);
    struct.f1 = new Foo();
    struct.f1.read(iprot);
    struct.setF1IsSet(true);
    {
      org.apache.thrift.protocol.TList _list37 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
      struct.fooList = new ArrayList<Foo>(_list37.size);
      Foo _elem38;
      for (int _i39 = 0; _i39 < _list37.size; ++_i39)
      {
        _elem38 = new Foo();
        _elem38.read(iprot);
        struct.fooList.add(_elem38);
      }
    }
    struct.setFooListIsSet(true);
    {
      org.apache.thrift.protocol.TMap _map40 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
      struct.fooMap = new HashMap<String,Foo>(2*_map40.size);
      String _key41;
      Foo _val42;
      for (int _i43 = 0; _i43 < _map40.size; ++_i43)
      {
        _key41 = iprot.readString();
        _val42 = new Foo();
        _val42.read(iprot);
        struct.fooMap.put(_key41, _val42);
      }
    }
    struct.setFooMapIsSet(true);
    struct.e = com.airbnb.spark.thrift.TestEnum.findByValue(iprot.readI32());
    struct.setEIsSet(true);
    // Optional fields announced by the trailing bit set.
    BitSet incoming = iprot.readBitSet(1);
    if (incoming.get(0)) {
      struct.option_str = iprot.readString();
      struct.setOption_strIsSet(true);
    }
  }
}
}
| 7,269 |
0 | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark | Create_ds/airbnb-spark-thrift/src/test/scala/com/airbnb/spark/thrift/Foo.java | /**
* Autogenerated by Thrift Compiler (0.9.2)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.spark.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-4-21")
/**
 * Thrift-generated struct with three required integer fields ({@code id16},
 * {@code id32}, {@code id64}). Because the fields are primitives, presence is
 * tracked via bits in {@link #__isset_bitfield} rather than null checks.
 * Generated code -- keep edits in sync with the .thrift definition.
 */
public class Foo implements org.apache.thrift.TBase<Foo, Foo._Fields>, java.io.Serializable, Cloneable, Comparable<Foo> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Foo");
  // Field descriptors: wire type and Thrift field id for each member.
  private static final org.apache.thrift.protocol.TField ID16_FIELD_DESC = new org.apache.thrift.protocol.TField("id16", org.apache.thrift.protocol.TType.I16, (short)1);
  private static final org.apache.thrift.protocol.TField ID32_FIELD_DESC = new org.apache.thrift.protocol.TField("id32", org.apache.thrift.protocol.TType.I32, (short)2);
  private static final org.apache.thrift.protocol.TField ID64_FIELD_DESC = new org.apache.thrift.protocol.TField("id64", org.apache.thrift.protocol.TType.I64, (short)3);
  // Maps each protocol scheme class to the factory producing its serializer.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new FooStandardSchemeFactory());
    schemes.put(TupleScheme.class, new FooTupleSchemeFactory());
  }
  public short id16; // required
  public int id32; // required
  public long id64; // required
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    ID16((short)1, "id16"),
    ID32((short)2, "id32"),
    ID64((short)3, "id64");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // ID16
          return ID16;
        case 2: // ID32
          return ID32;
        case 3: // ID64
          return ID64;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments: bit positions in __isset_bitfield for each primitive.
  private static final int __ID16_ISSET_ID = 0;
  private static final int __ID32_ISSET_ID = 1;
  private static final int __ID64_ISSET_ID = 2;
  private byte __isset_bitfield = 0;
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.ID16, new org.apache.thrift.meta_data.FieldMetaData("id16", org.apache.thrift.TFieldRequirementType.REQUIRED,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)));
    tmpMap.put(_Fields.ID32, new org.apache.thrift.meta_data.FieldMetaData("id32", org.apache.thrift.TFieldRequirementType.REQUIRED,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
    tmpMap.put(_Fields.ID64, new org.apache.thrift.meta_data.FieldMetaData("id64", org.apache.thrift.TFieldRequirementType.REQUIRED,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Foo.class, metaDataMap);
  }
  public Foo() {
  }
  public Foo(
    short id16,
    int id32,
    long id64)
  {
    this();
    this.id16 = id16;
    setId16IsSet(true);
    this.id32 = id32;
    setId32IsSet(true);
    this.id64 = id64;
    setId64IsSet(true);
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public Foo(Foo other) {
    __isset_bitfield = other.__isset_bitfield;
    this.id16 = other.id16;
    this.id32 = other.id32;
    this.id64 = other.id64;
  }
  public Foo deepCopy() {
    return new Foo(this);
  }
  @Override
  public void clear() {
    setId16IsSet(false);
    this.id16 = 0;
    setId32IsSet(false);
    this.id32 = 0;
    setId64IsSet(false);
    this.id64 = 0;
  }
  public short getId16() {
    return this.id16;
  }
  public Foo setId16(short id16) {
    this.id16 = id16;
    setId16IsSet(true);
    return this;
  }
  public void unsetId16() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID16_ISSET_ID);
  }
  /** Returns true if field id16 is set (has been assigned a value) and false otherwise */
  public boolean isSetId16() {
    return EncodingUtils.testBit(__isset_bitfield, __ID16_ISSET_ID);
  }
  public void setId16IsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID16_ISSET_ID, value);
  }
  public int getId32() {
    return this.id32;
  }
  public Foo setId32(int id32) {
    this.id32 = id32;
    setId32IsSet(true);
    return this;
  }
  public void unsetId32() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID32_ISSET_ID);
  }
  /** Returns true if field id32 is set (has been assigned a value) and false otherwise */
  public boolean isSetId32() {
    return EncodingUtils.testBit(__isset_bitfield, __ID32_ISSET_ID);
  }
  public void setId32IsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID32_ISSET_ID, value);
  }
  public long getId64() {
    return this.id64;
  }
  public Foo setId64(long id64) {
    this.id64 = id64;
    setId64IsSet(true);
    return this;
  }
  public void unsetId64() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID64_ISSET_ID);
  }
  /** Returns true if field id64 is set (has been assigned a value) and false otherwise */
  public boolean isSetId64() {
    return EncodingUtils.testBit(__isset_bitfield, __ID64_ISSET_ID);
  }
  public void setId64IsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID64_ISSET_ID, value);
  }
  // Generic, reflection-style field access used by TBase consumers.
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case ID16:
      if (value == null) {
        unsetId16();
      } else {
        setId16((Short)value);
      }
      break;
    case ID32:
      if (value == null) {
        unsetId32();
      } else {
        setId32((Integer)value);
      }
      break;
    case ID64:
      if (value == null) {
        unsetId64();
      } else {
        setId64((Long)value);
      }
      break;
    }
  }
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case ID16:
      return Short.valueOf(getId16());
    case ID32:
      return Integer.valueOf(getId32());
    case ID64:
      return Long.valueOf(getId64());
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case ID16:
      return isSetId16();
    case ID32:
      return isSetId32();
    case ID64:
      return isSetId64();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof Foo)
      return this.equals((Foo)that);
    return false;
  }
  // Required primitive fields are always "present", so the presence booleans
  // below are constant true; the generator keeps the template for uniformity.
  public boolean equals(Foo that) {
    if (that == null)
      return false;
    boolean this_present_id16 = true;
    boolean that_present_id16 = true;
    if (this_present_id16 || that_present_id16) {
      if (!(this_present_id16 && that_present_id16))
        return false;
      if (this.id16 != that.id16)
        return false;
    }
    boolean this_present_id32 = true;
    boolean that_present_id32 = true;
    if (this_present_id32 || that_present_id32) {
      if (!(this_present_id32 && that_present_id32))
        return false;
      if (this.id32 != that.id32)
        return false;
    }
    boolean this_present_id64 = true;
    boolean that_present_id64 = true;
    if (this_present_id64 || that_present_id64) {
      if (!(this_present_id64 && that_present_id64))
        return false;
      if (this.id64 != that.id64)
        return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    List<Object> list = new ArrayList<Object>();
    boolean present_id16 = true;
    list.add(present_id16);
    if (present_id16)
      list.add(id16);
    boolean present_id32 = true;
    list.add(present_id32);
    if (present_id32)
      list.add(id32);
    boolean present_id64 = true;
    list.add(present_id64);
    if (present_id64)
      list.add(id64);
    return list.hashCode();
  }
  // Orders by isSet flag first, then by field value, field by field.
  @Override
  public int compareTo(Foo other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetId16()).compareTo(other.isSetId16());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetId16()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id16, other.id16);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetId32()).compareTo(other.isSetId32());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetId32()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id32, other.id32);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetId64()).compareTo(other.isSetId64());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetId64()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id64, other.id64);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  // Serialization entry points: dispatch to the scheme matching the protocol.
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("Foo(");
    boolean first = true;
    sb.append("id16:");
    sb.append(this.id16);
    first = false;
    if (!first) sb.append(", ");
    sb.append("id32:");
    sb.append(this.id32);
    first = false;
    if (!first) sb.append(", ");
    sb.append("id64:");
    sb.append(this.id64);
    first = false;
    sb.append(")");
    return sb.toString();
  }
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // alas, we cannot check 'id16' because it's a primitive and you chose the non-beans generator.
    // alas, we cannot check 'id32' because it's a primitive and you chose the non-beans generator.
    // alas, we cannot check 'id64' because it's a primitive and you chose the non-beans generator.
    // check for sub-struct validity
  }
  // Java serialization is bridged through the Thrift compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private static class FooStandardSchemeFactory implements SchemeFactory {
    public FooStandardScheme getScheme() {
      return new FooStandardScheme();
    }
  }
  // Standard-protocol serializer: self-describing field headers, any order on
  // read, declaration order on write.
  private static class FooStandardScheme extends StandardScheme<Foo> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, Foo struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // ID16
            if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
              struct.id16 = iprot.readI16();
              struct.setId16IsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // ID32
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.id32 = iprot.readI32();
              struct.setId32IsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 3: // ID64
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.id64 = iprot.readI64();
              struct.setId64IsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      if (!struct.isSetId16()) {
        throw new org.apache.thrift.protocol.TProtocolException("Required field 'id16' was not found in serialized data! Struct: " + toString());
      }
      if (!struct.isSetId32()) {
        throw new org.apache.thrift.protocol.TProtocolException("Required field 'id32' was not found in serialized data! Struct: " + toString());
      }
      if (!struct.isSetId64()) {
        throw new org.apache.thrift.protocol.TProtocolException("Required field 'id64' was not found in serialized data! Struct: " + toString());
      }
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, Foo struct) throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldBegin(ID16_FIELD_DESC);
      oprot.writeI16(struct.id16);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(ID32_FIELD_DESC);
      oprot.writeI32(struct.id32);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(ID64_FIELD_DESC);
      oprot.writeI64(struct.id64);
      oprot.writeFieldEnd();
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  private static class FooTupleSchemeFactory implements SchemeFactory {
    public FooTupleScheme getScheme() {
      return new FooTupleScheme();
    }
  }
  // Tuple-protocol serializer: no field headers; all three fields are required
  // so there is no trailing optionals bit set. read() mirrors write() exactly.
  private static class FooTupleScheme extends TupleScheme<Foo> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, Foo struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      oprot.writeI16(struct.id16);
      oprot.writeI32(struct.id32);
      oprot.writeI64(struct.id64);
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, Foo struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      struct.id16 = iprot.readI16();
      struct.setId16IsSet(true);
      struct.id32 = iprot.readI32();
      struct.setId32IsSet(true);
      struct.id64 = iprot.readI64();
      struct.setId64IsSet(true);
    }
  }
}
| 7,270 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer/FlinkKinesisFirehoseProducerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.FlinkKinesisFirehoseException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.RecordCouldNotBeBuffered;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.RecordCouldNotBeSentException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.serialization.KinesisFirehoseSerializationSchema;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.Record;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import javax.annotation.Nonnull;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer.UserRecordResult;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.DEFAULT_DELIVERY_STREAM;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.DEFAULT_TEST_ERROR_MSG;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.getContext;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.getKinesisFirehoseSerializationSchema;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.getSerializationSchema;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.getStandardProperties;
import static org.apache.flink.streaming.api.functions.sink.SinkFunction.Context;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.fail;
public class FlinkKinesisFirehoseProducerTest {
private static final Logger LOGGER = LoggerFactory.getLogger(FlinkKinesisFirehoseProducerTest.class);
private FlinkKinesisFirehoseProducer<String> flinkKinesisFirehoseProducer;
private Context<String> context;
private final Configuration properties = new Configuration();
@Mock
private AmazonKinesisFirehose kinesisFirehoseClient;
@Mock
private IProducer<UserRecordResult, Record> firehoseProducer;
@BeforeMethod
public void init() {
  // Instantiate the @Mock fields (kinesisFirehoseClient, firehoseProducer).
  MockitoAnnotations.initMocks(this);
  // createProducer() is defined outside this excerpt; it presumably returns a
  // Mockito spy, since doReturn(...).when(...) below requires one -- TODO confirm.
  flinkKinesisFirehoseProducer = createProducer();
  // Stub the factory methods so the sink under test talks to the mocked
  // producer/client instead of real AWS Firehose.
  doReturn(firehoseProducer).when(flinkKinesisFirehoseProducer).createFirehoseProducer();
  doReturn(kinesisFirehoseClient).when(flinkKinesisFirehoseProducer).createKinesisFirehoseClient();
  context = getContext();
}
/**
 * Rows for the KinesisFirehoseSerializationSchema constructor tests: one
 * without an explicit credential provider (null) and one with BASIC.
 */
@DataProvider(name = "kinesisFirehoseSerializationProvider")
public Object[][] kinesisFirehoseSerializationProvider() {
  Object[] withoutCredentialType =
      new Object[] {DEFAULT_DELIVERY_STREAM, getKinesisFirehoseSerializationSchema(), getStandardProperties(), null};
  Object[] withBasicCredentialType =
      new Object[] {DEFAULT_DELIVERY_STREAM, getKinesisFirehoseSerializationSchema(), getStandardProperties(), CredentialProviderType.BASIC};
  return new Object[][] {withoutCredentialType, withBasicCredentialType};
}
/**
 * Rows for the plain SerializationSchema constructor tests: one without an
 * explicit credential provider (null) and one with BASIC.
 */
@DataProvider(name = "serializationSchemaProvider")
public Object[][] serializationSchemaProvider() {
  Object[] withoutCredentialType =
      new Object[] {DEFAULT_DELIVERY_STREAM, getSerializationSchema(), getStandardProperties(), null};
  Object[] withBasicCredentialType =
      new Object[] {DEFAULT_DELIVERY_STREAM, getSerializationSchema(), getStandardProperties(), CredentialProviderType.BASIC};
  return new Object[][] {withoutCredentialType, withBasicCredentialType};
}
/**
 * Verifies the producer can be constructed from a
 * KinesisFirehoseSerializationSchema, both with and without an explicit
 * credential provider type.
 */
@Test(dataProvider = "kinesisFirehoseSerializationProvider")
public void testFlinkKinesisFirehoseProducerHappyCase(final String deliveryStream,
                                                      final KinesisFirehoseSerializationSchema<String> schema,
                                                      final Properties configProps,
                                                      final CredentialProviderType credentialType) {
  final FlinkKinesisFirehoseProducer<String> producer;
  if (credentialType == null) {
    producer = new FlinkKinesisFirehoseProducer<>(deliveryStream, schema, configProps);
  } else {
    producer = new FlinkKinesisFirehoseProducer<>(deliveryStream, schema, configProps, credentialType);
  }
  assertNotNull(producer);
}
/**
 * Verifies the producer can be constructed from a plain Flink
 * SerializationSchema, both with and without an explicit credential provider
 * type.
 */
@Test(dataProvider = "serializationSchemaProvider")
public void testFlinkKinesisFirehoseProducerWithSerializationSchemaHappyCase(final String deliveryStream ,
                                                                             final SerializationSchema<String> schema,
                                                                             final Properties configProps,
                                                                             CredentialProviderType credentialType) {
  final FlinkKinesisFirehoseProducer<String> producer;
  if (credentialType == null) {
    producer = new FlinkKinesisFirehoseProducer<>(deliveryStream, schema, configProps);
  } else {
    producer = new FlinkKinesisFirehoseProducer<>(deliveryStream, schema, configProps, credentialType);
  }
  assertNotNull(producer);
}
/**
 * Verifies that, with {@code failOnError = true}, an asynchronously failed
 * record is rethrown as a {@link FlinkKinesisFirehoseException} when the
 * sink is closed.
 */
@Test
public void testAsyncErrorRethrownOnClose() throws Exception {
  try {
    flinkKinesisFirehoseProducer.setFailOnError(true);
    // getUserRecordResult(true, false): presumably an exceptionally-completed
    // record future (helper defined outside this excerpt -- TODO confirm args).
    when(firehoseProducer.addUserRecord(any(Record.class)))
        .thenReturn(getUserRecordResult(true, false));
    flinkKinesisFirehoseProducer.open(properties);
    flinkKinesisFirehoseProducer.invoke("Test", context);
    // Give the async failure time to land before close() propagates it.
    // NOTE(review): a fixed sleep can make this test flaky under load.
    Thread.sleep(1000);
    flinkKinesisFirehoseProducer.close();
    LOGGER.warn("Should not reach this line");
    fail();
  } catch (FlinkKinesisFirehoseException ex) {
    LOGGER.info("Exception has been thrown inside testAsyncErrorRethrownOnClose");
    exceptionAssert(ex);
  } finally {
    // Regardless of outcome, the full open -> invoke -> close path ran once.
    verify(flinkKinesisFirehoseProducer, times(1)).open(properties);
    verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test", context);
    verify(flinkKinesisFirehoseProducer, times(1)).close();
  }
}
/**
 * Verifies that, with {@code failOnError = true}, an asynchronous failure from
 * a previous record is rethrown on the next {@code invoke()} call (so the sink
 * never reaches {@code close()}).
 */
@Test
public void testAsyncErrorRethrownOnInvoke() throws Exception {
  try {
    flinkKinesisFirehoseProducer.setFailOnError(true);
    // First record fails asynchronously, second would succeed; the failure
    // surfaces when the second invoke() checks for propagated errors.
    when(firehoseProducer.addUserRecord(any(Record.class)))
        .thenReturn(getUserRecordResult(true, false))
        .thenReturn(getUserRecordResult(false, true));
    flinkKinesisFirehoseProducer.open(properties);
    flinkKinesisFirehoseProducer.invoke("Test", context);
    // Let the first record's async failure land before the next invoke.
    Thread.sleep(1000);
    flinkKinesisFirehoseProducer.invoke("Test2", context);
    LOGGER.warn("Should not reach this line");
    fail();
  } catch (FlinkKinesisFirehoseException ex) {
    LOGGER.info("Exception has been thrown inside testAsyncErrorRethrownOnInvoke");
    exceptionAssert(ex);
  } finally {
    verify(flinkKinesisFirehoseProducer, times(1)).open(properties);
    verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test", context);
    verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test2", context);
    // The rethrow happened inside invoke(), so close() was never reached.
    verify(flinkKinesisFirehoseProducer, never()).close();
  }
}
/**
 * Verifies that a record whose result completes normally but is flagged
 * unsuccessful (with an attached exception) is surfaced on close() as a
 * {@link FlinkKinesisFirehoseException} caused by
 * {@link RecordCouldNotBeSentException}, preserving the original message.
 */
@Test
public void testAsyncErrorRethrownWhenRecordFailedToSend() throws Exception {
  flinkKinesisFirehoseProducer.setFailOnError(true);
  // Completed future, but the result itself reports failure.
  UserRecordResult recordResult = new UserRecordResult();
  recordResult.setSuccessful(false);
  recordResult.setException(new RuntimeException("A bad thing has happened"));
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(CompletableFuture.completedFuture(recordResult));
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  assertThatExceptionOfType(FlinkKinesisFirehoseException.class)
      .isThrownBy(() -> flinkKinesisFirehoseProducer.close())
      .withMessageContaining("An exception has been thrown while trying to process a record")
      .withCauseInstanceOf(RecordCouldNotBeSentException.class)
      .withStackTraceContaining("A bad thing has happened");
}
/**
 * Verifies that async record failures are NOT rethrown on invoke() when
 * {@code failOnError = false} -- the default configuration for
 * FlinkKinesisFirehoseProducer.
 */
@Test
public void testAsyncErrorNotRethrowOnInvoke() throws Exception {
  flinkKinesisFirehoseProducer.setFailOnError(false);
  // First record fails asynchronously, second succeeds; neither invoke()
  // should throw because failOnError is disabled.
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(getUserRecordResult(true, false))
      .thenReturn(getUserRecordResult(true, true));
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  flinkKinesisFirehoseProducer.invoke("Test2", context);
  verify(flinkKinesisFirehoseProducer, times(1)).open(properties);
  verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test", context);
  verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test2", context);
  verify(flinkKinesisFirehoseProducer, never()).close();
}
/**
 * Happy path: a successful record flows through open -> invoke -> close with
 * no exception, and each lifecycle method runs exactly once.
 */
@Test
public void testFlinkKinesisFirehoseProducerHappyWorkflow() throws Exception {
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(getUserRecordResult(false, true));
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  flinkKinesisFirehoseProducer.close();
  verify(flinkKinesisFirehoseProducer, times(1)).open(properties);
  verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test", context);
  verify(flinkKinesisFirehoseProducer, times(1)).close();
}
/**
 * Verifies that close() flushes the underlying producer when records are
 * still outstanding: with one record pending (then zero) and no flush
 * failure, flush() must be called exactly once.
 */
@Test
public void testFlinkKinesisFirehoseProducerCloseAndFlushHappyWorkflow() throws Exception {
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(getUserRecordResult(false, true));
  doNothing().when(firehoseProducer).flush();
  // First poll sees 1 outstanding record, second poll sees the buffer drained.
  when(firehoseProducer.getOutstandingRecordsCount()).thenReturn(1).thenReturn(0);
  when(firehoseProducer.isFlushFailed()).thenReturn(false);
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  flinkKinesisFirehoseProducer.close();
  verify(firehoseProducer, times(1)).flush();
}
/**
 * Verifies that snapshotState() flushes outstanding records: the outstanding
 * count is polled until it reaches zero (here after three non-zero polls) and
 * flush() is invoked exactly once.
 */
@Test
public void testFlinkKinesisFirehoseProducerTakeSnapshotHappyWorkflow() throws Exception {
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(getUserRecordResult(false, true));
  doNothing().when(firehoseProducer).flush();
  // Buffer drains on the fourth poll.
  when(firehoseProducer.getOutstandingRecordsCount()).thenReturn(1).thenReturn(1).thenReturn(1).thenReturn(0);
  when(firehoseProducer.isFlushFailed()).thenReturn(false);
  FunctionSnapshotContext functionContext = mock(FunctionSnapshotContext.class);
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  flinkKinesisFirehoseProducer.snapshotState(functionContext);
  verify(firehoseProducer, times(1)).flush();
}
/**
 * Verifies that snapshotState() raises IllegalStateException when the
 * synchronous flush fails (isFlushFailed flips to true while records are
 * still outstanding).
 */
@Test(expectedExceptions = IllegalStateException.class,
    expectedExceptionsMessageRegExp = "An error has occurred trying to flush the buffer synchronously.*")
public void testFlinkKinesisFirehoseProducerTakeSnapshotFailedFlush() throws Exception {
  when(firehoseProducer.addUserRecord(any(Record.class)))
      .thenReturn(getUserRecordResult(false, true));
  doNothing().when(firehoseProducer).flush();
  // Records never drain...
  when(firehoseProducer.getOutstandingRecordsCount()).thenReturn(1).thenReturn(1);
  // ...and the second poll reports the flush as failed.
  when(firehoseProducer.isFlushFailed()).thenReturn(false).thenReturn(true);
  FunctionSnapshotContext functionContext = mock(FunctionSnapshotContext.class);
  flinkKinesisFirehoseProducer.open(properties);
  flinkKinesisFirehoseProducer.invoke("Test", context);
  flinkKinesisFirehoseProducer.snapshotState(functionContext);
  fail("We should not reach here.");
}
/**
 * This test is responsible for testing a scenario where asynchronous record failures
 * are NOT rethrown when closing the sink (producer), because failOnError is false.
 * This is the default scenario for FlinkKinesisFirehoseProducer.
 */
@Test
public void testAsyncErrorNotRethrownOnClose() throws Exception {
    // With failOnError disabled, buffered-record failures must not surface on close().
    flinkKinesisFirehoseProducer.setFailOnError(false);
    when(firehoseProducer.addUserRecord(any(Record.class)))
        .thenReturn(getUserRecordResult(true, false))
        .thenReturn(getUserRecordResult(true, false));
    flinkKinesisFirehoseProducer.open(properties);
    flinkKinesisFirehoseProducer.invoke("Test", context);
    flinkKinesisFirehoseProducer.invoke("Test2", context);
    flinkKinesisFirehoseProducer.close();
    verify(flinkKinesisFirehoseProducer, times(1)).open(properties);
    verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test", context);
    verify(flinkKinesisFirehoseProducer, times(1)).invoke("Test2", context);
    verify(flinkKinesisFirehoseProducer, times(1)).close();
}
/**
 * Asserts that the given exception carries the generic record-processing error
 * message and wraps a RecordCouldNotBeBuffered cause with the default test message.
 */
private void exceptionAssert(FlinkKinesisFirehoseException ex) {
    final String wrapperMessage = "An exception has been thrown while trying to process a record";
    LOGGER.info(ex.getMessage());
    assertThat(ex.getMessage()).isEqualTo(wrapperMessage);
    final Throwable cause = ex.getCause();
    assertThat(cause).isInstanceOf(RecordCouldNotBeBuffered.class);
    LOGGER.info(cause.getMessage());
    assertThat(cause.getMessage()).isEqualTo(DEFAULT_TEST_ERROR_MSG);
}
/**
 * Builds a completed future simulating the outcome of buffering a record.
 *
 * @param isFailedRecord when {@code true}, the returned future completes exceptionally
 *                       with a {@link RecordCouldNotBeBuffered} carrying the default test message
 * @param isSuccessful   the success flag of the {@link UserRecordResult} for the non-failed case
 * @return a future completed exceptionally, or completed with a UserRecordResult
 */
@Nonnull
private CompletableFuture<UserRecordResult> getUserRecordResult(final boolean isFailedRecord, final boolean isSuccessful) {
    if (isFailedRecord) {
        CompletableFuture<UserRecordResult> future = new CompletableFuture<>();
        future.completeExceptionally(new RecordCouldNotBeBuffered(DEFAULT_TEST_ERROR_MSG));
        return future;
    }
    // Build the result only on the success path; previously it was constructed
    // unconditionally and discarded when the record was marked failed.
    return CompletableFuture.completedFuture(new UserRecordResult().setSuccessful(isSuccessful));
}
/**
 * Creates a Mockito spy around a real FlinkKinesisFirehoseProducer wired with the
 * default delivery stream, the test serialization schema, and standard properties.
 */
@Nonnull
private FlinkKinesisFirehoseProducer<String> createProducer() {
    return spy(new FlinkKinesisFirehoseProducer<>(DEFAULT_DELIVERY_STREAM,
        getKinesisFirehoseSerializationSchema(), getStandardProperties()));
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl;
import org.testng.annotations.Test;
import javax.annotation.Nonnull;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_BASE_BACKOFF;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_INTERVAL_BETWEEN_FLUSHES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAXIMUM_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_BACKOFF;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_BUFFER_SIZE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_BUFFER_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_OPERATION_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_NUMBER_OF_RETRIES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_WAIT_TIME_FOR_BUFFER_FULL;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_BASE_BACKOFF_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FLUSH_MAX_NUMBER_OF_RETRIES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FLUSH_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FULL_WAIT_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_BACKOFF_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_SIZE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_MAX_OPERATION_TIMEOUT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
public class FirehoseProducerConfigurationTest {
private static final String REGION = "us-east-1";
@Test
public void testBuilderWithDefaultProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration.builder(REGION).build();
assertThat(configuration.getMaxBufferSize()).isEqualTo(DEFAULT_MAX_BUFFER_SIZE);
assertThat(configuration.getMaxPutRecordBatchBytes()).isEqualTo(DEFAULT_MAXIMUM_BATCH_BYTES);
assertThat(configuration.getNumberOfRetries()).isEqualTo(DEFAULT_NUMBER_OF_RETRIES);
assertThat(configuration.getBufferFullWaitTimeoutInMillis()).isEqualTo(DEFAULT_WAIT_TIME_FOR_BUFFER_FULL);
assertThat(configuration.getBufferTimeoutInMillis()).isEqualTo(DEFAULT_MAX_BUFFER_TIMEOUT);
assertThat(configuration.getBufferTimeoutBetweenFlushes()).isEqualTo(DEFAULT_INTERVAL_BETWEEN_FLUSHES);
assertThat(configuration.getMaxBackOffInMillis()).isEqualTo(DEFAULT_MAX_BACKOFF);
assertThat(configuration.getBaseBackOffInMillis()).isEqualTo(DEFAULT_BASE_BACKOFF);
assertThat(configuration.getMaxOperationTimeoutInMillis()).isEqualTo(DEFAULT_MAX_OPERATION_TIMEOUT);
}
@Test
public void testBuilderWithMaxBufferSize() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withMaxBufferSize(250)
.build();
assertThat(configuration.getMaxBufferSize()).isEqualTo(250);
}
@Test
public void testBuilderWithMaxBufferSizeRejectsZero() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxBufferSize(0))
.withMessageContaining("Buffer size must be between 1 and 500");
}
@Test
public void testBuilderWithMaxBufferSizeRejectsUpperLimit() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxBufferSize(501))
.withMessageContaining("Buffer size must be between 1 and 500");
}
@Test
public void testBuilderWithMaxPutRecordBatchBytes() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withMaxPutRecordBatchBytes(100)
.build();
assertThat(configuration.getMaxPutRecordBatchBytes()).isEqualTo(100);
}
@Test
public void testBuilderWithMaxPutRecordBatchBytesRejectsZero() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxPutRecordBatchBytes(0))
.withMessageContaining("Maximum batch size in bytes must be between 1 and 4194304");
}
@Test
public void testBuilderWithMaxPutRecordBatchBytesRejectsUpperLimit() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxPutRecordBatchBytes(4194305))
.withMessageContaining("Maximum batch size in bytes must be between 1 and 4194304");
}
@Test
public void testBuilderWithNumberOfRetries() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withNumberOfRetries(100)
.build();
assertThat(configuration.getNumberOfRetries()).isEqualTo(100);
}
@Test
public void testBuilderWithNumberOfRetriesRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withNumberOfRetries(-1))
.withMessageContaining("Number of retries cannot be negative");
}
@Test
public void testBuilderWithBufferTimeoutInMillis() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withBufferTimeoutInMillis(12345L)
.build();
assertThat(configuration.getBufferTimeoutInMillis()).isEqualTo(12345L);
}
@Test
public void testBuilderWithBufferTimeoutInMillisRejects() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withBufferTimeoutInMillis(-1))
.withMessageContaining("Flush timeout should be greater than 0");
}
@Test
public void testBuilderWithMaxOperationTimeoutInMillis() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withMaxOperationTimeoutInMillis(999L)
.build();
assertThat(configuration.getMaxOperationTimeoutInMillis()).isEqualTo(999L);
}
@Test
public void testBuilderWithMaxOperationTimeoutInMillisRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxOperationTimeoutInMillis(-1))
.withMessageContaining("Max operation timeout should be greater than 0");
}
@Test
public void testBuilderWithBufferFullWaitTimeoutInMillis() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withBufferFullWaitTimeoutInMillis(1L)
.build();
assertThat(configuration.getBufferFullWaitTimeoutInMillis()).isEqualTo(1L);
}
@Test
public void testBuilderWithBufferFullWaitTimeoutInMillisRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withBufferFullWaitTimeoutInMillis(-1))
.withMessageContaining("Buffer full waiting timeout should be greater than 0");
}
@Test
public void testBuilderWithBufferTimeoutBetweenFlushes() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withBufferTimeoutBetweenFlushes(2L)
.build();
assertThat(configuration.getBufferTimeoutBetweenFlushes()).isEqualTo(2L);
}
@Test
public void testBuilderWithBufferTimeoutBetweenFlushesRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withBufferTimeoutBetweenFlushes(-1))
.withMessageContaining("Interval between flushes cannot be negative");
}
@Test
public void testBuilderWithMaxBackOffInMillis() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withMaxBackOffInMillis(3L)
.build();
assertThat(configuration.getMaxBackOffInMillis()).isEqualTo(3L);
}
@Test
public void testBuilderWithMaxBackOffInMillisRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withMaxBackOffInMillis(-1))
.withMessageContaining("Max backoff timeout should be greater than 0");
}
@Test
public void testBuilderWithBaseBackOffInMillis() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withBaseBackOffInMillis(4L)
.build();
assertThat(configuration.getBaseBackOffInMillis()).isEqualTo(4L);
}
@Test
public void testBuilderWithBaseBackOffInMillisRejectsNegative() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> FirehoseProducerConfiguration.builder(REGION).withBaseBackOffInMillis(-1))
.withMessageContaining("Base backoff timeout should be greater than 0");
}
@Test
public void testBuilderWithMaxBufferSizeFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_MAX_SIZE, "250"))
.build();
assertThat(configuration.getMaxBufferSize()).isEqualTo(250);
}
@Test
public void testBuilderWithMaxPutRecordBatchBytesFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES, "100"))
.build();
assertThat(configuration.getMaxPutRecordBatchBytes()).isEqualTo(100);
}
@Test
public void testBuilderWithNumberOfRetriesFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_FLUSH_MAX_NUMBER_OF_RETRIES, "100"))
.build();
assertThat(configuration.getNumberOfRetries()).isEqualTo(100);
}
@Test
public void testBuilderWithBufferTimeoutInMillisFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT, "12345"))
.build();
assertThat(configuration.getBufferTimeoutInMillis()).isEqualTo(12345L);
}
@Test
public void testBuilderWithMaxOperationTimeoutInMillisFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_MAX_OPERATION_TIMEOUT, "999"))
.build();
assertThat(configuration.getMaxOperationTimeoutInMillis()).isEqualTo(999L);
}
@Test
public void testBuilderWithBufferFullWaitTimeoutInMillisFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_FULL_WAIT_TIMEOUT, "1"))
.build();
assertThat(configuration.getBufferFullWaitTimeoutInMillis()).isEqualTo(1L);
}
@Test
public void testBuilderWithBufferTimeoutBetweenFlushesFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_FLUSH_TIMEOUT, "2"))
.build();
assertThat(configuration.getBufferTimeoutBetweenFlushes()).isEqualTo(2L);
}
@Test
public void testBuilderWithMaxBackOffInMillisFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_MAX_BACKOFF_TIMEOUT, "3"))
.build();
assertThat(configuration.getMaxBackOffInMillis()).isEqualTo(3L);
}
@Test
public void testBuilderWithBaseBackOffInMillisFromProperties() {
FirehoseProducerConfiguration configuration = FirehoseProducerConfiguration
.builder(REGION)
.withProperties(props(FIREHOSE_PRODUCER_BUFFER_BASE_BACKOFF_TIMEOUT, "4"))
.build();
assertThat(configuration.getBaseBackOffInMillis()).isEqualTo(4L);
}
@Nonnull
private Properties props(@Nonnull final String key, @Nonnull final String value) {
Properties properties = new Properties();
properties.setProperty(key, value);
return properties;
}
} | 7,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer.FirehoseThreadFactory;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResponseEntry;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResult;
import com.amazonaws.services.kinesisfirehose.model.Record;
import org.apache.commons.lang3.RandomStringUtils;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.IntStream;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_BUFFER_SIZE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer.UserRecordResult;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.testutils.TestUtils.DEFAULT_DELIVERY_STREAM;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.testng.Assert.fail;
/**
* All tests make relies on best effort to simulate and wait how a multi-threading system should be behave,
* trying to rely on deterministic results, however, the results and timing depends on the operating system scheduler and JVM.
* So, if any of these tests failed, you may want to increase the sleep timeout or perhaps comment out the failed ones.
*/
/**
 * All tests rely on a best effort to simulate and wait for how a multi-threaded system
 * should behave, trying to rely on deterministic results; however, the results and timing
 * depend on the operating system scheduler and the JVM. So, if any of these tests fail,
 * you may want to increase the sleep timeout or perhaps comment out the failing ones.
 */
public class FirehoseProducerTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(FirehoseProducerTest.class);
    private static final int KB_512 = 512 * 1_024;

    @Mock
    private AmazonKinesisFirehose firehoseClient;

    private FirehoseProducer<UserRecordResult, Record> firehoseProducer;

    @Captor
    private ArgumentCaptor<PutRecordBatchRequest> putRecordCaptor;

    @BeforeMethod
    public void init() {
        MockitoAnnotations.initMocks(this);
        this.firehoseProducer = createFirehoseProducer();
    }

    /** A full buffer written from a single thread should drain completely. */
    @Test
    public void testFirehoseProducerSingleThreadHappyCase() throws Exception {
        PutRecordBatchResult successResult = new PutRecordBatchResult();
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        for (int i = 0; i < DEFAULT_MAX_BUFFER_SIZE; ++i) {
            addRecord(firehoseProducer);
        }
        // Give the background flusher time to drain the buffer.
        Thread.sleep(2000);
        LOGGER.debug("Number of outstanding records: {}", firehoseProducer.getOutstandingRecordsCount());
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
    }

    /** Concurrent writers should not leave any records outstanding. */
    @Test
    public void testFirehoseProducerMultiThreadHappyCase() throws Exception {
        PutRecordBatchResult successResult = new PutRecordBatchResult();
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        ExecutorService exec = Executors.newFixedThreadPool(4);
        List<Callable<CompletableFuture<UserRecordResult>>> futures = new ArrayList<>();
        for (int j = 0; j < DEFAULT_MAX_BUFFER_SIZE; ++j) {
            futures.add(() -> addRecord(firehoseProducer));
        }
        exec.invokeAll(futures);
        // invokeAll blocks until all tasks complete, so the pool can be released now.
        exec.shutdown();

        // join(3000) on the current thread is effectively a bounded sleep, giving the
        // producer's flusher time to drain.
        Thread.currentThread().join(3000);
        LOGGER.debug("Number of outstanding items: {}", firehoseProducer.getOutstandingRecordsCount());
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
    }

    /** flushSync() after concurrent writes must drain the buffer without failures. */
    @Test
    public void testFirehoseProducerMultiThreadFlushSyncHappyCase() throws Exception {
        PutRecordBatchResult successResult = mock(PutRecordBatchResult.class);
        ArgumentCaptor<PutRecordBatchRequest> captor = ArgumentCaptor.forClass(PutRecordBatchRequest.class);
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        ExecutorService exec = Executors.newFixedThreadPool(4);
        List<Callable<CompletableFuture<UserRecordResult>>> futures = new ArrayList<>();
        for (int j = 0; j < 400; ++j) {
            futures.add(() -> addRecord(firehoseProducer));
        }
        List<Future<CompletableFuture<UserRecordResult>>> results = exec.invokeAll(futures);
        exec.shutdown();

        // Every submitted record must have been buffered successfully.
        for (Future<CompletableFuture<UserRecordResult>> f : results) {
            while (!f.isDone()) {
                Thread.sleep(100);
            }
            CompletableFuture<UserRecordResult> fi = f.get();
            UserRecordResult r = fi.get();
            assertThat(r.isSuccessful()).isTrue();
        }
        firehoseProducer.flushSync();
        LOGGER.debug("Number of outstanding items: {}", firehoseProducer.getOutstandingRecordsCount());
        verify(firehoseClient).putRecordBatch(captor.capture());
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
        assertThat(firehoseProducer.isFlushFailed()).isFalse();
    }

    /** Repeated flush() calls with polling must eventually drain the buffer. */
    @Test
    public void testFirehoseProducerMultiThreadFlushAndWaitHappyCase() throws Exception {
        PutRecordBatchResult successResult = mock(PutRecordBatchResult.class);
        ArgumentCaptor<PutRecordBatchRequest> captor = ArgumentCaptor.forClass(PutRecordBatchRequest.class);
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        ExecutorService exec = Executors.newFixedThreadPool(4);
        List<Callable<CompletableFuture<UserRecordResult>>> futures = new ArrayList<>();
        for (int j = 0; j < 400; ++j) {
            futures.add(() -> addRecord(firehoseProducer));
        }
        List<Future<CompletableFuture<UserRecordResult>>> results = exec.invokeAll(futures);
        exec.shutdown();

        for (Future<CompletableFuture<UserRecordResult>> f : results) {
            while (!f.isDone()) {
                Thread.sleep(100);
            }
            CompletableFuture<UserRecordResult> fi = f.get();
            UserRecordResult r = fi.get();
            assertThat(r.isSuccessful()).isTrue();
        }
        // Keep flushing until the buffer is empty or a flush failure is reported.
        while (firehoseProducer.getOutstandingRecordsCount() > 0 && !firehoseProducer.isFlushFailed()) {
            firehoseProducer.flush();
            try {
                Thread.sleep(500);
            } catch (InterruptedException ex) {
                fail();
            }
        }
        LOGGER.debug("Number of outstanding items: {}", firehoseProducer.getOutstandingRecordsCount());
        verify(firehoseClient).putRecordBatch(captor.capture());
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
        assertThat(firehoseProducer.isFlushFailed()).isFalse();
    }

    /** A partially filled buffer should still flush once the buffer timeout expires. */
    @Test
    public void testFirehoseProducerSingleThreadTimeoutExpiredHappyCase() throws Exception {
        PutRecordBatchResult successResult = new PutRecordBatchResult();
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        for (int i = 0; i < 100; ++i) {
            addRecord(firehoseProducer);
        }
        Thread.sleep(2000);
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
    }

    /** Writing twice the buffer capacity should force intermediate flushes and drain fully. */
    @Test
    public void testFirehoseProducerSingleThreadBufferIsFullHappyCase() throws Exception {
        PutRecordBatchResult successResult = new PutRecordBatchResult();
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(successResult);

        for (int i = 0; i < 2 * DEFAULT_MAX_BUFFER_SIZE; ++i) {
            addRecord(firehoseProducer);
        }
        Thread.sleep(2000);
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
    }

    /**
     * This test is responsible for checking if the consumer thread has performed the work or not,
     * so there is no way for an exception to be thrown and caught here; the assertion goes along
     * with the fact of whether the buffer was flushed or not.
     */
    @Test
    public void testFirehoseProducerSingleThreadFailedToSendRecords() throws Exception {
        PutRecordBatchResult failedResult = new PutRecordBatchResult()
            .withFailedPutCount(1)
            .withRequestResponses(new PutRecordBatchResponseEntry()
                .withErrorCode("400")
                .withErrorMessage("Invalid Schema"));
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class))).thenReturn(failedResult);

        for (int i = 0; i < DEFAULT_MAX_BUFFER_SIZE; ++i) {
            addRecord(firehoseProducer);
        }
        Thread.sleep(2000);
        // Nothing could be delivered, so every record stays outstanding and the flush is failed.
        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(DEFAULT_MAX_BUFFER_SIZE);
        assertThat(firehoseProducer.isFlushFailed()).isTrue();
    }

    /** Records exceeding the maximum batch byte size must be split into multiple batches. */
    @Test
    public void testFirehoseProducerBatchesRecords() throws Exception {
        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class)))
            .thenReturn(new PutRecordBatchResult());

        // Fill up the maximum capacity: 8 * 512kB = 4MB
        IntStream.range(0, 8).forEach(i -> addRecord(firehoseProducer, KB_512));
        // Add a single byte to overflow the maximum
        addRecord(firehoseProducer, 1);

        Thread.sleep(3000);

        assertThat(firehoseProducer.getOutstandingRecordsCount()).isEqualTo(0);
        verify(firehoseClient, times(2)).putRecordBatch(putRecordCaptor.capture());

        // The first batch should contain 8 records (up to 4MB); the second should contain
        // the single remaining 1-byte record.
        assertThat(putRecordCaptor.getAllValues().get(0).getRecords())
            .hasSize(8).allMatch(e -> e.getData().limit() == KB_512);
        assertThat(putRecordCaptor.getAllValues().get(1).getRecords())
            .hasSize(1).allMatch(e -> e.getData().limit() == 1);
    }

    /** A custom (small) batch byte limit must also split records into multiple batches. */
    @Test
    public void testFirehoseProducerBatchesRecordsWithCustomBatchSize() throws Exception {
        Properties config = new Properties();
        config.setProperty(FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES, "100");
        FirehoseProducer<UserRecordResult, Record> producer = createFirehoseProducer(config);

        when(firehoseClient.putRecordBatch(any(PutRecordBatchRequest.class)))
            .thenReturn(new PutRecordBatchResult());

        // Overflow the 100-byte batch limit: 2 records of 100 bytes each.
        IntStream.range(0, 2).forEach(i -> addRecord(producer, 100));

        Thread.sleep(3000);

        // Fixed: the assertion previously inspected the shared `firehoseProducer` field,
        // which never received any records here, so it was trivially zero. The records
        // were added to the local `producer`, which is the instance that must be drained.
        assertThat(producer.getOutstandingRecordsCount()).isEqualTo(0);
        verify(firehoseClient, times(2)).putRecordBatch(putRecordCaptor.capture());

        // Each batch should contain a single 100-byte record.
        assertThat(putRecordCaptor.getAllValues().get(0).getRecords())
            .hasSize(1).allMatch(e -> e.getData().limit() == 100);
        assertThat(putRecordCaptor.getAllValues().get(1).getRecords())
            .hasSize(1).allMatch(e -> e.getData().limit() == 100);
    }

    /** Threads created by the factory are non-daemon and named with a monotonic index. */
    @Test
    public void testThreadFactoryNewThreadName() {
        FirehoseThreadFactory threadFactory = new FirehoseThreadFactory();

        Thread thread1 = threadFactory.newThread(() -> LOGGER.info("Running task 1"));
        Thread thread2 = threadFactory.newThread(() -> LOGGER.info("Running task 2"));
        Thread thread3 = threadFactory.newThread(() -> LOGGER.info("Running task 3"));

        // Thread index is allocated statically, so we cannot deterministically guarantee the
        // thread number. Work out thread1's number and then check subsequent thread names.
        int threadNumber = Integer.parseInt(thread1.getName().substring(thread1.getName().lastIndexOf('-') + 1));

        assertThat(thread1.getName()).isEqualTo("kda-writer-thread-" + threadNumber++);
        assertThat(thread1.isDaemon()).isFalse();
        assertThat(thread2.getName()).isEqualTo("kda-writer-thread-" + threadNumber++);
        assertThat(thread2.isDaemon()).isFalse();
        assertThat(thread3.getName()).isEqualTo("kda-writer-thread-" + threadNumber);
        assertThat(thread3.isDaemon()).isFalse();
    }

    /** Adds a 64-byte random record to the given producer. */
    @Nonnull
    private CompletableFuture<UserRecordResult> addRecord(final FirehoseProducer<UserRecordResult, Record> producer) {
        return addRecord(producer, 64);
    }

    /**
     * Adds a random alphabetic record of {@code length} bytes to the given producer,
     * wrapping any checked exception as a RuntimeException.
     */
    @Nonnull
    private CompletableFuture<UserRecordResult> addRecord(final FirehoseProducer<UserRecordResult, Record> producer, final int length) {
        try {
            Record record = new Record().withData(ByteBuffer.wrap(
                RandomStringUtils.randomAlphabetic(length).getBytes()));
            return producer.addUserRecord(record);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** Creates a producer with default test configuration. */
    @Nonnull
    private FirehoseProducer<UserRecordResult, Record> createFirehoseProducer() {
        return createFirehoseProducer(new Properties());
    }

    /**
     * Creates a producer against the mocked Firehose client, forcing a short (1s) buffer
     * timeout so tests do not have to wait for the default flush interval.
     */
    @Nonnull
    private FirehoseProducer<UserRecordResult, Record> createFirehoseProducer(@Nonnull final Properties config) {
        config.setProperty(FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT, "1000");
        config.setProperty(AWS_REGION, "us-east-1");
        return new FirehoseProducer<>(DEFAULT_DELIVERY_STREAM, firehoseClient, config);
    }
}
| 7,273 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.util;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential.BasicCredentialProvider;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import javax.annotation.Nonnull;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_ACCESS_KEY_ID;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_PROFILE_NAME;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_SECRET_ACCESS_KEY;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.ASSUME_ROLE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.AUTO;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.BASIC;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAXIMUM_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.REDUCED_QUOTA_MAXIMUM_THROUGHPUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.createKinesisFirehoseClientFromConfiguration;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.getCredentialProviderType;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.validateAssumeRoleCredentialsProvider;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.validateBasicProviderConfiguration;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.validateConfiguration;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.validateProfileProviderConfiguration;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
/**
 * Unit tests for {@code AWSUtil} covering: Firehose client construction,
 * validation of configurations for the BASIC / PROFILE / ASSUME_ROLE
 * credential providers, resolution of the credential provider type from
 * properties, and the region-dependent default PutRecordBatch byte limit.
 */
public class AWSUtilTest {
    // Baseline configuration carrying BASIC credentials, a profile name and a
    // region. Rebuilt before every test so per-test mutations (remove /
    // setProperty) cannot leak between test methods.
    private Properties configProps;
    @BeforeMethod
    public void setUp() {
        configProps = new Properties();
        configProps.setProperty(AWS_ACCESS_KEY_ID, "DUMMY");
        configProps.setProperty(AWS_SECRET_ACCESS_KEY, "DUMMY-SECRET");
        configProps.setProperty(AWS_PROFILE_NAME, "Test");
        configProps.setProperty(AWS_REGION, "us-east-1");
    }
    // ------- createKinesisFirehoseClientFromConfiguration -------
    // Both arguments are mandatory; null config or null provider must fail fast.
    @Test
    public void testCreateKinesisFirehoseClientFromConfigurationWithNullConfiguration() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> createKinesisFirehoseClientFromConfiguration(null, new BasicCredentialProvider(configProps)))
                .withMessageContaining("Configuration properties cannot be null");
    }
    @Test
    public void testCreateKinesisFirehoseClientFromConfigurationWithNullCredentialProvider() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> createKinesisFirehoseClientFromConfiguration(configProps, null))
                .withMessageContaining("Credential Provider cannot be null");
    }
    @Test
    public void testCreateKinesisFirehoseClientFromConfigurationHappyCase() {
        // Client construction is local; no AWS call is made here.
        AmazonKinesisFirehose firehoseClient = createKinesisFirehoseClientFromConfiguration(configProps,
                new BasicCredentialProvider(configProps));
        assertThat(firehoseClient).isNotNull();
    }
    // ------- validateConfiguration -------
    @Test
    public void testValidateConfigurationWithNullConfiguration() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> validateConfiguration(null))
                .withMessageContaining("Configuration properties cannot be null");
    }
    @Test
    public void testValidateConfigurationWithNoRegionOrFirehoseEndpoint() {
        // A region OR an endpoint + signing region is required; neither is set.
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateConfiguration(new Properties()))
                .withMessageContaining("Either AWS region should be specified or AWS Firehose endpoint and endpoint signing region");
    }
    @Test
    public void testValidateConfigurationHappyCase() {
        // Successful validation returns the same (unmodified) properties.
        Properties config = validateConfiguration(configProps);
        assertThat(configProps).isEqualTo(config);
    }
    // ------- validateBasicProviderConfiguration -------
    @Test
    public void testValidateBasicConfigurationHappyCase() {
        Properties config = validateBasicProviderConfiguration(configProps);
        assertThat(configProps).isEqualTo(config);
    }
    @Test
    public void testValidateBasicConfigurationWithNullConfiguration() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> validateBasicProviderConfiguration(null))
                .withMessageContaining("Configuration properties cannot be null");
    }
    @Test
    public void testValidateBasicConfigurationWithNoAwsAccessKeyId() {
        configProps.remove(AWS_ACCESS_KEY_ID);
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateBasicProviderConfiguration(configProps))
                .withMessageContaining("AWS access key must be specified with credential provider BASIC");
    }
    @Test
    public void testValidateBasicConfigurationWithNoAwsSecretKeyId() {
        configProps.remove(AWS_SECRET_ACCESS_KEY);
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateBasicProviderConfiguration(configProps))
                .withMessageContaining("AWS secret key must be specified with credential provider BASIC");
    }
    // ------- validateProfileProviderConfiguration -------
    @Test
    public void testValidateProfileProviderConfigurationWithNullConfiguration() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> validateProfileProviderConfiguration(null))
                .withMessageContaining("Configuration properties cannot be null");
    }
    @Test
    public void testValidateProfileProviderConfigurationHappyCase() {
        Properties config = validateProfileProviderConfiguration(configProps);
        assertThat(configProps).isEqualTo(config);
    }
    @Test
    public void testValidateProfileProviderConfigurationWithNoProfileName() {
        configProps.remove(AWS_PROFILE_NAME);
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateProfileProviderConfiguration(configProps))
                .withMessageContaining("AWS profile name should be specified with credential provider PROFILE");
    }
    // ------- validateAssumeRoleCredentialsProvider -------
    @Test
    public void testValidateAssumeRoleProviderConfigurationHappyCase() {
        Properties properties = buildAssumeRoleProperties();
        assertThat(validateAssumeRoleCredentialsProvider(properties)).isEqualTo(properties);
    }
    @Test
    public void testValidateAssumeRoleProviderConfigurationWithNoRoleArn() {
        Properties properties = buildAssumeRoleProperties();
        properties.remove(AWSConfigConstants.roleArn(AWS_CREDENTIALS_PROVIDER));
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateAssumeRoleCredentialsProvider(properties))
                .withMessageContaining("AWS role arn to be assumed must be provided with credential provider type ASSUME_ROLE");
    }
    @Test
    public void testValidateAssumeRoleProviderConfigurationWithNoRoleSessionName() {
        Properties properties = buildAssumeRoleProperties();
        properties.remove(AWSConfigConstants.roleSessionName(AWS_CREDENTIALS_PROVIDER));
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> validateAssumeRoleCredentialsProvider(properties))
                .withMessageContaining("AWS role session name must be provided with credential provider type ASSUME_ROLE");
    }
    @Test
    public void testValidateAssumeRoleProviderConfigurationWithNullConfiguration() {
        assertThatExceptionOfType(NullPointerException.class)
                .isThrownBy(() -> validateAssumeRoleCredentialsProvider(null))
                .withMessageContaining("Configuration properties cannot be null");
    }
    /**
     * Returns a copy of the baseline config extended with the role ARN and
     * role session name keys (scoped under AWS_CREDENTIALS_PROVIDER) that the
     * ASSUME_ROLE validator requires.
     */
    @Nonnull
    private Properties buildAssumeRoleProperties() {
        Properties properties = new Properties();
        properties.putAll(configProps);
        properties.put(AWSConfigConstants.roleArn(AWS_CREDENTIALS_PROVIDER), "arn-1234567812345678");
        properties.put(AWSConfigConstants.roleSessionName(AWS_CREDENTIALS_PROVIDER), "session-name");
        return properties;
    }
    // ------- getCredentialProviderType -------
    // Falls back to AUTO whenever an explicit provider key is absent/blank/
    // unparsable, or when the implicit BASIC pair (access + secret) is
    // incomplete.
    @Test
    public void testGetCredentialProviderTypeIsAutoNullProviderKey() {
        assertThat(getCredentialProviderType(new Properties(), null)).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsAutoWithProviderKeyMismatch() {
        assertThat(getCredentialProviderType(configProps, "missing-key")).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsAutoMissingAccessKey() {
        configProps.remove(AWS_ACCESS_KEY_ID);
        assertThat(getCredentialProviderType(configProps, null)).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsAutoMissingSecretKey() {
        configProps.remove(AWS_SECRET_ACCESS_KEY);
        assertThat(getCredentialProviderType(configProps, null)).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsBasic() {
        // Access key + secret key present and no provider key -> BASIC.
        assertThat(getCredentialProviderType(configProps, null)).isEqualTo(BASIC);
    }
    @Test
    public void testGetCredentialProviderTypeIsAutoWithEmptyProviderKey() {
        configProps.setProperty("key", "");
        assertThat(getCredentialProviderType(configProps, "key")).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsAutoWithBadConfiguration() {
        configProps.setProperty("key", "Bad");
        assertThat(getCredentialProviderType(configProps, "key")).isEqualTo(AUTO);
    }
    @Test
    public void testGetCredentialProviderTypeIsParsedFromProviderKey() {
        configProps.setProperty("key", "ASSUME_ROLE");
        assertThat(getCredentialProviderType(configProps, "key")).isEqualTo(ASSUME_ROLE);
    }
    // ------- getDefaultMaxPutRecordBatchBytes -------
    // Unknown/null regions get the conservative reduced-quota limit; the three
    // high-quota regions below get the full default batch size.
    @Test
    public void testGetDefaultMaxPutRecordBatchBytesForNullRegion() {
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes(null)).isEqualTo(REDUCED_QUOTA_MAXIMUM_THROUGHPUT);
    }
    @Test
    public void testGetDefaultMaxPutRecordBatchBytesForHighQuotaRegions() {
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("us-east-1")).isEqualTo(DEFAULT_MAXIMUM_BATCH_BYTES);
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("us-west-2")).isEqualTo(DEFAULT_MAXIMUM_BATCH_BYTES);
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("eu-west-1")).isEqualTo(DEFAULT_MAXIMUM_BATCH_BYTES);
    }
    @Test
    public void testGetDefaultMaxPutRecordBatchBytesForReducedQuotaRegions() {
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("us-east-2")).isEqualTo(REDUCED_QUOTA_MAXIMUM_THROUGHPUT);
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("us-west-1")).isEqualTo(REDUCED_QUOTA_MAXIMUM_THROUGHPUT);
        assertThat(AWSUtil.getDefaultMaxPutRecordBatchBytes("eu-west-2")).isEqualTo(REDUCED_QUOTA_MAXIMUM_THROUGHPUT);
    }
}
| 7,274 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/config/AWSConfigConstantsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.config;
import org.testng.annotations.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
public class AWSConfigConstantsTest {
@Test
public void testAccessKeyId() {
assertThat(AWSConfigConstants.accessKeyId("prefix")).isEqualTo("prefix.basic.aws_access_key_id");
}
@Test
public void testAccessKeyId_null() {
assertThat(AWSConfigConstants.accessKeyId(null)).isEqualTo("aws_access_key_id");
}
@Test
public void testAccessKeyId_empty() {
assertThat(AWSConfigConstants.accessKeyId("")).isEqualTo("aws_access_key_id");
}
@Test
public void testAccessKeyId_noPrefix() {
assertThat(AWSConfigConstants.accessKeyId()).isEqualTo("aws_access_key_id");
}
@Test
public void testSecretKey() {
assertThat(AWSConfigConstants.secretKey("prefix")).isEqualTo("prefix.basic.aws_secret_access_key");
}
@Test
public void testSecretKey_null() {
assertThat(AWSConfigConstants.secretKey(null)).isEqualTo("aws_secret_access_key");
}
@Test
public void testSecretKey_empty() {
assertThat(AWSConfigConstants.secretKey("")).isEqualTo("aws_secret_access_key");
}
@Test
public void testSecretKey_noPrefix() {
assertThat(AWSConfigConstants.secretKey()).isEqualTo("aws_secret_access_key");
}
@Test
public void testProfilePath() {
assertThat(AWSConfigConstants.profilePath("prefix")).isEqualTo("prefix.profile.path");
}
@Test
public void testProfilePath_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.profilePath(""));
}
@Test
public void testProfileName() {
assertThat(AWSConfigConstants.profileName("prefix")).isEqualTo("prefix.profile.name");
}
@Test
public void testProfileName_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.profileName(""));
}
@Test
public void testRoleArn() {
assertThat(AWSConfigConstants.roleArn("prefix")).isEqualTo("prefix.role.arn");
}
@Test
public void testRoleArn_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.roleArn(""));
}
@Test
public void testRoleSessionName() {
assertThat(AWSConfigConstants.roleSessionName("prefix")).isEqualTo("prefix.role.sessionName");
}
@Test
public void testRoleSessionName_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.roleSessionName(""));
}
@Test
public void testExternalId() {
assertThat(AWSConfigConstants.externalId("prefix")).isEqualTo("prefix.role.externalId");
}
@Test
public void testExternalId_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.externalId(""));
}
@Test
public void testRoleCredentialsProvider() {
assertThat(AWSConfigConstants.roleCredentialsProvider("prefix")).isEqualTo("prefix.role.provider");
}
@Test
public void testRoleCredentialsProvider_empty() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AWSConfigConstants.roleCredentialsProvider(""));
}
} | 7,275 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/serialization/JsonSerializationSchemaTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.serialization;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.SerializationException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.testng.Assert.assertNotNull;
/**
 * Tests for {@code JsonSerializationSchema}: a serializable POJO round-trips
 * to bytes, null input is rejected, and a POJO Jackson cannot introspect
 * (private fields, no annotations/getters) surfaces as SerializationException.
 */
public class JsonSerializationSchemaTest {

    private JsonSerializationSchema<TestSerializable> serializationSchema;

    @BeforeMethod
    public void init() {
        serializationSchema = new JsonSerializationSchema<>();
    }

    @Test
    public void testJsonSerializationSchemaHappyCase() {
        byte[] bytes = serializationSchema.serialize(new TestSerializable(1, "Test description"));
        assertNotNull(bytes);
    }

    @Test(expectedExceptions = NullPointerException.class)
    public void testJsonSerializationSchemaNullCase() {
        serializationSchema.serialize(null);
    }

    @Test(expectedExceptions = SerializationException.class,
        expectedExceptionsMessageRegExp = "Failed trying to serialize.*")
    public void testJsonSerializationSchemaInvalidSerializable() {
        // No @JsonSerialize annotations and no accessors, so Jackson finds no
        // serializable properties and the schema wraps the failure.
        JsonSerializationSchema<TestInvalidSerializable> schema = new JsonSerializationSchema<>();
        schema.serialize(new TestInvalidSerializable("Unit", "Test"));
    }

    /** POJO that Jackson can serialize via its annotated fields. */
    private static class TestSerializable {
        @JsonSerialize
        private final int id;
        @JsonSerialize
        private final String description;

        @JsonCreator
        public TestSerializable(final int id, final String desc) {
            this.id = id;
            this.description = desc;
        }
    }

    /** POJO deliberately opaque to Jackson: private fields, no annotations. */
    private static class TestInvalidSerializable {
        private final String firstName;
        private final String lastName;

        public TestInvalidSerializable(final String firstName, final String lastName) {
            this.firstName = firstName;
            this.lastName = lastName;
        }
    }
}
| 7,276 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/BasicCredentialProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.util.Properties;
import static org.assertj.core.api.Assertions.assertThat;
public class BasicCredentialProviderTest {
private BasicCredentialProvider basicCredentialProvider;
@BeforeMethod
public void setUp() {
Properties properties = new Properties();
properties.put(AWSConfigConstants.accessKeyId(), "ACCESS");
properties.put(AWSConfigConstants.secretKey(), "SECRET");
properties.put(AWSConfigConstants.AWS_REGION, "eu-west-2");
basicCredentialProvider = new BasicCredentialProvider(properties);
}
@Test
public void testGetAwsCredentialsProvider() {
AWSCredentials credentials = basicCredentialProvider.getAwsCredentialsProvider().getCredentials();
assertThat(credentials.getAWSAccessKeyId()).isEqualTo("ACCESS");
assertThat(credentials.getAWSSecretKey()).isEqualTo("SECRET");
}
@Test
public void testGetAwsCredentialsProviderSuppliesCredentialsAfterRefresh() {
AWSCredentialsProvider provider = basicCredentialProvider.getAwsCredentialsProvider();
provider.refresh();
AWSCredentials credentials = provider.getCredentials();
assertThat(credentials.getAWSAccessKeyId()).isEqualTo("ACCESS");
assertThat(credentials.getAWSSecretKey()).isEqualTo("SECRET");
}
} | 7,277 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/ProfileCredentialProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.testng.annotations.Test;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static org.assertj.core.api.Assertions.assertThat;
public class ProfileCredentialProviderTest {
@Test
public void testGetAwsCredentialsProvider() {
Properties properties = new Properties();
properties.put(AWS_REGION, "eu-west-2");
properties.put(AWSConfigConstants.profileName(AWS_CREDENTIALS_PROVIDER), "default");
properties.put(AWSConfigConstants.profilePath(AWS_CREDENTIALS_PROVIDER), "src/test/resources/profile");
AWSCredentials credentials = new ProfileCredentialProvider(properties)
.getAwsCredentialsProvider().getCredentials();
assertThat(credentials.getAWSAccessKeyId()).isEqualTo("AKIAIOSFODNN7EXAMPLE");
assertThat(credentials.getAWSSecretKey()).isEqualTo("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
}
} | 7,278 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/CredentialProviderFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_ACCESS_KEY_ID;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_PROFILE_NAME;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_SECRET_ACCESS_KEY;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.ASSUME_ROLE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.AUTO;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.BASIC;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.ENV_VARIABLES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.PROFILE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.SYS_PROPERTIES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential.factory.CredentialProviderFactory.newCredentialProvider;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
/**
 * Tests for {@code CredentialProviderFactory#newCredentialProvider}: each
 * {@code CredentialProviderType} maps to its provider implementation, a null
 * type falls back to the default provider, and BASIC / PROFILE / ASSUME_ROLE
 * reject configurations missing their mandatory keys.
 */
public class CredentialProviderFactoryTest {

    /** Region-only base configuration, recreated before each test. */
    private Properties configProps;

    @BeforeMethod
    public void setUp() {
        configProps = new Properties();
        configProps.setProperty(AWS_REGION, "us-west-2");
    }

    /** Adds the static key pair the BASIC provider requires. */
    private void addBasicKeys() {
        configProps.setProperty(AWS_ACCESS_KEY_ID, "accessKeyId");
        configProps.setProperty(AWS_SECRET_ACCESS_KEY, "secretAccessKey");
    }

    @Test
    public void testBasicCredentialProviderHappyCase() {
        addBasicKeys();
        assertThat(newCredentialProvider(BASIC, configProps)).isInstanceOf(BasicCredentialProvider.class);
    }

    @Test
    public void testBasicCredentialProviderWithNullProviderKey() {
        addBasicKeys();
        // A null provider key is tolerated; only the type and config matter.
        assertThat(newCredentialProvider(BASIC, configProps, null)).isInstanceOf(BasicCredentialProvider.class);
    }

    @Test
    public void testBasicCredentialProviderInvalidConfigurationProperties() {
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> newCredentialProvider(BASIC, configProps))
                .withMessageContaining("AWS access key must be specified with credential provider BASIC.");
    }

    @Test
    public void testProfileCredentialProviderHappyCase() {
        configProps.setProperty(AWS_PROFILE_NAME, "TEST");
        assertThat(newCredentialProvider(PROFILE, configProps)).isInstanceOf(ProfileCredentialProvider.class);
    }

    @Test
    public void testProfileCredentialProviderInvalidConfigurationProperties() {
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> newCredentialProvider(PROFILE, configProps))
                .withMessageContaining("AWS profile name should be specified with credential provider PROFILE.");
    }

    @Test
    public void testEnvironmentCredentialProviderHappyCase() {
        assertThat(newCredentialProvider(ENV_VARIABLES, configProps)).isInstanceOf(EnvironmentCredentialProvider.class);
    }

    @Test
    public void testSystemCredentialProviderHappyCase() {
        assertThat(newCredentialProvider(SYS_PROPERTIES, configProps)).isInstanceOf(SystemCredentialProvider.class);
    }

    @Test
    public void testDefaultCredentialProviderHappyCase() {
        assertThat(newCredentialProvider(AUTO, configProps)).isInstanceOf(DefaultCredentialProvider.class);
    }

    @Test
    public void testCredentialProviderWithNullProvider() {
        // Null type degrades to the default (AUTO-equivalent) provider.
        assertThat(newCredentialProvider(null, configProps)).isInstanceOf(DefaultCredentialProvider.class);
    }

    @Test
    public void testAssumeRoleCredentialProviderHappyCase() {
        configProps.setProperty(AWSConfigConstants.roleArn(AWS_CREDENTIALS_PROVIDER), "arn-1234567812345678");
        configProps.setProperty(AWSConfigConstants.roleSessionName(AWS_CREDENTIALS_PROVIDER), "role-session");
        assertThat(newCredentialProvider(ASSUME_ROLE, configProps)).isInstanceOf(AssumeRoleCredentialsProvider.class);
    }

    @Test
    public void testAssumeRoleCredentialProviderInvalidConfigurationProperties() {
        assertThatExceptionOfType(IllegalArgumentException.class)
                .isThrownBy(() -> newCredentialProvider(ASSUME_ROLE, configProps))
                .withMessageContaining("AWS role arn to be assumed must be provided with credential provider type ASSUME_ROLE");
    }
}
| 7,279 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/CredentialProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.testng.annotations.Test;
import java.util.Properties;
import static org.assertj.core.api.Assertions.assertThat;
public class CredentialProviderTest {
@Test
public void testGetProperties() {
String key = "key";
Properties properties = new Properties();
properties.put(AWSConfigConstants.accessKeyId(key), "ACCESS");
properties.put(AWSConfigConstants.secretKey(key), "SECRET");
properties.put(AWSConfigConstants.AWS_REGION, "eu-west-2");
CredentialProvider provider = new BasicCredentialProvider(properties, key);
assertThat(provider.getProperties()).isEqualTo(properties);
assertThat(provider.getProviderKey()).isEqualTo(key);
}
} | 7,280 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/AssumeRoleCredentialsProviderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.testng.annotations.Test;
import javax.annotation.Nonnull;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
public class AssumeRoleCredentialsProviderTest {
@Test
public void testGetAwsCredentialsProviderWithDefaultPrefix() {
Properties properties = createAssumeRoleProperties(AWS_CREDENTIALS_PROVIDER);
AssumeRoleCredentialsProvider credentialsProvider = new AssumeRoleCredentialsProvider(properties);
assertGetAwsCredentialsProvider(credentialsProvider);
}
@Test
public void testGetAwsCredentialsProviderWithCustomPrefix() {
Properties properties = createAssumeRoleProperties("prefix");
AssumeRoleCredentialsProvider credentialsProvider = new AssumeRoleCredentialsProvider(properties, "prefix");
assertGetAwsCredentialsProvider(credentialsProvider);
}
private void assertGetAwsCredentialsProvider(@Nonnull final AssumeRoleCredentialsProvider credentialsProvider) {
STSAssumeRoleSessionCredentialsProvider expected = mock(STSAssumeRoleSessionCredentialsProvider.class);
AssumeRoleCredentialsProvider provider = spy(credentialsProvider);
doReturn(expected).when(provider).createAwsCredentialsProvider(any(), anyString(), anyString(), any());
assertThat(provider.getAwsCredentialsProvider()).isEqualTo(expected);
verify(provider).createAwsCredentialsProvider(eq("arn-1234567812345678"), eq("session-name"), eq("external-id"), any());
}
@Nonnull
private Properties createAssumeRoleProperties(@Nonnull final String prefix) {
Properties properties = new Properties();
properties.put(AWSConfigConstants.AWS_REGION, "eu-west-2");
properties.put(AWSConfigConstants.roleArn(prefix), "arn-1234567812345678");
properties.put(AWSConfigConstants.roleSessionName(prefix), "session-name");
properties.put(AWSConfigConstants.externalId(prefix), "external-id");
return properties;
}
} | 7,281 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose/examples/SimpleStreamString.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.firehose.examples;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.FlinkKinesisFirehoseProducer;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import java.util.Properties;
/**
 * Example application that streams generated dummy strings to a Kinesis Firehose
 * delivery stream using static credentials. Replace the placeholder access key,
 * secret key and stream name before running.
 */
public class SimpleStreamString {
    private static final String SINK_NAME = "Flink Kinesis Firehose Sink";

    /** Wires the dummy event source to a Firehose sink and runs the job. */
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        DataStream<String> simpleStringStream = env.addSource(new EventsGenerator());
        Properties configProps = new Properties();
        configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "aws_access_key_id");
        configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "aws_secret_access_key");
        configProps.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
        FlinkKinesisFirehoseProducer<String> producer =
                new FlinkKinesisFirehoseProducer<>("firehose-delivery-stream-name", new SimpleStringSchema(),
                        configProps);
        simpleStringStream.addSink(producer).name(SINK_NAME);
        env.execute();
    }

    /**
     * Data generator that creates strings starting with a sequence number followed by a dash and 12 random characters.
     */
    public static class EventsGenerator implements SourceFunction<String> {
        // volatile is required: cancel() is invoked from a different thread than run(),
        // so without it the emitting loop may never observe the flag change and the
        // source could fail to stop.
        private volatile boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            long seq = 0;
            while (running) {
                Thread.sleep(10);
                ctx.collect((seq++) + "-" + RandomStringUtils.randomAlphabetic(12));
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    }
}
| 7,282 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose/examples/WordCountData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.firehose.examples;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
/**
 * Static test fixture: a passage from Hamlet used as input for the word-count
 * examples, plus a helper that exposes it as a Flink {@link DataSet}.
 */
public class WordCountData {

    /** One element per line of the source text. */
    public static final String[] WORDS = new String[] {
        "To be, or not to be,--that is the question:--",
        "Whether 'tis nobler in the mind to suffer",
        "The slings and arrows of outrageous fortune",
        "Or to take arms against a sea of troubles,",
        "And by opposing end them?--To die,--to sleep,--",
        "No more; and by a sleep to say we end",
        "The heartache, and the thousand natural shocks",
        "That flesh is heir to,--'tis a consummation",
        "Devoutly to be wish'd. To die,--to sleep;--",
        "To sleep! perchance to dream:--ay, there's the rub;",
        "For in that sleep of death what dreams may come,",
        "When we have shuffled off this mortal coil,",
        "Must give us pause: there's the respect",
        "That makes calamity of so long life;",
        "For who would bear the whips and scorns of time,",
        "The oppressor's wrong, the proud man's contumely,",
        "The pangs of despis'd love, the law's delay,",
        "The insolence of office, and the spurns",
        "That patient merit of the unworthy takes,",
        "When he himself might his quietus make",
        "With a bare bodkin? who would these fardels bear,",
        "To grunt and sweat under a weary life,",
        "But that the dread of something after death,--",
        "The undiscover'd country, from whose bourn",
        "No traveller returns,--puzzles the will,",
        "And makes us rather bear those ills we have",
        "Than fly to others that we know not of?",
        "Thus conscience does make cowards of us all;",
        "And thus the native hue of resolution",
        "Is sicklied o'er with the pale cast of thought;",
        "And enterprises of great pith and moment,",
        "With this regard, their currents turn awry,",
        "And lose the name of action.--Soft you now!",
        "The fair Ophelia!--Nymph, in thy orisons",
        "Be all my sins remember'd."
    };

    /** Wraps the static word list in a Flink batch data set, one line per element. */
    public static DataSet<String> getDefaultTextLineDataSet(ExecutionEnvironment env) {
        final DataSet<String> textLines = env.fromElements(WORDS);
        return textLines;
    }
}
| 7,283 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose/examples/SimpleWordCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.firehose.examples;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.FlinkKinesisFirehoseProducer;
import com.amazonaws.services.kinesisanalytics.flink.connectors.serialization.JsonSerializationSchema;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import java.util.Arrays;
import java.util.Properties;
/**
 * Example application that computes word counts over the bundled Hamlet text and
 * writes each (word, count) tuple to a Kinesis Firehose delivery stream as JSON.
 * Replace the placeholder credentials and stream name before running.
 */
public class SimpleWordCount {
    private static final String SINK_NAME = "Flink Kinesis Firehose Sink";

    public static void main(String[] args) throws Exception {
        // Single-parallelism streaming environment for a deterministic example run.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Source: the static example text, one line per element.
        final DataStream<String> lines = env.fromElements(WordCountData.WORDS);

        // Tokenize each line, emit (word, 1) pairs, then sum counts per word.
        final DataStream<Tuple2<String, Integer>> counts = lines
                .map(line -> line.toLowerCase().split("\\W+"))
                .flatMap(new FlatMapFunction<String[], Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String[] words, Collector<Tuple2<String, Integer>> collector) throws Exception {
                        for (String word : words) {
                            if (word.length() > 0) {
                                collector.collect(new Tuple2<>(word, 1));
                            }
                        }
                    }
                })
                .keyBy(0)
                .sum(1);

        // Sink: Firehose producer configured with static credentials.
        final Properties configProps = new Properties();
        configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "aws_access_key_id");
        configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "aws_secret_access_key");
        configProps.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");

        final FlinkKinesisFirehoseProducer<Tuple2<String, Integer>> producer =
                new FlinkKinesisFirehoseProducer<>("firehose-delivery-stream", new JsonSerializationSchema<>(),
                        configProps);

        counts.addSink(producer).name(SINK_NAME);
        env.execute();
    }
}
| 7,284 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/firehose/examples/AssumeRoleSimpleStreamString.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.firehose.examples;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.FlinkKinesisFirehoseProducer;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType.ASSUME_ROLE;
/**
* This example application streams dummy data to the specified Firehose using Assume Role authentication mechanism.
* See https://docs.aws.amazon.com/kinesisanalytics/latest/java/examples-cross.html for more information.
*/
public class AssumeRoleSimpleStreamString {
    private static final String SINK_NAME = "Flink Kinesis Firehose Sink";
    private static final String STREAM_NAME = "<replace-with-your-stream>";
    private static final String ROLE_ARN = "<replace-with-your-role-arn>";
    private static final String ROLE_SESSION_NAME = "<replace-with-your-role-session-name>";
    private static final String REGION = "us-east-1";

    /** Wires the dummy event source to a Firehose sink authenticated via STS assume-role. */
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(1);

        final DataStream<String> events = environment.addSource(new SimpleStreamString.EventsGenerator());

        // Credentials are obtained by assuming the configured IAM role rather than
        // from static access keys.
        final Properties properties = new Properties();
        properties.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, ASSUME_ROLE.name());
        properties.setProperty(AWSConfigConstants.AWS_ROLE_ARN, ROLE_ARN);
        properties.setProperty(AWSConfigConstants.AWS_ROLE_SESSION_NAME, ROLE_SESSION_NAME);
        properties.setProperty(AWSConfigConstants.AWS_REGION, REGION);

        final FlinkKinesisFirehoseProducer<String> sink =
                new FlinkKinesisFirehoseProducer<>(STREAM_NAME, new SimpleStringSchema(), properties);
        events.addSink(sink).name(SINK_NAME);
        environment.execute();
    }
}
| 7,285 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/test/java/com/amazonaws/services/kinesisanalytics/flink/connectors/testutils/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.testutils;
import com.amazonaws.services.kinesisanalytics.flink.connectors.serialization.KinesisFirehoseSerializationSchema;
import org.apache.flink.api.common.serialization.SerializationSchema;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_ACCESS_KEY_ID;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_SECRET_ACCESS_KEY;
import static org.apache.flink.streaming.api.functions.sink.SinkFunction.Context;
/**
 * Shared helpers for unit tests: canned configuration properties, serialization
 * schemas, and a stub sink {@link Context}.
 */
public final class TestUtils {
    private TestUtils() {
    }

    public static final String DEFAULT_DELIVERY_STREAM = "test-stream";
    public static final String DEFAULT_TEST_ERROR_MSG = "Test exception";

    /** Returns a minimal valid configuration: region plus static test credentials. */
    public static Properties getStandardProperties() {
        Properties config = new Properties();
        config.setProperty(AWS_REGION, "us-east-1");
        config.setProperty(AWS_ACCESS_KEY_ID, "accessKeyId");
        config.setProperty(AWS_SECRET_ACCESS_KEY, "awsSecretAccessKey");
        return config;
    }

    /** Schema that serializes a string to its UTF-8 bytes wrapped in a {@link ByteBuffer}. */
    public static KinesisFirehoseSerializationSchema<String> getKinesisFirehoseSerializationSchema() {
        // No cast needed: the lambda's target type is fixed by the declared return type.
        return element -> ByteBuffer.wrap(element.getBytes(StandardCharsets.UTF_8));
    }

    /** Schema that serializes a string to its raw UTF-8 bytes. */
    public static SerializationSchema<String> getSerializationSchema() {
        // ByteBuffer.wrap(bytes).array() returns the identical backing array, so the
        // previous wrap/array round-trip was a no-op and has been removed.
        return element -> element.getBytes(StandardCharsets.UTF_8);
    }

    /** Stub sink context with a fixed watermark and wall-clock timestamps. */
    public static Context<String> getContext() {
        return new Context<String>() {
            @Override
            public long currentProcessingTime() {
                return System.currentTimeMillis();
            }

            @Override
            public long currentWatermark() {
                return 10L;
            }

            @Override
            public Long timestamp() {
                return System.currentTimeMillis();
            }
        };
    }
}
| 7,286 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer/FlinkKinesisFirehoseProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.FlinkKinesisFirehoseException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.RecordCouldNotBeSentException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer;
import com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential.CredentialProvider;
import com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential.factory.CredentialProviderFactory;
import com.amazonaws.services.kinesisanalytics.flink.connectors.serialization.KinesisFirehoseSerializationSchema;
import com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.Record;
import org.apache.commons.lang3.Validate;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.CredentialProviderType;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer.UserRecordResult;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.getCredentialProviderType;
/**
 * Flink sink that delivers serialized records to an AWS Kinesis Firehose delivery
 * stream. Records are handed to an internal buffering {@link FirehoseProducer} and
 * written asynchronously; the buffer is flushed synchronously on checkpoint
 * ({@link #snapshotState}) and on {@link #close}.
 * <p>
 * Error handling: failures from asynchronous delivery are remembered in
 * {@code lastThrownException} and, when {@code failOnError} is set, re-thrown on the
 * next {@code invoke}, {@code snapshotState} or {@code close} call; otherwise they
 * are only logged as warnings.
 */
public class FlinkKinesisFirehoseProducer<OUT> extends RichSinkFunction<OUT> implements CheckpointedFunction {
    private static final Logger LOGGER = LoggerFactory.getLogger(FlinkKinesisFirehoseProducer.class);
    /** Serializes incoming elements into the byte payload of a Firehose record. */
    private final KinesisFirehoseSerializationSchema<OUT> schema;
    /** Client/producer configuration supplied by the caller. */
    private final Properties config;
    /** Strategy used to obtain AWS credentials (e.g. basic keys, profile, assume-role). */
    private final CredentialProviderType credentialProviderType;
    /** Name of the default delivery stream to produce to. Can be overwritten by the serialization schema */
    private final String defaultDeliveryStream;
    /** Whether to fail the job on a delivery error; when false, errors are only logged. */
    private boolean failOnError;
    /** Remembers the last exception raised by an asynchronous delivery attempt; volatile
     *  because it is written from async callbacks and read from the task thread. */
    private transient volatile Throwable lastThrownException;
    /** The credential provider; transient because it must not be serialized with the job graph. */
    private transient CredentialProvider credentialsProvider;
    /** AWS client cannot be serialized when building the Flink Job graph */
    private transient AmazonKinesisFirehose firehoseClient;
    /** AWS Kinesis Firehose producer */
    private transient IProducer<UserRecordResult, Record> firehoseProducer;
    /**
     * Creates a new Flink Kinesis Firehose Producer.
     * @param deliveryStream The AWS Kinesis Firehose delivery stream.
     * @param schema The Serialization schema for the given data type.
     * @param configProps The properties used to configure Kinesis Firehose client.
     * @param credentialProviderType The specified Credential Provider type.
     */
    public FlinkKinesisFirehoseProducer(final String deliveryStream,
                                        final KinesisFirehoseSerializationSchema<OUT> schema,
                                        final Properties configProps,
                                        final CredentialProviderType credentialProviderType) {
        this.defaultDeliveryStream = Validate.notBlank(deliveryStream, "Delivery stream cannot be null or empty");
        this.schema = Validate.notNull(schema, "Kinesis serialization schema cannot be null");
        this.config = Validate.notNull(configProps, "Configuration properties cannot be null");
        this.credentialProviderType = Validate.notNull(credentialProviderType,
            "Credential Provider type cannot be null");
    }
    /**
     * Convenience constructor adapting a Flink {@link SerializationSchema} (byte[]-based)
     * to the ByteBuffer-based {@link KinesisFirehoseSerializationSchema}.
     */
    public FlinkKinesisFirehoseProducer(final String deliveryStream , final SerializationSchema<OUT> schema,
                                        final Properties configProps,
                                        final CredentialProviderType credentialProviderType) {
        this(deliveryStream, new KinesisFirehoseSerializationSchema<OUT>() {
            @Override
            public ByteBuffer serialize(OUT element) {
                return ByteBuffer.wrap(schema.serialize(element));
            }
        }, configProps, credentialProviderType);
    }
    /** Convenience constructor deriving the credential provider type from the configuration. */
    public FlinkKinesisFirehoseProducer(final String deliveryStream, final KinesisFirehoseSerializationSchema<OUT> schema,
                                        final Properties configProps) {
        this(deliveryStream, schema, configProps, getCredentialProviderType(configProps, AWS_CREDENTIALS_PROVIDER));
    }
    /** Convenience constructor deriving the credential provider type from the configuration. */
    public FlinkKinesisFirehoseProducer(final String deliveryStream, final SerializationSchema<OUT> schema,
                                        final Properties configProps) {
        this(deliveryStream, schema, configProps, getCredentialProviderType(configProps, AWS_CREDENTIALS_PROVIDER));
    }
    /** Enables/disables failing the job when a record cannot be delivered. Default: false. */
    public void setFailOnError(final boolean failOnError) {
        this.failOnError = failOnError;
    }
    /**
     * Initializes the transient members (credential provider, Firehose client and
     * buffering producer) on the task manager before any records are processed.
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        this.credentialsProvider = CredentialProviderFactory.newCredentialProvider(credentialProviderType, config);
        LOGGER.info("Credential provider: {}", credentialsProvider.getAwsCredentialsProvider().getClass().getName() );
        this.firehoseClient = createKinesisFirehoseClient();
        this.firehoseProducer = createFirehoseProducer();
        LOGGER.info("Started Kinesis Firehose client. Delivering to stream: {}", defaultDeliveryStream);
    }
    /** Factory hook (package-private for test overriding) that builds the AWS client. */
    @Nonnull
    AmazonKinesisFirehose createKinesisFirehoseClient() {
        return AWSUtil.createKinesisFirehoseClientFromConfiguration(config, credentialsProvider);
    }
    /** Factory hook (package-private for test overriding) that builds the buffering producer. */
    @Nonnull
    IProducer<UserRecordResult, Record> createFirehoseProducer() {
        return new FirehoseProducer<>(defaultDeliveryStream, firehoseClient, config);
    }
    /**
     * Serializes {@code value} and submits it asynchronously to the producer.
     * Any previously recorded async failure is surfaced here (thrown when
     * {@code failOnError} is set) before the new record is submitted.
     */
    @Override
    public void invoke(final OUT value, final Context context) throws Exception {
        Validate.notNull(value);
        ByteBuffer serializedValue = schema.serialize(value);
        Validate.validState((firehoseProducer != null && !firehoseProducer.isDestroyed()),
            "Firehose producer has been destroyed");
        Validate.validState(firehoseClient != null, "Kinesis Firehose client has been closed");
        propagateAsyncExceptions();
        firehoseProducer
            .addUserRecord(new Record().withData(serializedValue))
            .handleAsync((record, throwable) -> {
                if (throwable != null) {
                    final String msg = "An error has occurred trying to write a record.";
                    // When failOnError is set, only the throwable itself is recorded;
                    // it is re-thrown on the next invoke/snapshot/close.
                    if (failOnError) {
                        lastThrownException = throwable;
                    } else {
                        LOGGER.warn(msg, throwable);
                    }
                }
                if (record != null && !record.isSuccessful()) {
                    final String msg = "Record could not be successfully sent.";
                    // Keep the first recorded exception; do not overwrite an earlier one.
                    if (failOnError && lastThrownException == null) {
                        lastThrownException = new RecordCouldNotBeSentException(msg, record.getException());
                    } else {
                        LOGGER.warn(msg, record.getException());
                    }
                }
                return null;
            });
    }
    /**
     * Checkpoint hook: drains the producer buffer so that no in-flight record is
     * lost, and fails the checkpoint if the drain was incomplete or a prior async
     * error was recorded.
     */
    @Override
    public void snapshotState(final FunctionSnapshotContext functionSnapshotContext) throws Exception {
        //Propagates asynchronously wherever exception that might happened previously.
        propagateAsyncExceptions();
        //Forces the Firehose producer to flush the buffer.
        LOGGER.debug("Outstanding records before snapshot: {}", firehoseProducer.getOutstandingRecordsCount());
        flushSync();
        LOGGER.debug("Outstanding records after snapshot: {}", firehoseProducer.getOutstandingRecordsCount());
        if (firehoseProducer.getOutstandingRecordsCount() > 0) {
            throw new IllegalStateException("An error has occurred trying to flush the buffer synchronously.");
        }
        // If the flush produced any exceptions, we should propagates it also and fail the checkpoint.
        propagateAsyncExceptions();
    }
    /** No state to restore: this sink keeps no Flink-managed state. */
    @Override
    public void initializeState(final FunctionInitializationContext functionInitializationContext) throws Exception {
        //No Op
    }
    /**
     * Flushes outstanding records, destroys the producer and shuts down the AWS
     * client. Pending async exceptions are surfaced before resources are released.
     */
    @Override
    public void close() throws Exception {
        try {
            super.close();
            propagateAsyncExceptions();
        } catch (Exception ex) {
            LOGGER.error(ex.getMessage(), ex);
            throw ex;
        } finally {
            // Resources are released even when exception propagation above threw.
            flushSync();
            firehoseProducer.destroy();
            if (firehoseClient != null) {
                LOGGER.debug("Shutting down Kinesis Firehose client...");
                firehoseClient.shutdown();
            }
        }
    }
    /**
     * Surfaces the last recorded async exception: thrown (wrapped) when
     * {@code failOnError} is set, otherwise logged and cleared.
     */
    private void propagateAsyncExceptions() throws Exception {
        if (lastThrownException == null) {
            return;
        }
        final String msg = "An exception has been thrown while trying to process a record";
        if (failOnError) {
            throw new FlinkKinesisFirehoseException(msg, lastThrownException);
        } else {
            LOGGER.warn(msg, lastThrownException);
            lastThrownException = null;
        }
    }
    /**
     * This method waits until the buffer is flushed, an error has occurred or the thread was interrupted.
     */
    private void flushSync() {
        // Poll every 500 ms until the buffer is drained or the flush is marked failed.
        while (firehoseProducer.getOutstandingRecordsCount() > 0 && !firehoseProducer.isFlushFailed()) {
            firehoseProducer.flush();
            try {
                LOGGER.debug("Number of outstanding records before going to sleep: {}", firehoseProducer.getOutstandingRecordsCount());
                Thread.sleep(500);
            } catch (InterruptedException ex) {
                LOGGER.warn("Flushing has been interrupted.");
                break;
            }
        }
    }
}
| 7,287 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer/IProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer;
import java.util.concurrent.CompletableFuture;
/**
* Interface responsible for sending data a specific sink
*/
/**
 * Interface responsible for sending data to a specific sink.
 *
 * @param <O> result type completed for each submitted record
 * @param <R> record type accepted by the producer
 */
public interface IProducer<O, R> {
    /**
     * Sends a record to a specific destination asynchronously.
     * @param record the record to be sent
     * @return a {@code CompletableFuture} completed with the result of the operation.
     * @throws Exception if the record cannot be accepted for sending
     */
    CompletableFuture<O> addUserRecord(final R record) throws Exception;
    /**
     * Sends a record to a specific destination asynchronously, bounded by a timeout.
     * @param record the record to be sent
     * @param operationTimeoutInMillis the expected operation timeout
     * @return a {@code CompletableFuture} completed with the result of the operation.
     * @throws Exception if the record cannot be accepted for sending
     */
    CompletableFuture<O> addUserRecord(final R record, final long operationTimeoutInMillis) throws Exception;
    /**
     * Destroy and release any used resource.
     * @throws Exception if the release of resources fails
     */
    void destroy() throws Exception;
    /**
     * Returns whether the producer has been destroyed or not.
     * @return {@code true} once {@link #destroy()} has completed, {@code false} otherwise
     */
    boolean isDestroyed();
    /**
     * Should return the number of outstanding records if the producer implements buffering.
     * @return an integer with the number of outstanding records.
     */
    int getOutstandingRecordsCount();
    /**
     * This method flushes the buffer immediately.
     */
    void flush();
    /**
     * Performs a synchronous flush on the buffer waiting until the whole buffer is drained.
     */
    void flushSync();
    /**
     * A flag representing whether the flush has failed or not.
     * @return {@code boolean} representing whether the success of failure of flush buffer operation.
     */
    boolean isFlushFailed();
}
| 7,288 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer/impl/FirehoseProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.FlinkKinesisFirehoseException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.RecordCouldNotBeSentException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.TimeoutExpiredException;
import com.amazonaws.services.kinesisanalytics.flink.connectors.producer.IProducer;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.AmazonKinesisFirehoseException;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResponseEntry;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResult;
import com.amazonaws.services.kinesisfirehose.model.Record;
import com.amazonaws.services.kinesisfirehose.model.ServiceUnavailableException;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.ThreadSafe;
import java.util.ArrayDeque;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl.FirehoseProducer.UserRecordResult;
/**
 * A producer that buffers records in memory and asynchronously delivers them to an AWS Kinesis
 * Firehose delivery stream.
 *
 * <p>Caller threads append records through {@link #addUserRecord}; a single background flusher
 * thread (started in the constructor) swaps the filled producer buffer into the flusher buffer and
 * submits it via {@code PutRecordBatch}, retrying recoverable failures with capped full-jitter
 * exponential backoff.</p>
 *
 * <p>Thread-safety: all shared mutable state is guarded by {@code producerBufferLock}.</p>
 */
@ThreadSafe
public class FirehoseProducer<O extends UserRecordResult, R extends Record> implements IProducer<O, R> {
    private static final Logger LOGGER = LoggerFactory.getLogger(FirehoseProducer.class);

    /** A configuration object holding validated properties for the producer */
    private final FirehoseProducerConfiguration configuration;

    /** AWS Kinesis Firehose client */
    private final AmazonKinesisFirehose firehoseClient;

    /** Firehose delivery stream */
    private final String deliveryStream;

    /** Scheduler service responsible for flushing the producer Buffer pool */
    private final ExecutorService flusher;

    /** Object lock responsible for guarding the producer Buffer pool */
    @GuardedBy("this")
    private final Object producerBufferLock = new Object();

    /** Producer Buffer pool; filled by caller threads, drained by the flusher thread. */
    private volatile Queue<Record> producerBuffer;

    /** Flusher Buffer pool; only the flusher thread moves records in and out of it. */
    private volatile Queue<Record> flusherBuffer;

    /** A timestamp responsible to store the last timestamp after the flusher thread has been performed */
    private volatile long lastSucceededFlushTimestamp;

    /** Reports if the Firehose Producer was destroyed, shutting down the flusher thread. */
    private volatile boolean isDestroyed;

    /** A sentinel flag to notify the flusher thread to flush the buffer immediately.
     * This flag should be used only to request a flush from the caller thread through the {@link #flush()} method. */
    private volatile boolean syncFlush;

    /** A flag representing if the Flusher thread has failed. */
    private volatile boolean isFlusherFailed;

    /**
     * Creates a producer configured from raw {@link Properties}.
     *
     * @param deliveryStream the Kinesis Firehose delivery stream name; must be non-blank
     * @param firehoseClient the Kinesis Firehose client; must be non-null
     * @param config raw configuration properties, validated by {@link FirehoseProducerConfiguration}
     */
    public FirehoseProducer(@Nonnull final String deliveryStream,
                            @Nonnull final AmazonKinesisFirehose firehoseClient,
                            @Nonnull final Properties config) {
        this(deliveryStream, firehoseClient, FirehoseProducerConfiguration.builder(config).build());
    }

    /**
     * Creates a producer and starts the background flusher thread.
     *
     * @param deliveryStream the Kinesis Firehose delivery stream name; must be non-blank
     * @param firehoseClient the Kinesis Firehose client; must be non-null
     * @param configuration a pre-built, validated configuration
     */
    public FirehoseProducer(@Nonnull final String deliveryStream,
                            @Nonnull final AmazonKinesisFirehose firehoseClient,
                            @Nonnull final FirehoseProducerConfiguration configuration) {
        this.firehoseClient = Validate.notNull(firehoseClient, "Kinesis Firehose client cannot be null");
        this.deliveryStream = Validate.notBlank(deliveryStream, "Kinesis Firehose delivery stream cannot be null or empty.");
        this.configuration = configuration;
        this.producerBuffer = new ArrayDeque<>(configuration.getMaxBufferSize());
        this.flusherBuffer = new ArrayDeque<>(configuration.getMaxBufferSize());
        flusher = Executors.newSingleThreadExecutor(new FirehoseThreadFactory());
        flusher.submit(this::flushBuffer);
    }

    /**
     * Buffers a record using the configured default operation timeout.
     *
     * @param record the record to buffer; must be non-null
     * @return a completed future describing the (always-successful) enqueue result
     * @throws Exception if the record could not be buffered within the default timeout
     */
    @Override
    public CompletableFuture<O> addUserRecord(final R record) throws Exception {
        return addUserRecord(record, configuration.getMaxOperationTimeoutInMillis());
    }

    /**
     * This method is responsible for taking a lock adding a {@code Record} into the producerBuffer, in case the producerBuffer is full
     * waits releasing the lock for the given {@code bufferFullWaitTimeoutInMillis}.
     * There are cases where the producerBuffer cannot be flushed then this method keeps waiting until the given operation timeout
     * passed as {@code timeoutInMillis}
     * @param record the type of data to be buffered
     * @param timeoutInMillis the operation timeout in case the record cannot be added into the producerBuffer.
     * @return a completed future whose result reports the record as successfully buffered
     * @throws TimeoutExpiredException if the operation got stuck and is not able to proceed.
     * @throws InterruptedException if any thread interrupted the current thread before or while the current thread
     * was waiting for a notification. The <i>interrupted status</i> of the current thread is cleared when
     * this exception is thrown.
     */
    @Override
    public CompletableFuture<O> addUserRecord(final R record, final long timeoutInMillis)
            throws TimeoutExpiredException, InterruptedException {
        Validate.notNull(record, "Record cannot be null.");
        Validate.isTrue(timeoutInMillis > 0, "Operation timeout should be > 0.");

        long operationTimeoutInNanos = TimeUnit.MILLISECONDS.toNanos(timeoutInMillis);
        synchronized (producerBufferLock) {
            /* This happens whenever the current thread is trying to write, however, the Producer Buffer is full.
             * This guarantees if the writer thread is already running, should wait.
             * In addition, implements a kind of back pressure mechanism with a bail out condition, so we don't incur
             * in cases where the current thread waits forever.
             */
            long lastTimestamp = System.nanoTime();
            while (producerBuffer.size() >= configuration.getMaxBufferSize()) {
                if ((System.nanoTime() - lastTimestamp) >= operationTimeoutInNanos) {
                    throw new TimeoutExpiredException("Timeout has expired for the given operation");
                }
                /* If the buffer is filled and the flusher isn't running yet we notify to wake up the flusher */
                if (flusherBuffer.isEmpty()) {
                    producerBufferLock.notify();
                }
                producerBufferLock.wait(configuration.getBufferFullWaitTimeoutInMillis());
            }
            producerBuffer.offer(record);

            /* If the buffer was filled up right after the last insertion we would like to wake up the flusher thread
             * and send the buffered data to Kinesis Firehose as soon as possible */
            if (producerBuffer.size() >= configuration.getMaxBufferSize() && flusherBuffer.isEmpty()) {
                producerBufferLock.notify();
            }
        }
        UserRecordResult recordResult = new UserRecordResult().setSuccessful(true);
        /* Safe in practice: O is bounded by UserRecordResult and callers instantiate the producer
         * with O = UserRecordResult. */
        @SuppressWarnings("unchecked")
        CompletableFuture<O> future = CompletableFuture.completedFuture((O) recordResult);
        return future;
    }

    /**
     * This method runs in a background thread responsible for flushing the Producer Buffer in case the buffer is full,
     * not enough records into the buffer and timeout has expired or flusher timeout has expired.
     * If an unhandled exception is thrown the flusher thread should fail, logging the failure.
     * However, this behavior will block the producer to move on until hit the given timeout and throw {@code {@link TimeoutExpiredException}}
     */
    private void flushBuffer() {
        lastSucceededFlushTimestamp = System.nanoTime();
        long bufferTimeoutInNanos = TimeUnit.MILLISECONDS.toNanos(configuration.getBufferTimeoutInMillis());
        boolean timeoutFlush;

        while (true) {
            timeoutFlush = (System.nanoTime() - lastSucceededFlushTimestamp) >= bufferTimeoutInNanos;

            synchronized (producerBufferLock) {
                /* If the flusher buffer is not empty at this point we should fail, otherwise we would end up looping
                 * forever since we are swapping references */
                Validate.validState(flusherBuffer.isEmpty());
                if (isDestroyed) {
                    return;
                } else if (syncFlush || (producerBuffer.size() >= configuration.getMaxBufferSize() ||
                        (timeoutFlush && producerBuffer.size() > 0))) {
                    prepareRecordsToSubmit(producerBuffer, flusherBuffer);
                    producerBufferLock.notify();
                } else {
                    try {
                        producerBufferLock.wait(configuration.getBufferTimeoutBetweenFlushes());
                    } catch (InterruptedException e) {
                        /* Deliberately swallowed (interrupt status is NOT restored): this loop must keep
                         * draining until destroy() flips isDestroyed; restoring the flag would make the
                         * next wait() throw immediately and busy-spin. */
                        LOGGER.info("An interrupted exception has been thrown, while trying to sleep and release the lock during a flush.", e);
                    }
                    continue;
                }
            }

            /* It's OK calling {@code submitBatchWithRetry} outside the critical section because this method does not make
             * any changes to the object and the producer thread does not make any modifications to the flusherBuffer.
             * The only agent making changes to flusherBuffer is the flusher thread. */
            try {
                submitBatchWithRetry(flusherBuffer);

                Queue<Record> emptyFlushBuffer = new ArrayDeque<>(configuration.getMaxBufferSize());
                synchronized (producerBufferLock) {
                    /* We perform a swap at this point because {@code ArrayDeque<>.clear()} iterates over the items nullifying
                     * the items, and we would like to avoid such iteration just swapping references. */
                    Validate.validState(!flusherBuffer.isEmpty());
                    flusherBuffer = emptyFlushBuffer;

                    if (syncFlush) {
                        syncFlush = false;
                        producerBufferLock.notify();
                    }
                }
            } catch (Exception ex) {
                String errorMsg = "An error has occurred while trying to send data to Kinesis Firehose.";
                if (ex instanceof AmazonKinesisFirehoseException &&
                        ((AmazonKinesisFirehoseException) ex).getStatusCode() == 413) {
                    LOGGER.error(errorMsg +
                            "Batch of records too large. Please try to reduce your batch size by passing " +
                            "FIREHOSE_PRODUCER_BUFFER_MAX_SIZE into your configuration.", ex);
                } else {
                    LOGGER.error(errorMsg, ex);
                }

                synchronized (producerBufferLock) {
                    isFlusherFailed = true;
                }
                throw ex;
            }
        }
    }

    /**
     * Populates the target queue with messages from the source queue.
     * Up to the maximum capacity defined by {@code maxPutRecordBatchBytes}.
     */
    private void prepareRecordsToSubmit(@Nonnull final Queue<Record> sourceQueue, @Nonnull final Queue<Record> targetQueue) {
        int total = 0;
        while (!sourceQueue.isEmpty() && (total + sourceQueue.peek().getData().capacity()) <= configuration.getMaxPutRecordBatchBytes()) {
            total += sourceQueue.peek().getData().capacity();
            targetQueue.add(sourceQueue.poll());
        }
    }

    /**
     * Submits the given batch, retrying up to {@code numberOfRetries} times on partial failures and
     * {@link ServiceUnavailableException}, sleeping between attempts with capped full-jitter
     * exponential backoff.
     *
     * @param records the batch to submit; not modified by this method
     * @throws AmazonKinesisFirehoseException on a non-recoverable service error
     * @throws RecordCouldNotBeSentException when all retry attempts are exhausted
     */
    private void submitBatchWithRetry(final Queue<Record> records) throws AmazonKinesisFirehoseException,
            RecordCouldNotBeSentException {
        PutRecordBatchResult lastResult;
        String warnMessage = null;
        for (int attempts = 0; attempts < configuration.getNumberOfRetries(); attempts++) {
            try {
                LOGGER.debug("Trying to flush Buffer of size: {} on attempt: {}", records.size(), attempts);
                lastResult = submitBatch(records);

                if (lastResult.getFailedPutCount() == null || lastResult.getFailedPutCount() == 0) {
                    lastSucceededFlushTimestamp = System.nanoTime();
                    LOGGER.debug("Firehose Buffer has been flushed with size: {} on attempt: {}",
                            records.size(), attempts);
                    return;
                }

                /* Entries without a record id are the ones that failed (they carry the error code). */
                PutRecordBatchResponseEntry failedRecord = lastResult.getRequestResponses()
                        .stream()
                        .filter(r -> r.getRecordId() == null)
                        .findFirst()
                        .orElse(null);

                warnMessage = String.format("Number of failed records: %s.", lastResult.getFailedPutCount());
                if (failedRecord != null) {
                    warnMessage = String.format("Last Kinesis Firehose putRecordBatch encountered an error and failed " +
                                    "trying to put: %s records with error: %s - %s.",
                            lastResult.getFailedPutCount(), failedRecord.getErrorCode(), failedRecord.getErrorMessage());
                }
                LOGGER.warn(warnMessage);

                //Full Jitter: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
                //sleep = random(0, min(cap, base * 2^attempt)). The previous formula (base * 2 * attempts)
                //was linear and always slept 0ms before the first retry.
                long exponentialBackOff = configuration.getBaseBackOffInMillis() * (1L << Math.min(attempts, 32));
                long timeToSleep = RandomUtils.nextLong(0,
                        Math.min(configuration.getMaxBackOffInMillis(), exponentialBackOff));
                LOGGER.info("Sleeping for: {}ms on attempt: {}", timeToSleep, attempts);
                Thread.sleep(timeToSleep);
            } catch (ServiceUnavailableException ex) {
                LOGGER.info("Kinesis Firehose has thrown a recoverable exception.", ex);
            } catch (InterruptedException e) {
                /* Swallowed on purpose so the remaining retry attempts still run; see flushBuffer(). */
                LOGGER.info("An interrupted exception has been thrown between retry attempts.", e);
            } catch (AmazonKinesisFirehoseException ex) {
                throw ex;
            }
        }
        throw new RecordCouldNotBeSentException("Exceeded number of attempts! " + warnMessage);
    }

    /**
     * Sends the actual batch of records to Kinesis Firehose
     * @param records a Collection of records
     * @return {@code PutRecordBatchResult}
     * @throws AmazonKinesisFirehoseException propagated unchanged from the client
     */
    private PutRecordBatchResult submitBatch(final Queue<Record> records) throws AmazonKinesisFirehoseException {
        LOGGER.debug("Sending {} records to Kinesis Firehose on stream: {}", records.size(), deliveryStream);
        return firehoseClient.putRecordBatch(new PutRecordBatchRequest()
                .withDeliveryStreamName(deliveryStream)
                .withRecords(records));
    }

    /**
     * Make sure that any pending scheduled thread terminates before closing as well as cleans the producerBuffer pool,
     * allowing GC to collect.
     */
    @Override
    public void destroy() throws Exception {
        synchronized (producerBufferLock) {
            isDestroyed = true;
            producerBuffer = null;
            producerBufferLock.notify();
        }

        if (!flusher.isShutdown() && !flusher.isTerminated()) {
            LOGGER.info("Shutting down scheduled service.");
            flusher.shutdown();
            try {
                LOGGER.info("Awaiting executor service termination...");
                flusher.awaitTermination(1L, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                final String errorMsg = "Error waiting executor writer termination.";
                LOGGER.error(errorMsg, e);
                throw new FlinkKinesisFirehoseException(errorMsg, e);
            }
        }
    }

    /** @return whether {@link #destroy()} has been invoked. */
    @Override
    public boolean isDestroyed() {
        synchronized (producerBufferLock) {
            return isDestroyed;
        }
    }

    /** @return the number of records buffered but not yet delivered (producer + flusher buffers). */
    @Override
    public int getOutstandingRecordsCount() {
        synchronized (producerBufferLock) {
            return producerBuffer.size() + flusherBuffer.size();
        }
    }

    /** @return {@code true} once the flusher thread has failed permanently. */
    @Override
    public boolean isFlushFailed() {
        synchronized (producerBufferLock) {
            return isFlusherFailed;
        }
    }

    /**
     * This method instructs the flusher thread to perform a flush on the buffer without waiting for completion.
     * <p>
     * This implementation does not guarantee the whole buffer is flushed or if the flusher thread
     * has completed the flush or not.
     * In order to flush all records and wait until completion, use {@code {@link #flushSync()}}
     * </p>
     */
    @Override
    public void flush() {
        synchronized (producerBufferLock) {
            syncFlush = true;
            producerBufferLock.notify();
        }
    }

    /**
     * This method instructs the flusher thread to perform the flush on the buffer and wait for the completion.
     * <p>
     * This implementation is useful once there is a need to guarantee the buffer is flushed before making further progress.
     * i.e. Shutting down the producer.
     * i.e. Taking synchronous snapshots.
     * </p>
     * The caller needs to make sure to assert the status of {@link #isFlushFailed()} in order guarantee whether
     * the flush has successfully completed or not.
     */
    @Override
    public void flushSync() {
        while (getOutstandingRecordsCount() > 0 && !isFlushFailed()) {
            flush();
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                LOGGER.warn("An interruption has happened while trying to flush the buffer synchronously.");
                Thread.currentThread().interrupt();
            }
        }
        if (isFlushFailed()) {
            LOGGER.warn("The flusher thread has failed trying to synchronously flush the buffer.");
        }
    }

    /** Result handed back to callers of {@code addUserRecord}; mutable holder with fluent setters. */
    public static class UserRecordResult {
        private Throwable exception;
        private boolean successful;

        /** @return the failure cause, or {@code null} when the operation succeeded. */
        public Throwable getException() {
            return exception;
        }

        public UserRecordResult setException(Throwable exception) {
            this.exception = exception;
            return this;
        }

        /** @return whether the record was buffered successfully. */
        public boolean isSuccessful() {
            return successful;
        }

        public UserRecordResult setSuccessful(boolean successful) {
            this.successful = successful;
            return this;
        }
    }

    /** Thread factory producing non-daemon, sequentially named flusher threads. */
    static class FirehoseThreadFactory implements ThreadFactory {

        /** Static threadsafe counter use to generate thread name suffix. */
        private static final AtomicLong count = new AtomicLong(0);

        @Override
        public Thread newThread(@Nonnull final Runnable runnable) {
            Thread thread = Executors.defaultThreadFactory().newThread(runnable);
            thread.setName("kda-writer-thread-" + count.getAndIncrement());
            /* Non-daemon so pending records are not silently dropped on JVM exit. */
            thread.setDaemon(false);
            return thread;
        }
    }
}
| 7,289 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/producer/impl/FirehoseProducerConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.producer.impl;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil;
import org.apache.commons.lang3.Validate;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAXIMUM_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAX_BUFFER_SIZE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_BASE_BACKOFF_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FLUSH_MAX_NUMBER_OF_RETRIES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FLUSH_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_FULL_WAIT_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_BACKOFF_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_SIZE;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.FIREHOSE_PRODUCER_MAX_OPERATION_TIMEOUT;
import static java.util.Optional.ofNullable;
/** An immutable configuration class for {@link FirehoseProducer}. */
public class FirehoseProducerConfiguration {
/** The default MAX producerBuffer size. Users should be able to specify a smaller producerBuffer if needed.
* However, this value should be exercised with caution, since Kinesis Firehose limits PutRecordBatch at 500 records or 4MiB per call.
* Please refer to https://docs.aws.amazon.com/firehose/latest/dev/limits.html for further reference.
* */
private final int maxBufferSize;
/** The maximum number of bytes that can be sent in a single PutRecordBatch operation */
private final int maxPutRecordBatchBytes;
/** The specified amount timeout the producerBuffer must be flushed if haven't met any other conditions previously */
private final long bufferTimeoutInMillis;
/** The wait time in milliseconds in case a producerBuffer is full */
private final long bufferFullWaitTimeoutInMillis;
/** The interval between producerBuffer flushes */
private final long bufferTimeoutBetweenFlushes;
/** The MAX number of retries in case of recoverable failures */
private final int numberOfRetries;
/** The default MAX backoff timeout
* https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
*/
private final long maxBackOffInMillis;
/** The default BASE timeout to be used on Jitter backoff
* https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
*/
private final long baseBackOffInMillis;
/** The MAX timeout for a given addUserRecord operation */
private final long maxOperationTimeoutInMillis;
private FirehoseProducerConfiguration(@Nonnull final Builder builder) {
this.maxBufferSize = builder.maxBufferSize;
this.maxPutRecordBatchBytes = builder.maxPutRecordBatchBytes;
this.bufferTimeoutInMillis = builder.bufferTimeoutInMillis;
this.bufferFullWaitTimeoutInMillis = builder.bufferFullWaitTimeoutInMillis;
this.bufferTimeoutBetweenFlushes = builder.bufferTimeoutBetweenFlushes;
this.numberOfRetries = builder.numberOfRetries;
this.maxBackOffInMillis = builder.maxBackOffInMillis;
this.baseBackOffInMillis = builder.baseBackOffInMillis;
this.maxOperationTimeoutInMillis = builder.maxOperationTimeoutInMillis;
}
/**
* The max producer buffer size; the maximum number of records that will be sent in a PutRecordBatch request.
* @return the max producer buffer size.
*/
public int getMaxBufferSize() {
return maxBufferSize;
}
/**
* The maximum number of bytes that will be sent in a single PutRecordBatch operation.
* @return the maximum number of PutRecordBatch bytes
*/
public int getMaxPutRecordBatchBytes() {
return maxPutRecordBatchBytes;
}
/**
* The specified amount timeout the producerBuffer must be flushed if haven't met any other conditions previously.
* @return the specified amount timeout the producerBuffer must be flushed
*/
public long getBufferTimeoutInMillis() {
return bufferTimeoutInMillis;
}
/**
* The wait time in milliseconds in case a producerBuffer is full.
* @return The wait time in milliseconds
*/
public long getBufferFullWaitTimeoutInMillis() {
return bufferFullWaitTimeoutInMillis;
}
/**
* The interval between producerBuffer flushes.
* @return The interval between producerBuffer flushes
*/
public long getBufferTimeoutBetweenFlushes() {
return bufferTimeoutBetweenFlushes;
}
/**
* The max number of retries in case of recoverable failures.
* @return the max number of retries in case of recoverable failures
*/
public int getNumberOfRetries() {
return numberOfRetries;
}
/**
* The max backoff timeout (https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/)
* @return The max backoff timeout
*/
public long getMaxBackOffInMillis() {
return maxBackOffInMillis;
}
/**
* The base backoff timeout (https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/)
* @return The base backoff timeout
*/
public long getBaseBackOffInMillis() {
return baseBackOffInMillis;
}
/**
* The max timeout for a given addUserRecord operation.
* @return the max timeout for a given addUserRecord operation
*/
public long getMaxOperationTimeoutInMillis() {
return maxOperationTimeoutInMillis;
}
@Nonnull
public static Builder builder(@Nonnull final Properties config) {
final String region = config.getProperty(AWSConfigConstants.AWS_REGION);
return builder(region).withProperties(config);
}
@Nonnull
public static Builder builder(@Nullable final String region) {
return new Builder(region);
}
public static class Builder {
private int maxBufferSize = ProducerConfigConstants.DEFAULT_MAX_BUFFER_SIZE;
private int maxPutRecordBatchBytes;
private int numberOfRetries = ProducerConfigConstants.DEFAULT_NUMBER_OF_RETRIES;
private long bufferTimeoutInMillis = ProducerConfigConstants.DEFAULT_MAX_BUFFER_TIMEOUT;
private long maxOperationTimeoutInMillis = ProducerConfigConstants.DEFAULT_MAX_OPERATION_TIMEOUT;
private long bufferFullWaitTimeoutInMillis = ProducerConfigConstants.DEFAULT_WAIT_TIME_FOR_BUFFER_FULL;
private long bufferTimeoutBetweenFlushes = ProducerConfigConstants.DEFAULT_INTERVAL_BETWEEN_FLUSHES;
private long maxBackOffInMillis = ProducerConfigConstants.DEFAULT_MAX_BACKOFF;
private long baseBackOffInMillis = ProducerConfigConstants.DEFAULT_BASE_BACKOFF;
public Builder(@Nullable final String region) {
this.maxPutRecordBatchBytes = AWSUtil.getDefaultMaxPutRecordBatchBytes(region);
}
@Nonnull
public FirehoseProducerConfiguration build() {
return new FirehoseProducerConfiguration(this);
}
/**
* The max producer buffer size; the maximum number of records that will be sent in a PutRecordBatch request.
* @param maxBufferSize the max producer buffer size
* @return this builder
*/
@Nonnull
public Builder withMaxBufferSize(final int maxBufferSize) {
Validate.isTrue(maxBufferSize > 0 && maxBufferSize <= DEFAULT_MAX_BUFFER_SIZE,
String.format("Buffer size must be between 1 and %d", DEFAULT_MAX_BUFFER_SIZE));
this.maxBufferSize = maxBufferSize;
return this;
}
/**
* The maximum number of bytes that will be sent in a single PutRecordBatch operation.
* @param maxPutRecordBatchBytes the maximum number of PutRecordBatch bytes
* @return this builder
*/
@Nonnull
public Builder withMaxPutRecordBatchBytes(final int maxPutRecordBatchBytes) {
Validate.isTrue(maxPutRecordBatchBytes > 0 && maxPutRecordBatchBytes <= DEFAULT_MAXIMUM_BATCH_BYTES,
String.format("Maximum batch size in bytes must be between 1 and %d", DEFAULT_MAXIMUM_BATCH_BYTES));
this.maxPutRecordBatchBytes = maxPutRecordBatchBytes;
return this;
}
/**
* The max number of retries in case of recoverable failures.
* @param numberOfRetries the max number of retries in case of recoverable failures.
* @return this builder
*/
@Nonnull
public Builder withNumberOfRetries(final int numberOfRetries) {
Validate.isTrue(numberOfRetries >= 0, "Number of retries cannot be negative.");
this.numberOfRetries = numberOfRetries;
return this;
}
/**
* The specified amount timeout the producerBuffer must be flushed if haven't met any other conditions previously.
* @param bufferTimeoutInMillis the specified amount timeout the producerBuffer must be flushed
* @return this builder
*/
@Nonnull
public Builder withBufferTimeoutInMillis(final long bufferTimeoutInMillis) {
Validate.isTrue(bufferTimeoutInMillis >= 0, "Flush timeout should be greater than 0.");
this.bufferTimeoutInMillis = bufferTimeoutInMillis;
return this;
}
/**
* The max timeout for a given addUserRecord operation.
* @param maxOperationTimeoutInMillis The max timeout for a given addUserRecord operation
* @return this builder
*/
@Nonnull
public Builder withMaxOperationTimeoutInMillis(final long maxOperationTimeoutInMillis) {
Validate.isTrue(maxOperationTimeoutInMillis >= 0, "Max operation timeout should be greater than 0.");
this.maxOperationTimeoutInMillis = maxOperationTimeoutInMillis;
return this;
}
/**
* The wait time in milliseconds in case a producerBuffer is full.
* @param bufferFullWaitTimeoutInMillis the wait time in milliseconds in case a producerBuffer is full
* @return this builder
*/
@Nonnull
public Builder withBufferFullWaitTimeoutInMillis(final long bufferFullWaitTimeoutInMillis) {
Validate.isTrue(bufferFullWaitTimeoutInMillis >= 0, "Buffer full waiting timeout should be greater than 0.");
this.bufferFullWaitTimeoutInMillis = bufferFullWaitTimeoutInMillis;
return this;
}
/**
* The interval between producerBuffer flushes.
* @param bufferTimeoutBetweenFlushes the interval between producerBuffer flushes
* @return this builder
*/
@Nonnull
public Builder withBufferTimeoutBetweenFlushes(final long bufferTimeoutBetweenFlushes) {
Validate.isTrue(bufferTimeoutBetweenFlushes >= 0, "Interval between flushes cannot be negative.");
this.bufferTimeoutBetweenFlushes = bufferTimeoutBetweenFlushes;
return this;
}
/**
* The max backoff timeout (https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/)
* @param maxBackOffInMillis the max backoff timeout
* @return this builder
*/
@Nonnull
public Builder withMaxBackOffInMillis(final long maxBackOffInMillis) {
Validate.isTrue(maxBackOffInMillis >= 0, "Max backoff timeout should be greater than 0.");
this.maxBackOffInMillis = maxBackOffInMillis;
return this;
}
/**
* The base backoff timeout (https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/)
* @param baseBackOffInMillis The base backoff timeout
* @return this builder
*/
@Nonnull
public Builder withBaseBackOffInMillis(final long baseBackOffInMillis) {
Validate.isTrue(baseBackOffInMillis >= 0, "Base backoff timeout should be greater than 0.");
this.baseBackOffInMillis = baseBackOffInMillis;
return this;
}
/**
* Creates a Builder populated with values from the Properties.
* @param config the configuration properties
* @return this builder
*/
@Nonnull
public Builder withProperties(@Nonnull final Properties config) {
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_MAX_SIZE))
.map(Integer::parseInt)
.ifPresent(this::withMaxBufferSize);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES))
.map(Integer::parseInt)
.ifPresent(this::withMaxPutRecordBatchBytes);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_FLUSH_MAX_NUMBER_OF_RETRIES))
.map(Integer::parseInt)
.ifPresent(this::withNumberOfRetries);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withBufferTimeoutInMillis);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_FULL_WAIT_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withBufferFullWaitTimeoutInMillis);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_FLUSH_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withBufferTimeoutBetweenFlushes);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_MAX_BACKOFF_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withMaxBackOffInMillis);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_BUFFER_BASE_BACKOFF_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withBaseBackOffInMillis);
ofNullable(config.getProperty(FIREHOSE_PRODUCER_MAX_OPERATION_TIMEOUT))
.map(Long::parseLong)
.ifPresent(this::withMaxOperationTimeoutInMillis);
return this;
}
}
}
| 7,290 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/util/AWSUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.util;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential.CredentialProvider;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_KINESIS_FIREHOSE_ENDPOINT;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_KINESIS_FIREHOSE_ENDPOINT_SIGNING_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_REGION;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.accessKeyId;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.profileName;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.roleArn;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.roleSessionName;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.secretKey;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.DEFAULT_MAXIMUM_BATCH_BYTES;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.ProducerConfigConstants.REDUCED_QUOTA_MAXIMUM_THROUGHPUT;
public final class AWSUtil {

    /** Static utility holder; never instantiated. */
    private AWSUtil() {
    }

    /**
     * Builds an {@link AmazonKinesisFirehose} client from the given configuration using the
     * supplied credential provider.
     *
     * <p>If {@code AWS_REGION} is present the client is bound to that region; otherwise the
     * explicit endpoint / signing-region pair is used. {@link #validateConfiguration(Properties)}
     * guarantees exactly one of those two forms is configured, so on the fallback path both
     * endpoint properties are present.
     *
     * @param configProps producer configuration; must satisfy {@link #validateConfiguration(Properties)}
     * @param credentialsProvider source of AWS credentials; must not be {@code null}
     * @return a configured Firehose client
     * @throws IllegalArgumentException if the configuration is invalid
     * @throws NullPointerException if {@code credentialsProvider} is {@code null}
     */
    public static AmazonKinesisFirehose createKinesisFirehoseClientFromConfiguration(@Nonnull final Properties configProps,
                                                                                     @Nonnull final CredentialProvider credentialsProvider) {
        validateConfiguration(configProps);
        Validate.notNull(credentialsProvider, "Credential Provider cannot be null.");
        AmazonKinesisFirehoseClientBuilder firehoseClientBuilder = AmazonKinesisFirehoseClientBuilder
            .standard()
            .withCredentials(credentialsProvider.getAwsCredentialsProvider());
        final String region = configProps.getProperty(AWS_REGION, null);
        final String firehoseEndpoint = configProps.getProperty(
            AWS_KINESIS_FIREHOSE_ENDPOINT, null);
        final String firehoseEndpointSigningRegion = configProps.getProperty(
            AWS_KINESIS_FIREHOSE_ENDPOINT_SIGNING_REGION, null);
        // Region takes precedence; otherwise fall back to the explicit endpoint + signing region.
        firehoseClientBuilder = (region != null) ? firehoseClientBuilder.withRegion(region)
            : firehoseClientBuilder.withEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(firehoseEndpoint, firehoseEndpointSigningRegion));
        return firehoseClientBuilder.build();
    }

    /**
     * Validates that exactly one addressing style is configured: either {@code AWS_REGION},
     * or BOTH {@code AWS_KINESIS_FIREHOSE_ENDPOINT} and
     * {@code AWS_KINESIS_FIREHOSE_ENDPOINT_SIGNING_REGION}.
     *
     * @param configProps configuration to check; must not be {@code null}
     * @return the same {@code configProps}, for call chaining
     * @throws IllegalArgumentException unless exactly one addressing form is present
     */
    public static Properties validateConfiguration(final Properties configProps) {
        Validate.notNull(configProps, "Configuration properties cannot be null.");
        // XOR: valid only when exactly one of {region} / {endpoint + signing region} is set.
        // Note the strictness: configuring BOTH the region and the endpoint pair is rejected,
        // as is configuring neither, or an endpoint without its signing region.
        if (!configProps.containsKey(AWS_REGION) ^ (configProps.containsKey(AWS_KINESIS_FIREHOSE_ENDPOINT) &&
            configProps.containsKey(AWS_KINESIS_FIREHOSE_ENDPOINT_SIGNING_REGION))) {
            throw new IllegalArgumentException(
                "Either AWS region should be specified or AWS Firehose endpoint and endpoint signing region.");
        }
        return configProps;
    }

    /**
     * Validates that the BASIC provider keys (access key id and secret key, optionally
     * prefixed with {@code providerKey}) are present.
     *
     * @param configProps configuration to check
     * @param providerKey optional property-key prefix; may be {@code null}
     * @return the same {@code configProps}, for call chaining
     * @throws IllegalArgumentException if either BASIC key is missing
     */
    public static Properties validateBasicProviderConfiguration(final Properties configProps, final String providerKey) {
        validateConfiguration(configProps);
        Validate.isTrue(configProps.containsKey(accessKeyId(providerKey)),
            "AWS access key must be specified with credential provider BASIC.");
        Validate.isTrue(configProps.containsKey(secretKey(providerKey)),
            "AWS secret key must be specified with credential provider BASIC.");
        return configProps;
    }

    /** Convenience overload of {@link #validateBasicProviderConfiguration(Properties, String)} with no prefix. */
    public static Properties validateBasicProviderConfiguration(final Properties configProps) {
        return validateBasicProviderConfiguration(configProps, null);
    }

    /**
     * Returns {@code true} when both BASIC credentials keys (optionally prefixed with
     * {@code providerKey}) are present in {@code configProps}.
     */
    public static boolean containsBasicProperties(final Properties configProps, final String providerKey) {
        Validate.notNull(configProps);
        return configProps.containsKey(accessKeyId(providerKey)) && configProps.containsKey(secretKey(providerKey));
    }

    /**
     * Resolves the credential provider type from configuration.
     *
     * <p>Resolution order:
     * <ul>
     *   <li>no {@code providerKey} entry at all: BASIC if both basic keys are present, else AUTO;</li>
     *   <li>entry present but empty: AUTO;</li>
     *   <li>otherwise the enum value of the entry — {@code valueOf} is exact and case-sensitive,
     *       so any unrecognized spelling (e.g. lowercase "basic") falls back to AUTO.</li>
     * </ul>
     */
    public static AWSConfigConstants.CredentialProviderType getCredentialProviderType(final Properties configProps,
                                                                                      final String providerKey) {
        if (providerKey == null || !configProps.containsKey(providerKey)) {
            return containsBasicProperties(configProps, providerKey) ?
                AWSConfigConstants.CredentialProviderType.BASIC : AWSConfigConstants.CredentialProviderType.AUTO;
        }
        final String providerTypeString = configProps.getProperty(providerKey);
        if (StringUtils.isEmpty(providerTypeString)) {
            return AWSConfigConstants.CredentialProviderType.AUTO;
        }
        try {
            return AWSConfigConstants.CredentialProviderType.valueOf(providerTypeString);
        } catch (IllegalArgumentException e) {
            // Unknown provider name: fall back to the safe default rather than failing.
            return AWSConfigConstants.CredentialProviderType.AUTO;
        }
    }

    /**
     * Validates that the PROFILE provider's profile-name key (prefixed with
     * {@code providerKey}) is present.
     *
     * @param configProps configuration to check
     * @param providerKey property-key prefix; must be non-blank
     * @return the same {@code configProps}, for call chaining
     */
    public static Properties validateProfileProviderConfiguration(final Properties configProps, final String providerKey) {
        validateConfiguration(configProps);
        Validate.notBlank(providerKey);
        Validate.isTrue(configProps.containsKey(profileName(providerKey)),
            "AWS profile name should be specified with credential provider PROFILE.");
        return configProps;
    }

    /** Convenience overload using the default {@code AWS_CREDENTIALS_PROVIDER} prefix. */
    public static Properties validateProfileProviderConfiguration(final Properties configProps) {
        return validateProfileProviderConfiguration(configProps, AWS_CREDENTIALS_PROVIDER);
    }

    /**
     * Validates that the ASSUME_ROLE provider's role ARN and session-name keys
     * (prefixed with {@code providerKey}) are present.
     *
     * @param configProps configuration to check
     * @param providerKey property-key prefix
     * @return the same {@code configProps}, for call chaining
     */
    public static Properties validateAssumeRoleCredentialsProvider(final Properties configProps, final String providerKey) {
        validateConfiguration(configProps);
        Validate.isTrue(configProps.containsKey(roleArn(providerKey)),
            "AWS role arn to be assumed must be provided with credential provider type ASSUME_ROLE");
        Validate.isTrue(configProps.containsKey(roleSessionName(providerKey)),
            "AWS role session name must be provided with credential provider type ASSUME_ROLE");
        return configProps;
    }

    /** Convenience overload using the default {@code AWS_CREDENTIALS_PROVIDER} prefix. */
    public static Properties validateAssumeRoleCredentialsProvider(final Properties configProps) {
        return validateAssumeRoleCredentialsProvider(configProps, AWS_CREDENTIALS_PROVIDER);
    }

    /**
     * Computes a sensible maximum put record batch size based on region.
     * There is a maximum batch size of 4 MiB per call, which would exceed the 1 MiB/second
     * quota in some regions. See https://docs.aws.amazon.com/firehose/latest/dev/limits.html
     *
     * <p>If the region is {@code null} or not one of the known high-throughput regions,
     * this falls back to the lower (reduced-quota) batch size. Customers can override the
     * value in producer properties.
     *
     * @param region the region the producer is running in; may be {@code null}
     * @return a sensible maximum batch size in bytes
     */
    public static int getDefaultMaxPutRecordBatchBytes(@Nullable final String region) {
        if (region != null) {
            switch (region) {
                case "us-east-1":
                case "us-west-2":
                case "eu-west-1":
                    return DEFAULT_MAXIMUM_BATCH_BYTES;
            }
        }
        // Intentional fall-through for null/unknown regions: use the conservative limit.
        return REDUCED_QUOTA_MAXIMUM_THROUGHPUT;
    }
}
| 7,291 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/config/ProducerConfigConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.config;
import java.util.concurrent.TimeUnit;
public class ProducerConfigConstants {

    // ---- Batching defaults -------------------------------------------------

    /**
     * Default (and maximum sensible) number of records buffered per PutRecordBatch call.
     * Kinesis Firehose caps PutRecordBatch at 500 records or 4 MiB per call; see
     * https://docs.aws.amazon.com/firehose/latest/dev/limits.html
     */
    public static final int DEFAULT_MAX_BUFFER_SIZE = 500;

    /** Maximum number of bytes that can be sent in a single PutRecordBatch call (4 MiB). */
    public static final int DEFAULT_MAXIMUM_BATCH_BYTES = 4 * 1024 * 1024;

    /**
     * Reduced maximum throughput (1 MiB) for regions with lower quotas; see
     * https://docs.aws.amazon.com/firehose/latest/dev/limits.html
     */
    public static final int REDUCED_QUOTA_MAXIMUM_THROUGHPUT = 1024 * 1024;

    // ---- Timeout and retry defaults ---------------------------------------

    /** Longest time (ms) a buffered batch may wait before it must be flushed. */
    public static final long DEFAULT_MAX_BUFFER_TIMEOUT = TimeUnit.MINUTES.toMillis(5);

    /** Longest time (ms) a single addUserRecord operation may take. */
    public static final long DEFAULT_MAX_OPERATION_TIMEOUT = TimeUnit.MINUTES.toMillis(5);

    /** Wait time (ms) before re-checking a buffer that is currently full. */
    public static final long DEFAULT_WAIT_TIME_FOR_BUFFER_FULL = 100L;

    /** Pause (ms) between consecutive buffer flushes. */
    public static final long DEFAULT_INTERVAL_BETWEEN_FLUSHES = 50L;

    /** Maximum number of retries for recoverable failures. */
    public static final int DEFAULT_NUMBER_OF_RETRIES = 10;

    /**
     * Maximum backoff (ms) for jittered exponential backoff; see
     * https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
     */
    public static final long DEFAULT_MAX_BACKOFF = 100L;

    /**
     * Base backoff (ms) for jittered exponential backoff; see
     * https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
     */
    public static final long DEFAULT_BASE_BACKOFF = 10L;

    // ---- Producer property keys -------------------------------------------

    public static final String FIREHOSE_PRODUCER_BUFFER_MAX_SIZE = "firehose.producer.batch.size";
    public static final String FIREHOSE_PRODUCER_BUFFER_MAX_BATCH_BYTES = "firehose.producer.batch.bytes";
    public static final String FIREHOSE_PRODUCER_BUFFER_MAX_TIMEOUT = "firehose.producer.buffer.timeout";
    public static final String FIREHOSE_PRODUCER_BUFFER_FULL_WAIT_TIMEOUT = "firehose.producer.buffer.full.wait.timeout";
    public static final String FIREHOSE_PRODUCER_BUFFER_FLUSH_TIMEOUT = "firehose.producer.buffer.flush.timeout";
    public static final String FIREHOSE_PRODUCER_BUFFER_FLUSH_MAX_NUMBER_OF_RETRIES = "firehose.producer.buffer.flush.retries";
    public static final String FIREHOSE_PRODUCER_BUFFER_MAX_BACKOFF_TIMEOUT = "firehose.producer.buffer.max.backoff";
    public static final String FIREHOSE_PRODUCER_BUFFER_BASE_BACKOFF_TIMEOUT = "firehose.producer.buffer.base.backoff";
    public static final String FIREHOSE_PRODUCER_MAX_OPERATION_TIMEOUT = "firehose.producer.operation.timeout";

    private ProducerConfigConstants() {
        // Constants holder; never instantiated.
    }
}
| 7,292 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/config/AWSConfigConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.config;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
* AWS Kinesis Firehose configuration constants
*/
/**
 * Configuration keys and credential-provider types for the Kinesis Firehose connector.
 */
public class AWSConfigConstants {

    /** Strategies for obtaining AWS credentials. */
    public enum CredentialProviderType {
        /** Read the access key id and secret key directly from the supplied configuration. */
        BASIC,
        /** Read the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables. */
        ENV_VARIABLES,
        /** Read the Java system properties aws.accessKeyId and aws.secretKey. */
        SYS_PROPERTIES,
        /** Load credentials from an AWS credentials profile file. */
        PROFILE,
        /** Obtain credentials by assuming a role; credentials for assuming it must be supplied. */
        ASSUME_ROLE,
        /** Default chain: ENV_VARIABLES, SYS_PROPERTIES, PROFILE, then instance metadata. */
        AUTO
    }

    /** Access key property key (provider type BASIC). */
    public static final String AWS_ACCESS_KEY_ID = "aws_access_key_id";

    /** Secret key property key (provider type BASIC). */
    public static final String AWS_SECRET_ACCESS_KEY = "aws_secret_access_key";

    /** Region of the Firehose delivery stream. */
    public static final String AWS_REGION = "aws.region";

    /**
     * Credential provider type to use when AWS credentials are required
     * (AUTO unless access key id and secret key are set, in which case BASIC).
     */
    public static final String AWS_CREDENTIALS_PROVIDER = "aws.credentials.provider";

    /** Explicit Kinesis Firehose endpoint. */
    public static final String AWS_KINESIS_FIREHOSE_ENDPOINT = "aws.kinesis.firehose.endpoint";

    /** Signing region that accompanies an explicit endpoint. */
    public static final String AWS_KINESIS_FIREHOSE_ENDPOINT_SIGNING_REGION = "aws.kinesis.firehose.endpoint.signing.region";

    // NOTE: the derived constants below must stay after AWS_CREDENTIALS_PROVIDER,
    // since static field initializers run in declaration order.

    /** Profile name (provider type PROFILE). */
    public static final String AWS_PROFILE_NAME = profileName(AWS_CREDENTIALS_PROVIDER);

    /** Profile file path (provider type PROFILE). */
    public static final String AWS_PROFILE_PATH = profilePath(AWS_CREDENTIALS_PROVIDER);

    /** Role ARN to assume (provider type ASSUME_ROLE). */
    public static final String AWS_ROLE_ARN = roleArn(AWS_CREDENTIALS_PROVIDER);

    /** Role session name (provider type ASSUME_ROLE). */
    public static final String AWS_ROLE_SESSION_NAME = roleSessionName(AWS_CREDENTIALS_PROVIDER);

    /** External id (provider type ASSUME_ROLE). */
    public static final String AWS_ROLE_EXTERNAL_ID = externalId(AWS_CREDENTIALS_PROVIDER);

    /**
     * Provider of the credentials used for assuming the role (provider type ASSUME_ROLE).
     * Roles can nest, so this may itself be set to ASSUME_ROLE.
     */
    public static final String AWS_ROLE_CREDENTIALS_PROVIDER = roleCredentialsProvider(AWS_CREDENTIALS_PROVIDER);

    private AWSConfigConstants() {
        // Constants holder; never instantiated.
    }

    /** Returns the (optionally prefixed) access-key property key. */
    @Nonnull
    public static String accessKeyId(@Nullable String prefix) {
        if (StringUtils.isEmpty(prefix)) {
            return AWS_ACCESS_KEY_ID;
        }
        return prefix + ".basic." + AWS_ACCESS_KEY_ID;
    }

    /** Returns the unprefixed access-key property key. */
    @Nonnull
    public static String accessKeyId() {
        return accessKeyId(null);
    }

    /** Returns the (optionally prefixed) secret-key property key. */
    @Nonnull
    public static String secretKey(@Nullable String prefix) {
        if (StringUtils.isEmpty(prefix)) {
            return AWS_SECRET_ACCESS_KEY;
        }
        return prefix + ".basic." + AWS_SECRET_ACCESS_KEY;
    }

    /** Returns the unprefixed secret-key property key. */
    @Nonnull
    public static String secretKey() {
        return secretKey(null);
    }

    /** Returns the profile-path property key under {@code prefix}. */
    @Nonnull
    public static String profilePath(@Nonnull String prefix) {
        return suffixed(prefix, ".profile.path");
    }

    /** Returns the profile-name property key under {@code prefix}. */
    @Nonnull
    public static String profileName(@Nonnull String prefix) {
        return suffixed(prefix, ".profile.name");
    }

    /** Returns the role-ARN property key under {@code prefix}. */
    @Nonnull
    public static String roleArn(@Nonnull String prefix) {
        return suffixed(prefix, ".role.arn");
    }

    /** Returns the role-session-name property key under {@code prefix}. */
    @Nonnull
    public static String roleSessionName(@Nonnull String prefix) {
        return suffixed(prefix, ".role.sessionName");
    }

    /** Returns the role-external-id property key under {@code prefix}. */
    @Nonnull
    public static String externalId(@Nonnull String prefix) {
        return suffixed(prefix, ".role.externalId");
    }

    /** Returns the nested role-credentials-provider property key under {@code prefix}. */
    @Nonnull
    public static String roleCredentialsProvider(@Nonnull String prefix) {
        return suffixed(prefix, ".role.provider");
    }

    /** Validates that {@code prefix} is non-blank and appends {@code suffix} to it. */
    private static String suffixed(final String prefix, final String suffix) {
        Validate.notBlank(prefix);
        return prefix + suffix;
    }
}
| 7,293 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/serialization/JsonSerializationSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.serialization;
import com.amazonaws.services.kinesisanalytics.flink.connectors.exception.SerializationException;
import org.apache.commons.lang3.Validate;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
/**
 * A {@link SerializationSchema} that serializes elements to their JSON byte
 * representation with Jackson.
 *
 * @param <T> the type of elements to serialize
 */
public class JsonSerializationSchema<T> implements SerializationSchema<T> {

    // Single mapper instance reused across all serialize calls.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /**
     * Serializes the given element to JSON bytes.
     *
     * @param element the value to serialize; must not be {@code null}
     * @return the JSON-encoded bytes
     * @throws SerializationException if Jackson fails to write the value
     */
    @Override
    public byte[] serialize(T element) {
        Validate.notNull(element);
        try {
            return MAPPER.writeValueAsBytes(element);
        } catch (final JsonProcessingException e) {
            throw new SerializationException("Failed trying to serialize", e);
        }
    }
}
| 7,294 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/serialization/KinesisFirehoseSerializationSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.serialization;
import java.io.Serializable;
import java.nio.ByteBuffer;
/**
 * Serialization contract used to convert an element of type {@code T} into the raw
 * bytes of a Kinesis Firehose record.
 *
 * <p>Extends {@link Serializable} so that implementations can be serialized along
 * with the job — presumably for distribution to Flink workers; confirm against the
 * sink that ships instances of this interface.
 *
 * @param <T> the type of elements to serialize
 */
public interface KinesisFirehoseSerializationSchema<T> extends Serializable {

    /**
     * Serializes the given element.
     *
     * @param element the element to serialize
     * @return a {@link ByteBuffer} holding the serialized bytes
     */
    ByteBuffer serialize(T element);
}
| 7,295 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/DefaultCredentialProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import java.util.Properties;
/**
 * Credential provider backed by the standard AWS default provider chain
 * ({@link DefaultAWSCredentialsProviderChain}).
 */
public class DefaultCredentialProvider extends CredentialProvider {

    /**
     * Creates a provider with no provider-key prefix.
     *
     * @param properties connector configuration; validated by the superclass
     */
    public DefaultCredentialProvider(final Properties properties) {
        this(properties, null);
    }

    /**
     * Creates a provider.
     *
     * @param properties connector configuration; validated by the superclass
     * @param providerKey optional property-key prefix; may be {@code null}
     */
    public DefaultCredentialProvider(final Properties properties, final String providerKey) {
        super(properties, providerKey);
    }

    /** Returns a fresh {@link DefaultAWSCredentialsProviderChain}. */
    @Override
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        return new DefaultAWSCredentialsProviderChain();
    }
}
| 7,296 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/ProfileCredentialProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants;
import org.apache.commons.lang3.StringUtils;
import java.util.Properties;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
import static com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil.validateProfileProviderConfiguration;
/**
 * Credential provider that reads credentials from an AWS credentials profile file.
 * The profile name (and optionally the profile path) are taken from configuration.
 */
public class ProfileCredentialProvider extends CredentialProvider {

    /**
     * Creates a provider using the default {@code AWS_CREDENTIALS_PROVIDER} prefix.
     *
     * @param properties connector configuration; validated on construction
     */
    public ProfileCredentialProvider(final Properties properties) {
        this(properties, AWS_CREDENTIALS_PROVIDER);
    }

    /**
     * Creates a provider.
     *
     * @param properties connector configuration; validated on construction
     * @param providerKey property-key prefix for the profile settings
     */
    public ProfileCredentialProvider(final Properties properties, final String providerKey) {
        super(validateProfileProviderConfiguration(properties, providerKey), providerKey);
    }

    /** Builds a {@link ProfileCredentialsProvider}, honoring an explicit profile path when set. */
    @Override
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        final String configuredName = properties.getProperty(AWSConfigConstants.profileName(providerKey));
        final String configuredPath = properties.getProperty(AWSConfigConstants.profilePath(providerKey));
        if (StringUtils.isEmpty(configuredPath)) {
            return new ProfileCredentialsProvider(configuredName);
        }
        return new ProfileCredentialsProvider(configuredPath, configuredName);
    }
}
| 7,297 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/CredentialProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.kinesisanalytics.flink.connectors.util.AWSUtil;
import java.util.Properties;
/**
 * Base class for connector credential providers. Validates the supplied configuration
 * once on construction and exposes it (plus the provider-key prefix) to subclasses.
 */
public abstract class CredentialProvider {

    // Validated connector configuration; never null after construction.
    final Properties properties;

    // Property-key prefix for provider-specific settings; empty string when none given.
    final String providerKey;

    CredentialProvider(final Properties properties, final String providerKey) {
        this.properties = AWSUtil.validateConfiguration(properties);
        if (providerKey == null) {
            this.providerKey = "";
        } else {
            this.providerKey = providerKey;
        }
    }

    public CredentialProvider(final Properties properties) {
        this(properties, null);
    }

    /** Builds the underlying AWS credentials provider for this strategy. */
    public abstract AWSCredentialsProvider getAwsCredentialsProvider();

    protected Properties getProperties() {
        return this.properties;
    }

    protected String getProviderKey() {
        return this.providerKey;
    }
}
| 7,298 |
0 | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider | Create_ds/aws-kinesisanalytics-flink-connectors/src/main/java/com/amazonaws/services/kinesisanalytics/flink/connectors/provider/credential/SystemCredentialProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.services.kinesisanalytics.flink.connectors.provider.credential;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
import java.util.Properties;
/**
 * Credential provider that reads the Java system properties aws.accessKeyId and
 * aws.secretKey via {@link SystemPropertiesCredentialsProvider}.
 */
public class SystemCredentialProvider extends CredentialProvider {

    /**
     * Creates a provider with no provider-key prefix.
     *
     * @param properties connector configuration; validated by the superclass
     */
    public SystemCredentialProvider(final Properties properties) {
        this(properties, null);
    }

    /**
     * Creates a provider.
     *
     * @param properties connector configuration; validated by the superclass
     * @param providerKey optional property-key prefix; may be {@code null}
     */
    public SystemCredentialProvider(final Properties properties, final String providerKey) {
        super(properties, providerKey);
    }

    /** Returns a fresh {@link SystemPropertiesCredentialsProvider}. */
    @Override
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        return new SystemPropertiesCredentialsProvider();
    }
}
| 7,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.